From noreply at buildbot.pypy.org Fri Aug 1 03:15:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 1 Aug 2014 03:15:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: redo 7e04e788d910 without breaking translation Message-ID: <20140801011545.568521C09B2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72627:bbf05facf765 Date: 2014-07-31 18:02 -0700 http://bitbucket.org/pypy/pypy/changeset/bbf05facf765/ Log: redo 7e04e788d910 without breaking translation diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -14,6 +14,9 @@ def __init__(self, message): self.message = message + def __str__(self): + return self.message + def expr_context_name(ctx): if not 1 <= ctx <= len(ast.expr_context_to_class): From noreply at buildbot.pypy.org Fri Aug 1 03:19:34 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Fri, 1 Aug 2014 03:19:34 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes: fix incorrect value of kwargname (issue #1831) Message-ID: <20140801011934.686F61C09B2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes Changeset: r72628:0a5f64f5722e Date: 2014-07-31 19:02 +0200 http://bitbucket.org/pypy/pypy/changeset/0a5f64f5722e/ Log: fix incorrect value of kwargname (issue #1831) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -60,7 +60,7 @@ else: varargname = None if code.co_flags & CO_VARKEYWORDS: - kwargname = code.co_varnames[argcount] + kwargname = code.co_varnames[argcount+kwonlyargcount] argcount += 1 else: kwargname = None diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -714,6 +714,22 @@ else: py.test.fail("Did not raise") + def 
test_signature_kwargname(self): + from pypy.interpreter.pycode import cpython_code_signature + from pypy.interpreter.signature import Signature + + snippet = 'def f(a, b, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 'single', 0) + co = containing_co.co_consts_w[2] + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b', 'm', 'n'], None, 'kwargs', []) + + snippet = 'def f(a, b, *, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 'single', 0) + co = containing_co.co_consts_w[4] + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b'], None, 'kwargs', ['m', 'n']) + class AppTestCompiler: From noreply at buildbot.pypy.org Fri Aug 1 03:19:36 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Fri, 1 Aug 2014 03:19:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes: use helper function to locate the code object Message-ID: <20140801011936.1112E1C09B2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes Changeset: r72629:4f709511ed94 Date: 2014-07-31 21:40 +0200 http://bitbucket.org/pypy/pypy/changeset/4f709511ed94/ Log: use helper function to locate the code object diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -718,15 +718,20 @@ from pypy.interpreter.pycode import cpython_code_signature from pypy.interpreter.signature import Signature + def find_func(code): + for w_const in code.co_consts_w: + if isinstance(w_const, PyCode): + return w_const + snippet = 'def f(a, b, m=1, n=2, **kwargs): pass' containing_co = self.compiler.compile(snippet, '', 'single', 0) - co = containing_co.co_consts_w[2] + co = find_func(containing_co) sig = cpython_code_signature(co) assert sig == Signature(['a', 'b', 'm', 'n'], None, 'kwargs', []) snippet = 'def f(a, b, *, m=1, n=2, **kwargs): pass' containing_co = 
self.compiler.compile(snippet, '', 'single', 0) - co = containing_co.co_consts_w[4] + co = find_func(containing_co) sig = cpython_code_signature(co) assert sig == Signature(['a', 'b'], None, 'kwargs', ['m', 'n']) From noreply at buildbot.pypy.org Fri Aug 1 03:19:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 1 Aug 2014 03:19:37 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes (pull request #258) Message-ID: <20140801011937.934AE1C09B2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72630:4ced75933639 Date: 2014-07-31 18:19 -0700 http://bitbucket.org/pypy/pypy/changeset/4ced75933639/ Log: Merged in numerodix/pypy/py3.3-fixes (pull request #258) fix incorrect value of kwargname (issue #1831) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -60,7 +60,7 @@ else: varargname = None if code.co_flags & CO_VARKEYWORDS: - kwargname = code.co_varnames[argcount] + kwargname = code.co_varnames[argcount+kwonlyargcount] argcount += 1 else: kwargname = None diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -714,6 +714,27 @@ else: py.test.fail("Did not raise") + def test_signature_kwargname(self): + from pypy.interpreter.pycode import cpython_code_signature + from pypy.interpreter.signature import Signature + + def find_func(code): + for w_const in code.co_consts_w: + if isinstance(w_const, PyCode): + return w_const + + snippet = 'def f(a, b, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 'single', 0) + co = find_func(containing_co) + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b', 'm', 'n'], None, 'kwargs', []) + + snippet = 'def f(a, b, *, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 
'single', 0) + co = find_func(containing_co) + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b'], None, 'kwargs', ['m', 'n']) + class AppTestCompiler: From noreply at buildbot.pypy.org Fri Aug 1 14:46:09 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Fri, 1 Aug 2014 14:46:09 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix typos Message-ID: <20140801124609.4B5DF1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72631:91450a0f17bc Date: 2014-07-31 20:58 +0200 http://bitbucket.org/pypy/pypy/changeset/91450a0f17bc/ Log: fix typos diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -443,7 +443,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -325,7 +325,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) 
This difference is an implementation detail that shows up because of internal C-level slots From noreply at buildbot.pypy.org Fri Aug 1 14:46:10 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Fri, 1 Aug 2014 14:46:10 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix typo Message-ID: <20140801124610.91E9F1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72632:98cb8999d9f7 Date: 2014-07-31 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/98cb8999d9f7/ Log: fix typo diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -640,7 +640,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: From noreply at buildbot.pypy.org Fri Aug 1 14:46:11 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Fri, 1 Aug 2014 14:46:11 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Merged in numerodix/pypy/improve-docs-fixes (pull request #259) Message-ID: <20140801124611.CDE081C0250@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72633:7cb52d2a97f0 Date: 2014-08-01 14:45 +0200 http://bitbucket.org/pypy/pypy/changeset/7cb52d2a97f0/ Log: Merged in numerodix/pypy/improve-docs-fixes (pull request #259) fix typos diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -443,7 +443,7 @@ Adding an entry under pypy/module (e.g. 
mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. @@ -640,7 +640,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -325,7 +325,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) 
This difference is an implementation detail that shows up because of internal C-level slots From noreply at buildbot.pypy.org Fri Aug 1 14:46:33 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Fri, 1 Aug 2014 14:46:33 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: Close branch improve-docs-fixes Message-ID: <20140801124633.3B6101C0250@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs-fixes Changeset: r72634:61b902f1f07b Date: 2014-08-01 14:45 +0200 http://bitbucket.org/pypy/pypy/changeset/61b902f1f07b/ Log: Close branch improve-docs-fixes From noreply at buildbot.pypy.org Fri Aug 1 19:41:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 1 Aug 2014 19:41:49 +0200 (CEST) Subject: [pypy-commit] pypy default: transplant 7cb52d2a97f0 Message-ID: <20140801174149.81E521C08D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72635:09f1776bf62a Date: 2014-08-01 19:41 +0200 http://bitbucket.org/pypy/pypy/changeset/09f1776bf62a/ Log: transplant 7cb52d2a97f0 diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. 
Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -328,7 +328,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) This difference is an implementation detail that shows up because of internal C-level slots From noreply at buildbot.pypy.org Fri Aug 1 23:55:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 1 Aug 2014 23:55:27 +0200 (CEST) Subject: [pypy-commit] pypy default: fix bound classmethods lacking an im_class Message-ID: <20140801215527.5B1501C08D5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72636:8b19970c9881 Date: 2014-08-01 14:54 -0700 http://bitbucket.org/pypy/pypy/changeset/8b19970c9881/ Log: fix bound classmethods lacking an im_class diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -616,7 +616,8 @@ def descr_classmethod_get(self, space, w_obj, w_klass=None): if space.is_none(w_klass): w_klass = space.type(w_obj) - return space.wrap(Method(space, self.w_function, w_klass, space.w_None)) + return space.wrap(Method(space, self.w_function, w_klass, + space.type(w_klass))) def descr_classmethod__new__(space, w_subtype, w_function): instance = space.allocate_instance(ClassMethod, w_subtype) diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- 
a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -388,6 +388,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): From noreply at buildbot.pypy.org Sat Aug 2 00:23:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 00:23:22 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140801222322.D38ED1C142A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72637:33d48832dcfd Date: 2014-08-01 15:21 -0700 http://bitbucket.org/pypy/pypy/changeset/33d48832dcfd/ Log: merge default diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -328,7 +328,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. 
For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -394,6 +394,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). 
""" - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): + self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = 
JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = ['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1154,7 +1154,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == 
other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot + # possibly be equal unless they are the same _ptr instance + return self is other def __ne__(self, other): return not (self == other) diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -93,7 +93,12 @@ return False # nothing to do, all signatures already match shape_cnt, shape_keys, shape_star = shape - assert not shape_star, "XXX not implemented" + if shape_star: + raise TyperError( + "not implemented: a call is done with a '*' argument, and the" + " multiple functions or methods that it can go to don't have" + " all the same signature (different argument names or defaults)." + " The call can go to:\n%s" % '\n'.join(map(repr, graphs))) # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -64,3 +64,13 @@ assert lltype.typeOf(ptr) == OBJECTPTR y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x + + def test_delayedptr(self): + FUNCTYPE = lltype.FuncType([], lltype.Signed) + name = "delayed!myfunc" + delayedptr1 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + delayedptr2 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + assert delayedptr1 == delayedptr1 + assert delayedptr1 != delayedptr2 + assert bool(delayedptr1) + assert delayedptr1 != lltype.nullptr(FUNCTYPE) diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -192,6 +192,25 @@ import re assert re.match(msg, excinfo.value.args[0]) + def test_methods_with_named_arg_call(self): + class Base: + def fn(self, y): + raise 
NotImplementedError + class Sub1(Base): + def fn(self, y): + return 1 + y + class Sub2(Base): + def fn(self, x): # different name! + return x - 2 + def dummyfn(n): + if n == 1: + s = Sub1() + else: + s = Sub2() + return s.fn(*(n,)) + + py.test.raises(TyperError, self.rtype, dummyfn, [int], int) + class PBase: def fn(self): From noreply at buildbot.pypy.org Sat Aug 2 00:23:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 00:23:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: issue1804: fix nonlocal decls not being added to free_vars Message-ID: <20140801222324.690F21C142A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72638:1f716034739d Date: 2014-08-01 15:21 -0700 http://bitbucket.org/pypy/pypy/changeset/1f716034739d/ Log: issue1804: fix nonlocal decls not being added to free_vars diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -136,6 +136,7 @@ err = "no binding for nonlocal '%s' found" % (name,) raise SyntaxError(err, self.lineno, self.col_offset) self.symbols[name] = SCOPE_FREE + self.free_vars.append(name) free[name] = None elif flags & SYM_BOUND: self.symbols[name] = SCOPE_LOCAL diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -808,6 +808,17 @@ return y""" yield self.st, test, "f()", 4 + def test_nonlocal_from_arg(self): + test = """if 1: + def test1(x): + def test2(): + nonlocal x + def test3(): + return x + return test3() + return test2()""" + yield self.st, test, "test1(2)", 2 + def test_lots_of_loops(self): source = "for x in y: pass\n" * 1000 compile_with_astcompiler(source, 'exec', self.space) From noreply at buildbot.pypy.org Sat Aug 2 00:23:26 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 00:23:26 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140801222326.391CC1C142A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72639:64027c84d8c6 Date: 2014-08-01 15:21 -0700 http://bitbucket.org/pypy/pypy/changeset/64027c84d8c6/ Log: merge py3k diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -328,7 +328,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) 
This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -136,6 +136,7 @@ err = "no binding for nonlocal '%s' found" % (name,) raise SyntaxError(err, self.lineno, self.col_offset) self.symbols[name] = SCOPE_FREE + self.free_vars.append(name) free[name] = None elif flags & SYM_BOUND: self.symbols[name] = SCOPE_LOCAL diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -808,6 +808,17 @@ return y""" yield self.st, test, "f()", 4 + def test_nonlocal_from_arg(self): + test = """if 1: + def test1(x): + def test2(): + nonlocal x + def test3(): + return x + return test3() + return test2()""" + yield self.st, test, "test1(2)", 2 + def test_lots_of_loops(self): source = "for x in y: pass\n" * 1000 compile_with_astcompiler(source, 'exec', self.space) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -394,6 +394,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). """ - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) diff --git 
a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): + self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = ['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], 
virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1154,7 +1154,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot + # possibly be equal unless they are the same _ptr instance + return self is other def __ne__(self, other): return not (self == other) diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -93,7 +93,12 @@ return False # nothing to do, all signatures already match shape_cnt, shape_keys, shape_star = shape - assert not shape_star, "XXX not implemented" + if shape_star: + raise TyperError( + "not implemented: a call is done with a '*' argument, and the" + " multiple functions or methods that it can go to don't have" + " all the same signature (different argument names or defaults)." 
+ " The call can go to:\n%s" % '\n'.join(map(repr, graphs))) # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -64,3 +64,13 @@ assert lltype.typeOf(ptr) == OBJECTPTR y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x + + def test_delayedptr(self): + FUNCTYPE = lltype.FuncType([], lltype.Signed) + name = "delayed!myfunc" + delayedptr1 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + delayedptr2 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + assert delayedptr1 == delayedptr1 + assert delayedptr1 != delayedptr2 + assert bool(delayedptr1) + assert delayedptr1 != lltype.nullptr(FUNCTYPE) diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -192,6 +192,25 @@ import re assert re.match(msg, excinfo.value.args[0]) + def test_methods_with_named_arg_call(self): + class Base: + def fn(self, y): + raise NotImplementedError + class Sub1(Base): + def fn(self, y): + return 1 + y + class Sub2(Base): + def fn(self, x): # different name! 
+ return x - 2 + def dummyfn(n): + if n == 1: + s = Sub1() + else: + s = Sub2() + return s.fn(*(n,)) + + py.test.raises(TyperError, self.rtype, dummyfn, [int], int) + class PBase: def fn(self): From noreply at buildbot.pypy.org Sat Aug 2 00:23:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 00:23:27 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add SimpleNamespace.__repr__ Message-ID: <20140801222327.D88871C142A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72640:6b3258e026e3 Date: 2014-08-01 15:22 -0700 http://bitbucket.org/pypy/pypy/changeset/6b3258e026e3/ Log: add SimpleNamespace.__repr__ diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -129,6 +129,17 @@ def __dict__(self): return self._ns + def __repr__(self, recurse=set()): + ident = id(self) + if ident in recurse: + return "namespace(...)" + recurse.add(ident) + try: + pairs = ('%s=%r' % item for item in sorted(self._ns.items())) + return "namespace(%s)" % ', '.join(pairs) + finally: + recurse.remove(ident) + implementation = SimpleNamespace( name='pypy', diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -519,6 +519,9 @@ # PEP 421 requires that .name be lower case. 
assert sys.implementation.name == sys.implementation.name.lower() + ns1 = type(sys.implementation)(x=1, y=2, w=3) + assert repr(ns1) == "namespace(w=3, x=1, y=2)" + def test_settrace(self): import sys counts = [] From noreply at buildbot.pypy.org Sat Aug 2 00:39:58 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:39:58 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: factor dir() built-in out into object, type and module methods Message-ID: <20140801223958.780FB1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72641:2bdf0e894a85 Date: 2014-07-29 19:44 +0200 http://bitbucket.org/pypy/pypy/changeset/2bdf0e894a85/ Log: factor dir() built-in out into object, type and module methods This happened in bugs.python.org/issue12166 diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -123,3 +123,15 @@ except OperationError: __file__ = u'?' return space.wrap(u"" % (name, __file__)) + + def descr_module__dir__(self, space): + try: + w__dict__ = space.getattr(self, space.wrap('__dict__')) + result = space.listview(w__dict__) + w_result = space.wrap(result) + w_result.sort(False) + return w_result + except OperationError as e: + if e.match(space, space.w_AttributeError): + return space.wrap([]) + raise diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -68,6 +68,12 @@ m = type(_pypy_interact).__new__(type(_pypy_interact)) assert repr(m).startswith(" Author: Martin Matusiak Branch: py3.3 Changeset: r72642:239d45aea639 Date: 2014-07-29 20:58 +0200 http://bitbucket.org/pypy/pypy/changeset/239d45aea639/ Log: prefer use of __mro__ instead of recursing the inheritance hierarchy diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ 
b/pypy/objspace/std/objecttype.py @@ -27,16 +27,14 @@ Dict.update(klass.__dict__) except AttributeError: pass try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. - bases = klass.__bases__ + bases = klass.__mro__ except AttributeError: pass else: try: #Note that since we are only interested in the keys, # the order we merge classes is unimportant for base in bases: - Dict.update(_classdir(base)) + Dict.update(base.__dict__) except TypeError: pass return Dict diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -738,16 +738,14 @@ Dict.update(klass.__dict__) except AttributeError: pass try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. - bases = klass.__bases__ + bases = klass.__mro__ except AttributeError: pass else: try: #Note that since we are only interested in the keys, # the order we merge classes is unimportant for base in bases: - Dict.update(_classdir(base)) + Dict.update(base.__dict__) except TypeError: pass return Dict From noreply at buildbot.pypy.org Sat Aug 2 00:40:01 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:40:01 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged pypy/pypy/py3.3 into py3.3 Message-ID: <20140801224001.D49EF1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72643:61ea6233fd47 Date: 2014-07-30 21:05 +0200 http://bitbucket.org/pypy/pypy/changeset/61ea6233fd47/ Log: Merged pypy/pypy/py3.3 into py3.3 diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -15,7 +15,8 @@ import unittest import warnings from operator import neg -from test.support import TESTFN, unlink, run_unittest, check_warnings +from test.support import ( + TESTFN, unlink, 
run_unittest, check_warnings, check_impl_detail) try: import pty, signal except ImportError: @@ -423,7 +424,9 @@ try: raise IndexError except: - self.assertEqual(len(dir(sys.exc_info()[2])), 4) + methods = [meth for meth in dir(sys.exc_info()[2]) + if not meth.startswith('_')] + self.assertEqual(len(methods), 4) # test that object has a __dir__() self.assertEqual(sorted([].__dir__()), dir([])) @@ -558,18 +561,21 @@ self.assertEqual((g, l), ({'a': 1}, {'b': 2})) def test_exec_globals(self): - code = compile("print('Hello World!')", "", "exec") - # no builtin function - self.assertRaisesRegex(NameError, "name 'print' is not defined", - exec, code, {'__builtins__': {}}) - # __builtins__ must be a mapping type - self.assertRaises(TypeError, - exec, code, {'__builtins__': 123}) + if check_impl_detail(): + # strict __builtins__ compliance (CPython) + code = compile("print('Hello World!')", "", "exec") + # no builtin function + self.assertRaisesRegex(NameError, "name 'print' is not defined", + exec, code, {'__builtins__': {}}) + # __builtins__ must be a mapping type + self.assertRaises(TypeError, + exec, code, {'__builtins__': 123}) - # no __build_class__ function - code = compile("class A: pass", "", "exec") - self.assertRaisesRegex(NameError, "__build_class__ not found", - exec, code, {'__builtins__': {}}) + # no __build_class__ function + code = compile("class A: pass", "", "exec") + if True: + self.assertRaisesRegex(NameError, "__build_class__ not found", + exec, code, {'__builtins__': {}}) class frozendict_error(Exception): pass @@ -579,7 +585,7 @@ raise frozendict_error("frozendict is readonly") # read-only builtins - frozen_builtins = frozendict(__builtins__) + frozen_builtins = frozendict(builtins.__dict__) code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec") self.assertRaises(frozendict_error, exec, code, {'__builtins__': frozen_builtins}) diff --git a/lib-python/3/test/test_concurrent_futures.py 
b/lib-python/3/test/test_concurrent_futures.py --- a/lib-python/3/test/test_concurrent_futures.py +++ b/lib-python/3/test/test_concurrent_futures.py @@ -295,14 +295,19 @@ event = threading.Event() def future_func(): event.wait() - oldswitchinterval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) + newgil = hasattr(sys, 'getswitchinterval') + if newgil: + geti, seti = sys.getswitchinterval, sys.setswitchinterval + else: + geti, seti = sys.getcheckinterval, sys.setcheckinterval + oldinterval = geti() + seti(1e-6 if newgil else 1) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: - sys.setswitchinterval(oldswitchinterval) + seti(oldinterval) class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase): diff --git a/lib-python/3/test/test_imp.py b/lib-python/3/test/test_imp.py --- a/lib-python/3/test/test_imp.py +++ b/lib-python/3/test/test_imp.py @@ -317,7 +317,6 @@ @unittest.skipUnless(sys.implementation.cache_tag is not None, 'requires sys.implementation.cache_tag not be None') - @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyc file (i.e. under __pycache__). @@ -339,7 +338,6 @@ 'file{}.pyc'.format(self.tag)) self.assertEqual(imp.cache_from_source(path, True), expect) - @support.impl_detail("PyPy ignores the optimize flag", pypy=False) def test_cache_from_source_optimized(self): # Given the path to a .py file, return the path to its PEP 3147 # defined .pyo file (i.e. under __pycache__). 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -37,7 +37,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy" - "faulthandler", + "faulthandler", "_lzma", ]) translation_modules = default_modules.copy() @@ -106,6 +106,7 @@ "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], + "_lzma" : ["pypy.module._lzma.interp_lzma"], } def get_module_validator(modname): diff --git a/pypy/doc/config/objspace.usemodules._lzma.txt b/pypy/doc/config/objspace.usemodules._lzma.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._lzma.txt @@ -0,0 +1,2 @@ +Use the '_lzma' module. +This module is expected to be working and is included by default. diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,8 +11,7 @@ class ValidationError(Exception): - def __init__(self, message): - self.message = message + """Signals an invalid AST""" def expr_context_name(ctx): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1012,13 +1012,14 @@ else: w_retval = space.call_method(w_gen, "send", w_value) except OperationError as e: - if not e.match(self.space, self.space.w_StopIteration): + if not e.match(space, space.w_StopIteration): raise self.popvalue() # Remove iter from stack + e.normalize_exception(space) try: w_value = space.getattr(e.get_w_value(space), space.wrap("value")) except OperationError as e: - if not e.match(self.space, self.space.w_AttributeError): + if not e.match(space, space.w_AttributeError): raise w_value = 
space.w_None self.pushvalue(w_value) diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -268,10 +268,7 @@ def test_return_in_generator(self): code = 'def f():\n return None\n yield 19\n' - e = py.test.raises(OperationError, self.compiler.compile, code, '', 'single', 0) - ex = e.value - ex.normalize_exception(self.space) - assert ex.match(self.space, self.space.w_SyntaxError) + self.compiler.compile(code, '', 'single', 0) def test_yield_in_finally(self): code ='def f():\n try:\n yield 19\n finally:\n pass\n' diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -316,6 +316,24 @@ assert False, 'Expected StopIteration' """ + def test_yield_from_return(self): + """ + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return 2 + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' + """ + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline diff --git a/pypy/module/_lzma/__init__.py b/pypy/module/_lzma/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/__init__.py @@ -0,0 +1,20 @@ +from pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + # The private part of the lzma module. 
+ + applevel_name = '_lzma' + + interpleveldefs = { + 'LZMACompressor': 'interp_lzma.W_LZMACompressor', + 'LZMADecompressor': 'interp_lzma.W_LZMADecompressor', + '_encode_filter_properties': 'interp_lzma.encode_filter_properties', + '_decode_filter_properties': 'interp_lzma.decode_filter_properties', + 'FORMAT_AUTO': 'space.wrap(interp_lzma.FORMAT_AUTO)', + 'FORMAT_XZ': 'space.wrap(interp_lzma.FORMAT_XZ)', + 'FORMAT_ALONE': 'space.wrap(interp_lzma.FORMAT_ALONE)', + 'FORMAT_RAW': 'space.wrap(interp_lzma.FORMAT_RAW)', + } + + appleveldefs = { + } diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/interp_lzma.py @@ -0,0 +1,359 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import ( + TypeDef, interp_attrproperty_bytes, interp_attrproperty) +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.module.thread.os_lock import Lock +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import LONGLONG_MASK, r_ulonglong +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) + + +eci = ExternalCompilationInfo( + includes = ['lzma.h'], + libraries = ['lzma'], + ) +eci = platform.configure_external_library( + 'lzma', eci, + [dict(prefix='lzma-')]) +if not eci: + raise ImportError("Could not find lzma library") + + +class CConfig: + _compilation_info_ = eci + calling_conv = 'c' + + BUFSIZ = platform.ConstantInteger("BUFSIZ") + + lzma_stream = platform.Struct( + 'lzma_stream', + [('next_in', rffi.CCHARP), + ('avail_in', rffi.UINT), + ('total_in', rffi.UINT), + ('next_out', rffi.CCHARP), + ('avail_out', rffi.UINT), + ('total_out', 
rffi.UINT), + ]) + + lzma_options_lzma = platform.Struct( + 'lzma_options_lzma', + []) + +constant_names = ''' + LZMA_RUN LZMA_FINISH + LZMA_OK LZMA_GET_CHECK LZMA_NO_CHECK LZMA_STREAM_END + LZMA_PRESET_DEFAULT + LZMA_CHECK_ID_MAX + LZMA_TELL_ANY_CHECK LZMA_TELL_NO_CHECK + '''.split() +for name in constant_names: + setattr(CConfig, name, platform.ConstantInteger(name)) + +class cConfig(object): + pass +for k, v in platform.configure(CConfig).items(): + setattr(cConfig, k, v) + +for name in constant_names: + globals()[name] = getattr(cConfig, name) +lzma_stream = lltype.Ptr(cConfig.lzma_stream) +lzma_options_lzma = lltype.Ptr(cConfig.lzma_options_lzma) +BUFSIZ = cConfig.BUFSIZ +LZMA_CHECK_UNKNOWN = LZMA_CHECK_ID_MAX + 1 + +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info= + CConfig._compilation_info_, **kwds) + +lzma_ret = rffi.INT +lzma_action = rffi.INT +lzma_bool = rffi.INT + +lzma_lzma_preset = external('lzma_lzma_preset', [lzma_options_lzma, rffi.UINT], lzma_bool) +lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret) +lzma_end = external('lzma_end', [lzma_stream], lltype.Void, releasegil=False) + +lzma_auto_decoder = external('lzma_auto_decoder', [lzma_stream, rffi.LONG, rffi.INT], lzma_ret) +lzma_get_check = external('lzma_get_check', [lzma_stream], rffi.INT) + +lzma_code = external('lzma_code', [lzma_stream, lzma_action], rffi.INT) + + + at specialize.arg(1) +def raise_error(space, fmt, *args): + raise oefmt(space.w_RuntimeError, fmt, *args) + + +def _catch_lzma_error(space, lzret): + if (lzret == LZMA_OK or lzret == LZMA_GET_CHECK or + lzret == LZMA_NO_CHECK or lzret == LZMA_STREAM_END): + return + raise raise_error(space, "Unrecognized error from liblzma: %d", lzret) + + +if BUFSIZ < 8192: + SMALLCHUNK = 8192 +else: + SMALLCHUNK = BUFSIZ +if rffi.sizeof(rffi.INT) > 4: + BIGCHUNK = 512 * 32 +else: + BIGCHUNK = 512 * 1024 + + +def _new_buffer_size(current_size): 
+ # keep doubling until we reach BIGCHUNK; then the buffer size is no + # longer increased + if current_size < BIGCHUNK: + return current_size + current_size + return current_size + + +class OutBuffer(object): + """Handler for the output buffer. A bit custom code trying to + encapsulate the logic of setting up the fields of 'lzs' and + allocating raw memory as needed. + """ + def __init__(self, lzs, initial_size=SMALLCHUNK): + # when the constructor is called, allocate a piece of memory + # of length 'piece_size' and make lzs ready to dump there. + self.temp = [] + self.lzs = lzs + self._allocate_chunk(initial_size) + + def _allocate_chunk(self, size): + self.raw_buf, self.gc_buf = rffi.alloc_buffer(size) + self.current_size = size + self.lzs.c_next_out = self.raw_buf + rffi.setintfield(self.lzs, 'c_avail_out', size) + + def _get_chunk(self, chunksize): + assert 0 <= chunksize <= self.current_size + raw_buf = self.raw_buf + gc_buf = self.gc_buf + s = rffi.str_from_buffer(raw_buf, gc_buf, self.current_size, chunksize) + rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + self.current_size = 0 + return s + + def prepare_next_chunk(self): + size = self.current_size + self.temp.append(self._get_chunk(size)) + self._allocate_chunk(_new_buffer_size(size)) + + def make_result_string(self): + count_unoccupied = rffi.getintfield(self.lzs, 'c_avail_out') + s = self._get_chunk(self.current_size - count_unoccupied) + if self.temp: + self.temp.append(s) + return ''.join(self.temp) + else: + return s + + def free(self): + if self.current_size > 0: + rffi.keep_buffer_alive_until_here(self.raw_buf, self.gc_buf) + + def __enter__(self): + return self + def __exit__(self, *args): + self.free() + + +class W_LZMACompressor(W_Root): + def __init__(self, space, format): + self.format = format + self.lock = Lock(space) + self.flushed = False + self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True) + + def __del__(self): + lzma_end(self.lzs) + lltype.free(self.lzs, 
flavor='raw') + + def _init_alone(self, space, preset, w_filters): + if space.is_none(w_filters): + with lltype.scoped_alloc(lzma_options_lzma.TO) as options: + if lzma_lzma_preset(options, preset): + raise_error(space, "Invalid compression preset: %d", preset) + lzret = lzma_alone_encoder(self.lzs, options) + else: + raise NotImplementedError + _catch_lzma_error(space, lzret) + + @staticmethod + @unwrap_spec(format=int, + w_check=WrappedDefault(None), + w_preset=WrappedDefault(None), + w_filters=WrappedDefault(None)) + def descr_new_comp(space, w_subtype, format=FORMAT_XZ, + w_check=None, w_preset=None, w_filters=None): + w_self = space.allocate_instance(W_LZMACompressor, w_subtype) + self = space.interp_w(W_LZMACompressor, w_self) + W_LZMACompressor.__init__(self, space, format) + + if space.is_none(w_preset): + preset = LZMA_PRESET_DEFAULT + else: + preset = space.int_w(w_preset) + + if format == FORMAT_ALONE: + self._init_alone(space, preset, w_filters) + else: + raise NotImplementedError + + return w_self + + @unwrap_spec(data='bufferstr') + def compress_w(self, space, data): + with self.lock: + if self.flushed: + raise oefmt(space.w_ValueError, "Compressor has been flushed") + result = self._compress(space, data, LZMA_RUN) + return space.wrapbytes(result) + + def flush_w(self, space): + with self.lock: + if self.flushed: + raise oefmt(space.w_ValueError, "Repeated call to flush()") + result = self._compress(space, "", LZMA_FINISH) + return space.wrapbytes(result) + + def _compress(self, space, data, action): + datasize = len(data) + with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: + for i in range(datasize): + in_buf[i] = data[i] + + with OutBuffer(self.lzs) as out: + self.lzs.c_next_in = in_buf + rffi.setintfield(self.lzs, 'c_avail_in', datasize) + + while True: + lzret = lzma_code(self.lzs, action) + _catch_lzma_error(space, lzret) + + if (action == LZMA_RUN and + rffi.getintfield(self.lzs, 'c_avail_in') == 0): + break + if action == 
LZMA_FINISH and lzret == LZMA_STREAM_END: + break + elif rffi.getintfield(self.lzs, 'c_avail_out') == 0: + out.prepare_next_chunk() + + return out.make_result_string() + + +W_LZMACompressor.typedef = TypeDef("LZMACompressor", + __new__ = interp2app(W_LZMACompressor.descr_new_comp), + compress = interp2app(W_LZMACompressor.compress_w), + flush = interp2app(W_LZMACompressor.flush_w), +) + + +class W_LZMADecompressor(W_Root): + def __init__(self, space, format): + self.format = format + self.lock = Lock(space) + self.eof = False + self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True) + self.check = LZMA_CHECK_UNKNOWN + self.unused_data = '' + + def __del__(self): + lzma_end(self.lzs) + lltype.free(self.lzs, flavor='raw') + + @staticmethod + @unwrap_spec(format=int, + w_memlimit=WrappedDefault(None), + w_filters=WrappedDefault(None)) + def descr_new_dec(space, w_subtype, format=FORMAT_AUTO, + w_memlimit=None, w_filters=None): + w_self = space.allocate_instance(W_LZMADecompressor, w_subtype) + self = space.interp_w(W_LZMADecompressor, w_self) + W_LZMADecompressor.__init__(self, space, format) + + if space.is_none(w_memlimit): + memlimit = r_ulonglong(LONGLONG_MASK) + else: + memlimit = space.r_ulonglong_w(w_memlimit) + + decoder_flags = LZMA_TELL_ANY_CHECK | LZMA_TELL_NO_CHECK + + if format == FORMAT_AUTO: + lzret = lzma_auto_decoder(self.lzs, memlimit, decoder_flags) + _catch_lzma_error(space, lzret) + else: + raise NotImplementedError + + return w_self + + @unwrap_spec(data='bufferstr') + def decompress_w(self, space, data): + with self.lock: + if self.eof: + raise oefmt(space.w_EOFError, "Already at end of stream") + result = self._decompress(space, data) + return space.wrapbytes(result) + + def _decompress(self, space, data): + datasize = len(data) + + with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf: + for i in range(datasize): + in_buf[i] = data[i] + + with OutBuffer(self.lzs) as out: + self.lzs.c_next_in = in_buf + 
rffi.setintfield(self.lzs, 'c_avail_in', datasize) + + while True: + lzret = lzma_code(self.lzs, LZMA_RUN) + _catch_lzma_error(space, lzret) + if lzret == LZMA_GET_CHECK or lzret == LZMA_NO_CHECK: + self.check = lzma_get_check(self.lzs) + if lzret == LZMA_STREAM_END: + self.eof = True + if rffi.getintfield(self.lzs, 'c_avail_in') > 0: + unused = [self.lzs.c_next_in[i] + for i in range( + rffi.getintfield(self.lzs, + 'c_avail_in'))] + self.unused_data = "".join(unused) + break + if rffi.getintfield(self.lzs, 'c_avail_in') == 0: + break + elif rffi.getintfield(self.lzs, 'c_avail_out') == 0: + out.prepare_next_chunk() + + return out.make_result_string() + + +W_LZMADecompressor.typedef = TypeDef("LZMADecompressor", + __new__ = interp2app(W_LZMADecompressor.descr_new_dec), + decompress = interp2app(W_LZMADecompressor.decompress_w), + eof = interp_attrproperty("eof", W_LZMADecompressor), + unused_data = interp_attrproperty_bytes("unused_data", W_LZMADecompressor), +) + + +def encode_filter_properties(space, w_filter): + """Return a bytes object encoding the options (properties) of the filter + specified by *filter* (a dict). + + The result does not include the filter ID itself, only the options. + """ + +def decode_filter_properties(space, w_filter_id, w_encoded_props): + """Return a dict describing a filter with ID *filter_id*, and options + (properties) decoded from the bytes object *encoded_props*. 
+ """ + diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/test/test_lzma.py @@ -0,0 +1,17 @@ +class AppTestBZ2File: + spaceconfig = { + "usemodules": ["_lzma"] + } + + def test_module(self): + import lzma + + def test_simple_compress(self): + import lzma + compressed = lzma.compress(b'Insert Data Here', format=lzma.FORMAT_ALONE) + assert compressed == (b']\x00\x00\x80\x00\xff\xff\xff\xff\xff' + b'\xff\xff\xff\x00$\x9b\x8afg\x91' + b'(\xcb\xde\xfa\x03\r\x1eQT\xbe' + b't\x9e\xdfI]\xff\xf4\x9d\x80\x00') + decompressed = lzma.decompress(compressed) + assert decompressed == b'Insert Data Here' diff --git a/pypy/module/_lzma/test/test_ztranslation.py b/pypy/module/_lzma/test/test_ztranslation.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/test/test_ztranslation.py @@ -0,0 +1,4 @@ +from pypy.objspace.fake.checkmodule import checkmodule + +def test_lzma_translates(): + checkmodule('_lzma') diff --git a/pypy/module/_posixsubprocess/_posixsubprocess.c b/pypy/module/_posixsubprocess/_posixsubprocess.c --- a/pypy/module/_posixsubprocess/_posixsubprocess.c +++ b/pypy/module/_posixsubprocess/_posixsubprocess.c @@ -67,7 +67,7 @@ * that properly supports /dev/fd. */ static int -_is_fdescfs_mounted_on_dev_fd() +_is_fdescfs_mounted_on_dev_fd(void) { struct stat dev_stat; struct stat dev_fd_stat; @@ -142,17 +142,11 @@ * This structure is very old and stable: It will not change unless the kernel * chooses to break compatibility with all existing binaries. Highly Unlikely. 
*/ -struct linux_dirent { -#if defined(__x86_64__) && defined(__ILP32__) - /* Support the wacky x32 ABI (fake 32-bit userspace speaking to x86_64 - * kernel interfaces) - https://sites.google.com/site/x32abi/ */ +struct linux_dirent64 { unsigned long long d_ino; - unsigned long long d_off; -#else - unsigned long d_ino; /* Inode number */ - unsigned long d_off; /* Offset to next linux_dirent */ -#endif + long long d_off; unsigned short d_reclen; /* Length of this linux_dirent */ + unsigned char d_type; char d_name[256]; /* Filename (null-terminated) */ }; @@ -196,16 +190,16 @@ num_fds_to_keep); return; } else { - char buffer[sizeof(struct linux_dirent)]; + char buffer[sizeof(struct linux_dirent64)]; int bytes; - while ((bytes = syscall(SYS_getdents, fd_dir_fd, - (struct linux_dirent *)buffer, + while ((bytes = syscall(SYS_getdents64, fd_dir_fd, + (struct linux_dirent64 *)buffer, sizeof(buffer))) > 0) { - struct linux_dirent *entry; + struct linux_dirent64 *entry; int offset; for (offset = 0; offset < bytes; offset += entry->d_reclen) { int fd; - entry = (struct linux_dirent *)(buffer + offset); + entry = (struct linux_dirent64 *)(buffer + offset); if ((fd = _pos_int_from_ascii(entry->d_name)) < 0) continue; /* Not a number. */ if (fd != fd_dir_fd && fd >= start_fd && fd < end_fd && @@ -299,6 +293,7 @@ #endif /* else NOT (defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)) */ + /* * This function is code executed in the child process immediately after fork * to set things up and call exec(). @@ -389,17 +384,6 @@ POSIX_CALL(close(errwrite)); } - if (close_fds) { - int local_max_fd = max_fd; -#if defined(__NetBSD__) - local_max_fd = fcntl(0, F_MAXFD); - if (local_max_fd < 0) - local_max_fd = max_fd; -#endif - /* TODO HP-UX could use pstat_getproc() if anyone cares about it. 
*/ - _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep); - } - if (cwd) POSIX_CALL(chdir(cwd)); @@ -428,6 +412,18 @@ } } + /* close FDs after executing preexec_fn, which might open FDs */ + if (close_fds) { + int local_max_fd = max_fd; +#if defined(__NetBSD__) + local_max_fd = fcntl(0, F_MAXFD); + if (local_max_fd < 0) + local_max_fd = max_fd; +#endif + /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */ + _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep); + } + /* This loop matches the Lib/os.py _execvpe()'s PATH search when */ /* given the executable_list generated by Lib/subprocess.py. */ saved_errno = 0; @@ -478,20 +474,18 @@ int pypy_subprocess_cloexec_pipe(int *fds) { - int res; + int res, saved_errno; + long oldflags; #ifdef HAVE_PIPE2 Py_BEGIN_ALLOW_THREADS res = pipe2(fds, O_CLOEXEC); Py_END_ALLOW_THREADS if (res != 0 && errno == ENOSYS) { - { #endif /* We hold the GIL which offers some protection from other code calling * fork() before the CLOEXEC flags have been set but we can't guarantee * anything without pipe2(). */ - long oldflags; - res = pipe(fds); if (res == 0) { @@ -508,9 +502,47 @@ if (res == 0) res = fcntl(fds[1], F_SETFD, oldflags | FD_CLOEXEC); #ifdef HAVE_PIPE2 - } } #endif + if (res == 0 && fds[1] < 3) { + /* We always want the write end of the pipe to avoid fds 0, 1 and 2 + * as our child may claim those for stdio connections. */ + int write_fd = fds[1]; + int fds_to_close[3] = {-1, -1, -1}; + int fds_to_close_idx = 0; +#ifdef F_DUPFD_CLOEXEC + fds_to_close[fds_to_close_idx++] = write_fd; + write_fd = fcntl(write_fd, F_DUPFD_CLOEXEC, 3); + if (write_fd < 0) /* We don't support F_DUPFD_CLOEXEC / other error */ +#endif + { + /* Use dup a few times until we get a desirable fd. 
*/ + for (; fds_to_close_idx < 3; ++fds_to_close_idx) { + fds_to_close[fds_to_close_idx] = write_fd; + write_fd = dup(write_fd); + if (write_fd >= 3) + break; + /* We may dup a few extra times if it returns an error but + * that is okay. Repeat calls should return the same error. */ + } + if (write_fd < 0) res = write_fd; + if (res == 0) { + oldflags = fcntl(write_fd, F_GETFD, 0); + if (oldflags < 0) res = oldflags; + if (res == 0) + res = fcntl(write_fd, F_SETFD, oldflags | FD_CLOEXEC); + } + } + saved_errno = errno; + /* Close fds we tried for the write end that were too low. */ + for (fds_to_close_idx=0; fds_to_close_idx < 3; ++fds_to_close_idx) { + int temp_fd = fds_to_close[fds_to_close_idx]; + while (temp_fd >= 0 && close(temp_fd) < 0 && errno == EINTR); + } + errno = saved_errno; /* report dup or fcntl errors, not close. */ + fds[1] = write_fd; + } /* end if write fd was too small */ + if (res != 0) return res; return 0; diff --git a/pypy/module/_posixsubprocess/test/test_subprocess.py b/pypy/module/_posixsubprocess/test/test_subprocess.py --- a/pypy/module/_posixsubprocess/test/test_subprocess.py +++ b/pypy/module/_posixsubprocess/test/test_subprocess.py @@ -1,7 +1,8 @@ from os.path import dirname class AppTestSubprocess: - spaceconfig = dict(usemodules=('_posixsubprocess', 'signal', 'fcntl', 'select')) + spaceconfig = dict(usemodules=('_posixsubprocess', 'signal', + 'fcntl', 'select', 'rctime')) # XXX write more tests def setup_class(cls): @@ -17,6 +18,7 @@ os.close(fd2) def test_close_fds_true(self): + import traceback # Work around a recursion limit import subprocess import os.path import os @@ -43,6 +45,7 @@ # For code coverage of calling setsid(). We don't care if we get an # EPERM error from it depending on the test execution environment, that # still indicates that it was called. 
+ import traceback # Work around a recursion limit import subprocess import os try: diff --git a/pypy/module/bz2/__init__.py b/pypy/module/bz2/__init__.py --- a/pypy/module/bz2/__init__.py +++ b/pypy/module/bz2/__init__.py @@ -1,19 +1,14 @@ -# REVIEWME from pypy.interpreter.mixedmodule import MixedModule class Module(MixedModule): - """The python bz2 module provides a comprehensive interface for -the bz2 compression library. It implements a complete file -interface, one shot (de)compression functions, and types for -sequential (de)compression.""" + # The private part of the bz2 module. + + applevel_name = '_bz2' interpleveldefs = { 'BZ2Compressor': 'interp_bz2.W_BZ2Compressor', 'BZ2Decompressor': 'interp_bz2.W_BZ2Decompressor', - 'compress': 'interp_bz2.compress', - 'decompress': 'interp_bz2.decompress', } appleveldefs = { - 'BZ2File': 'app_bz2file.BZ2File', } diff --git a/pypy/module/bz2/app_bz2file.py b/pypy/module/bz2/app_bz2file.py deleted file mode 100644 --- a/pypy/module/bz2/app_bz2file.py +++ /dev/null @@ -1,370 +0,0 @@ -"""Interface to the libbzip2 compression library. - -This file is an almost exact copy of CPython3.3 Lib/bz2.py. -""" - -import io - -from bz2 import BZ2Compressor, BZ2Decompressor - - -_MODE_CLOSED = 0 -_MODE_READ = 1 -_MODE_READ_EOF = 2 -_MODE_WRITE = 3 - -_BUFFER_SIZE = 8192 - - -class BZ2File(io.BufferedIOBase): - - """A file object providing transparent bzip2 (de)compression. - - A BZ2File can act as a wrapper for an existing file object, or refer - directly to a named file on disk. - - Note that BZ2File provides a *binary* file interface - data read is - returned as bytes, and data to be written should be given as bytes. - """ - - def __init__(self, filename=None, mode="r", buffering=None, - compresslevel=9, fileobj=None): - """Open a bzip2-compressed file. - - If filename is given, open the named file. Otherwise, operate on - the file object given by fileobj. Exactly one of these two - parameters should be provided. 
- - mode can be 'r' for reading (default), or 'w' for writing. - - buffering is ignored. Its use is deprecated. - - If mode is 'w', compresslevel can be a number between 1 and 9 - specifying the level of compression: 1 produces the least - compression, and 9 (default) produces the most compression. - """ - # This lock must be recursive, so that BufferedIOBase's - # readline(), readlines() and writelines() don't deadlock. - import threading - self._lock = threading.RLock() - self._fp = None - self._closefp = False - self._mode = _MODE_CLOSED - self._pos = 0 - self._size = -1 - - if not (1 <= compresslevel <= 9): - raise ValueError("compresslevel must be between 1 and 9") - - if mode in ("", "r", "rb"): - mode = "rb" - mode_code = _MODE_READ - self._decompressor = BZ2Decompressor() - self._buffer = None - elif mode in ("w", "wb"): - mode = "wb" - mode_code = _MODE_WRITE - self._compressor = BZ2Compressor(compresslevel) - elif mode in ("a", "ab"): - mode = "ab" - mode_code = _MODE_WRITE - self._compressor = BZ2Compressor(compresslevel) - else: - raise ValueError("Invalid mode: {!r}".format(mode)) - - if filename is not None and fileobj is None: - self._fp = open(filename, mode) - self._closefp = True - self._mode = mode_code - elif fileobj is not None and filename is None: - self._fp = fileobj - self._mode = mode_code - else: - raise ValueError("Must give exactly one of filename and fileobj") - - def close(self): - """Flush and close the file. - - May be called more than once without error. Once the file is - closed, any other operation on it will raise a ValueError. 
- """ - with self._lock: - if self._mode == _MODE_CLOSED: - return - try: - if self._mode in (_MODE_READ, _MODE_READ_EOF): - self._decompressor = None - elif self._mode == _MODE_WRITE: - self._fp.write(self._compressor.flush()) - self._compressor = None - finally: - try: - if self._closefp: - self._fp.close() - finally: - self._fp = None - self._closefp = False - self._mode = _MODE_CLOSED - self._buffer = None - - @property - def closed(self): - """True if this file is closed.""" - return self._mode == _MODE_CLOSED - - def fileno(self): - """Return the file descriptor for the underlying file.""" - self._check_not_closed() - return self._fp.fileno() - - def seekable(self): - """Return whether the file supports seeking.""" - return self.readable() - - def readable(self): - """Return whether the file was opened for reading.""" - self._check_not_closed() - return self._mode in (_MODE_READ, _MODE_READ_EOF) - - def writable(self): - """Return whether the file was opened for writing.""" - self._check_not_closed() - return self._mode == _MODE_WRITE - - # Mode-checking helper functions. - - def _check_not_closed(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def _check_can_read(self): - if not self.readable(): - raise io.UnsupportedOperation("File not open for reading") - - def _check_can_write(self): - if not self.writable(): - raise io.UnsupportedOperation("File not open for writing") - - def _check_can_seek(self): - if not self.seekable(): - raise io.UnsupportedOperation("Seeking is only supported " - "on files open for reading") - - # Fill the readahead buffer if it is empty. Returns False on EOF. 
- def _fill_buffer(self): - if self._buffer: - return True - - if self._decompressor.unused_data: - rawblock = self._decompressor.unused_data - else: - rawblock = self._fp.read(_BUFFER_SIZE) - - if not rawblock: - if self._decompressor.eof: - self._mode = _MODE_READ_EOF - self._size = self._pos - return False - else: - raise EOFError("Compressed file ended before the " - "end-of-stream marker was reached") - - # Continue to next stream. - if self._decompressor.eof: - self._decompressor = BZ2Decompressor() - - self._buffer = self._decompressor.decompress(rawblock) - return True - - # Read data until EOF. - # If return_data is false, consume the data without returning it. - def _read_all(self, return_data=True): - blocks = [] - while self._fill_buffer(): - if return_data: - blocks.append(self._buffer) - self._pos += len(self._buffer) - self._buffer = None - if return_data: - return b"".join(blocks) - - # Read a block of up to n bytes. - # If return_data is false, consume the data without returning it. - def _read_block(self, n, return_data=True): - blocks = [] - while n > 0 and self._fill_buffer(): - if n < len(self._buffer): - data = self._buffer[:n] - self._buffer = self._buffer[n:] - else: - data = self._buffer - self._buffer = None - if return_data: - blocks.append(data) - self._pos += len(data) - n -= len(data) - if return_data: - return b"".join(blocks) - - def peek(self, n=0): - """Return buffered data without advancing the file position. - - Always returns at least one byte of data, unless at EOF. - The exact number of bytes returned is unspecified. - """ - with self._lock: - self._check_can_read() - if self._mode == _MODE_READ_EOF or not self._fill_buffer(): - return b"" - return self._buffer - - def read(self, size=-1): - """Read up to size uncompressed bytes from the file. - - If size is negative or omitted, read until EOF is reached. - Returns b'' if the file is already at EOF. 
- """ - with self._lock: - self._check_can_read() - if self._mode == _MODE_READ_EOF or size == 0: - return b"" - elif size < 0: - return self._read_all() - else: - return self._read_block(size) - - def read1(self, size=-1): - """Read up to size uncompressed bytes with at most one read - from the underlying stream. - - Returns b'' if the file is at EOF. - """ - with self._lock: - self._check_can_read() - if (size == 0 or self._mode == _MODE_READ_EOF or - not self._fill_buffer()): - return b"" - if 0 < size < len(self._buffer): - data = self._buffer[:size] - self._buffer = self._buffer[size:] - else: - data = self._buffer - self._buffer = None - self._pos += len(data) - return data - - def readinto(self, b): - """Read up to len(b) bytes into b. - - Returns the number of bytes read (0 for EOF). - """ - with self._lock: - return io.BufferedIOBase.readinto(self, b) - - def readline(self, size=-1): - """Read a line of uncompressed bytes from the file. - - The terminating newline (if present) is retained. If size is - non-negative, no more than size bytes will be read (in which - case the line may be incomplete). Returns b'' if already at EOF. - """ - if not hasattr(size, "__index__"): - raise TypeError("Integer argument expected") - size = size.__index__() - with self._lock: - return io.BufferedIOBase.readline(self, size) - - def readlines(self, size=-1): - """Read a list of lines of uncompressed bytes from the file. - - size can be specified to control the number of lines read: no - further lines will be read once the total size of the lines read - so far equals or exceeds size. - """ - if not hasattr(size, "__index__"): - raise TypeError("Integer argument expected") - size = size.__index__() - with self._lock: - return io.BufferedIOBase.readlines(self, size) - - def write(self, data): - """Write a byte string to the file. - - Returns the number of uncompressed bytes written, which is - always len(data). 
Note that due to buffering, the file on disk - may not reflect the data written until close() is called. - """ - with self._lock: - self._check_can_write() - compressed = self._compressor.compress(data) - self._fp.write(compressed) - self._pos += len(data) - return len(data) - - def writelines(self, seq): - """Write a sequence of byte strings to the file. - - Returns the number of uncompressed bytes written. - seq can be any iterable yielding byte strings. - - Line separators are not added between the written byte strings. - """ - with self._lock: - return io.BufferedIOBase.writelines(self, seq) - - # Rewind the file to the beginning of the data stream. - def _rewind(self): - self._fp.seek(0, 0) - self._mode = _MODE_READ - self._pos = 0 - self._decompressor = BZ2Decompressor() - self._buffer = None - - def seek(self, offset, whence=0): - """Change the file position. - - The new position is specified by offset, relative to the - position indicated by whence. Values for whence are: - - 0: start of stream (default); offset must not be negative - 1: current stream position - 2: end of stream; offset must not be positive - - Returns the new file position. - - Note that seeking is emulated, so depending on the parameters, - this operation may be extremely slow. - """ - with self._lock: - self._check_can_seek() - - # Recalculate offset as an absolute file position. - if whence == 0: - pass - elif whence == 1: - offset = self._pos + offset - elif whence == 2: - # Seeking relative to EOF - we need to know the file's size. - if self._size < 0: - self._read_all(return_data=False) - offset = self._size + offset - else: - raise ValueError("Invalid value for whence: {}".format(whence)) - - # Make it so that offset is the number of bytes to skip forward. - if offset < self._pos: - self._rewind() - else: - offset -= self._pos - - # Read and discard data until we reach the desired position. 
- if self._mode != _MODE_READ_EOF: - self._read_block(offset, return_data=False) - - return self._pos - - def tell(self): - """Return the current file position.""" - with self._lock: - self._check_not_closed() - return self._pos diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -330,7 +330,7 @@ res = out.make_result_string() return self.space.wrapbytes(res) -W_BZ2Compressor.typedef = TypeDef("BZ2Compressor", +W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, __new__ = interp2app(descr_compressor__new__), compress = interp2app(W_BZ2Compressor.compress), @@ -426,98 +426,10 @@ return self.space.wrapbytes(res) -W_BZ2Decompressor.typedef = TypeDef("BZ2Decompressor", +W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), ) - - - at unwrap_spec(data='bufferstr', compresslevel=int) -def compress(space, data, compresslevel=9): - """compress(data [, compresslevel=9]) -> string - - Compress data in one shot. If you want to compress data sequentially, - use an instance of BZ2Compressor instead. 
The compresslevel parameter, if - given, must be a number between 1 and 9.""" - - if compresslevel < 1 or compresslevel > 9: - raise OperationError(space.w_ValueError, - space.wrap("compresslevel must be between 1 and 9")) - - with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs: - in_bufsize = len(data) - - with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf: - for i in range(in_bufsize): - in_buf[i] = data[i] - bzs.c_next_in = in_buf - rffi.setintfield(bzs, 'c_avail_in', in_bufsize) - - # conforming to bz2 manual, this is large enough to fit compressed - # data in one shot. We will check it later anyway. - with OutBuffer(bzs, - in_bufsize + (in_bufsize / 100 + 1) + 600) as out: - - bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0) - if bzerror != BZ_OK: - _catch_bz2_error(space, bzerror) - - while True: - bzerror = BZ2_bzCompress(bzs, BZ_FINISH) - if bzerror == BZ_STREAM_END: - break - elif bzerror != BZ_FINISH_OK: - BZ2_bzCompressEnd(bzs) - _catch_bz2_error(space, bzerror) - - if rffi.getintfield(bzs, 'c_avail_out') == 0: - out.prepare_next_chunk() - - res = out.make_result_string() - BZ2_bzCompressEnd(bzs) - return space.wrapbytes(res) - - at unwrap_spec(data='bufferstr') -def decompress(space, data): - """decompress(data) -> decompressed data - - Decompress data in one shot. 
If you want to decompress data sequentially, - use an instance of BZ2Decompressor instead.""" - - in_bufsize = len(data) - if in_bufsize == 0: - return space.wrapbytes("") - - with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs: - with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf: - for i in range(in_bufsize): - in_buf[i] = data[i] - bzs.c_next_in = in_buf - rffi.setintfield(bzs, 'c_avail_in', in_bufsize) - - with OutBuffer(bzs) as out: - bzerror = BZ2_bzDecompressInit(bzs, 0, 0) - if bzerror != BZ_OK: - _catch_bz2_error(space, bzerror) - - while True: - bzerror = BZ2_bzDecompress(bzs) - if bzerror == BZ_STREAM_END: - break - if bzerror != BZ_OK: - BZ2_bzDecompressEnd(bzs) - _catch_bz2_error(space, bzerror) - - if rffi.getintfield(bzs, 'c_avail_in') == 0: - BZ2_bzDecompressEnd(bzs) - raise OperationError(space.w_ValueError, space.wrap( - "couldn't find end of stream")) - elif rffi.getintfield(bzs, 'c_avail_out') == 0: - out.prepare_next_chunk() - - res = out.make_result_string() - BZ2_bzDecompressEnd(bzs) - return space.wrapbytes(res) diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -41,7 +41,7 @@ interp_bz2.SMALLCHUNK = mod.OLD_SMALLCHUNK class AppTestBZ2Compressor(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = cls.space.wrapbytes(TEXT) @@ -54,6 +54,8 @@ cls.w_decompress = cls.space.wrap(gateway.interp2app(decompress_w)) cls.w_HUGE_OK = cls.space.wrap(HUGE_OK) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_creation(self): from bz2 import BZ2Compressor @@ -108,13 +110,15 @@ class AppTestBZ2Decompressor(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = 
cls.space.wrapbytes(TEXT) cls.w_DATA = cls.space.wrapbytes(DATA) cls.w_BUGGY_DATA = cls.space.wrapbytes(BUGGY_DATA) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_creation(self): from bz2 import BZ2Decompressor @@ -184,7 +188,7 @@ class AppTestBZ2ModuleFunctions(CheckAllocation): - spaceconfig = dict(usemodules=('bz2',)) + spaceconfig = dict(usemodules=('bz2', 'rctime')) def setup_class(cls): cls.w_TEXT = cls.space.wrapbytes(TEXT) diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -87,6 +87,8 @@ gateway.interp2app(create_broken_temp_file_w)) cls.w_random_data = cls.space.wrapbytes(RANDOM_DATA) + cls.space.appexec([], """(): import warnings""") # Work around a recursion limit + def test_attributes(self): from bz2 import BZ2File diff --git a/pypy/module/faulthandler/__init__.py b/pypy/module/faulthandler/__init__.py --- a/pypy/module/faulthandler/__init__.py +++ b/pypy/module/faulthandler/__init__.py @@ -6,5 +6,7 @@ interpleveldefs = { 'enable': 'interp_faulthandler.enable', + 'disable': 'interp_faulthandler.disable', + 'is_enabled': 'interp_faulthandler.is_enabled', 'register': 'interp_faulthandler.register', } diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -1,5 +1,15 @@ -def enable(space, __args__): - pass +class FatalErrorState(object): + def __init__(self, space): + self.enabled = False + +def enable(space): + space.fromcache(FatalErrorState).enabled = True + +def disable(space): + space.fromcache(FatalErrorState).enabled = False + +def is_enabled(space): + return space.wrap(space.fromcache(FatalErrorState).enabled) def register(space, __args__): pass diff --git a/pypy/module/faulthandler/test/test_faulthander.py 
b/pypy/module/faulthandler/test/test_faulthander.py new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -0,0 +1,11 @@ +class AppTestFaultHandler: + spaceconfig = { + "usemodules": ["faulthandler"] + } + + def test_enable(self): + import faulthandler + faulthandler.enable() + assert faulthandler.is_enabled() is True + faulthandler.disable() + assert faulthandler.is_enabled() is False From noreply at buildbot.pypy.org Sat Aug 2 00:40:03 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:40:03 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: use space.call_method over w_result.sort Message-ID: <20140801224003.21CC81C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72644:2c73a529647b Date: 2014-07-31 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/2c73a529647b/ Log: use space.call_method over w_result.sort diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -129,7 +129,7 @@ w__dict__ = space.getattr(self, space.wrap('__dict__')) result = space.listview(w__dict__) w_result = space.wrap(result) - w_result.sort(False) + space.call_method(w_result, 'sort') return w_result except OperationError as e: if e.match(space, space.w_AttributeError): From noreply at buildbot.pypy.org Sat Aug 2 00:40:04 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:40:04 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merging Message-ID: <20140801224004.51D821C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72645:7a470162ff0e Date: 2014-07-31 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/7a470162ff0e/ Log: merging diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,7 +11,8 @@ class 
ValidationError(Exception): - """Signals an invalid AST""" + def __init__(self, message): + self.message = message def expr_context_name(ctx): From noreply at buildbot.pypy.org Sat Aug 2 00:40:05 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:40:05 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: omit sorting the items in dir/type/module __dir__, like cpython does Message-ID: <20140801224005.96E0D1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72646:743d934f01f7 Date: 2014-08-01 21:16 +0200 http://bitbucket.org/pypy/pypy/changeset/743d934f01f7/ Log: omit sorting the items in dir/type/module __dir__, like cpython does diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -129,7 +129,6 @@ w__dict__ = space.getattr(self, space.wrap('__dict__')) result = space.listview(w__dict__) w_result = space.wrap(result) - space.call_method(w_result, 'sort') return w_result except OperationError as e: if e.match(space, space.w_AttributeError): diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -71,8 +71,7 @@ def test_dir(self): import sys items = sys.__dir__() - assert items == sorted(items) - assert items == dir(sys) + assert sorted(items) == dir(sys) def test_package(self): import sys diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -49,7 +49,6 @@ except AttributeError: pass result = list(Dict.keys()) - result.sort() return result """) return w_result diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -113,7 +113,7 @@ obj = A() obj_items = dir(obj) assert obj_items == 
sorted(obj_items) - assert obj_items == dir(obj) + assert obj_items == sorted(object.__dir__(obj)) def test_is_on_primitives(self): diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -728,7 +728,6 @@ pass C_items = dir(C) - assert C_items == sorted(C_items) assert C_items != C.__dir__(C) # as in cpython assert 'a_var' in C_items diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -750,7 +750,6 @@ return Dict result = list(_classdir(obj).keys()) - result.sort() return result """) return w_result From noreply at buildbot.pypy.org Sat Aug 2 00:40:06 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 00:40:06 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merging Message-ID: <20140801224006.D63DD1C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72647:80b1862342a0 Date: 2014-08-01 21:17 +0200 http://bitbucket.org/pypy/pypy/changeset/80b1862342a0/ Log: merging diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -11,7 +11,8 @@ class ValidationError(Exception): - """Signals an invalid AST""" + def __init__(self, message): + self.message = message def expr_context_name(ctx): From noreply at buildbot.pypy.org Sat Aug 2 00:40:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 00:40:08 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3 (pull request #255) Message-ID: <20140801224008.1B94E1C0250@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72648:49884de0d7c3 Date: 2014-08-01 15:39 -0700 http://bitbucket.org/pypy/pypy/changeset/49884de0d7c3/ Log: 
Merged in numerodix/pypy/py3.3 (pull request #255) factor dir() built-in out into object, type and module methods diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -123,3 +123,14 @@ except OperationError: __file__ = u'?' return space.wrap(u"" % (name, __file__)) + + def descr_module__dir__(self, space): + try: + w__dict__ = space.getattr(self, space.wrap('__dict__')) + result = space.listview(w__dict__) + w_result = space.wrap(result) + return w_result + except OperationError as e: + if e.match(space, space.w_AttributeError): + return space.wrap([]) + raise diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -68,6 +68,11 @@ m = type(_pypy_interact).__new__(type(_pypy_interact)) assert repr(m).startswith(" Author: Philip Jenvey Branch: py3.3 Changeset: r72649:70a5990e4f23 Date: 2014-08-01 17:11 -0700 http://bitbucket.org/pypy/pypy/changeset/70a5990e4f23/ Log: fix translation (newlist vs wrap) and match cpython a bit more diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -3,7 +3,7 @@ """ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.objectmodel import we_are_translated @@ -125,12 +125,8 @@ return space.wrap(u"" % (name, __file__)) def descr_module__dir__(self, space): - try: - w__dict__ = space.getattr(self, space.wrap('__dict__')) - result = space.listview(w__dict__) - w_result = space.wrap(result) - return w_result - except OperationError as e: - if e.match(space, space.w_AttributeError): - return space.wrap([]) - raise + w_dict = space.getattr(self, space.wrap('__dict__')) + if not space.isinstance_w(w_dict, space.w_dict): + raise 
oefmt(space.w_TypeError, "%N.__dict__ is not a dictionary", + self) + return space.newlist(space.listview(w_dict)) From noreply at buildbot.pypy.org Sat Aug 2 02:35:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 02:35:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: n/a to py3 Message-ID: <20140802003533.13B391C08D5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72650:11b895f1f0af Date: 2014-08-01 17:35 -0700 http://bitbucket.org/pypy/pypy/changeset/11b895f1f0af/ Log: n/a to py3 diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -394,13 +394,6 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class - def test_classmethod_im_class(self): - class Foo(object): - @classmethod - def bar(cls): - pass - assert Foo.bar.im_class is type - def test_func_closure(self): x = 2 def f(): From noreply at buildbot.pypy.org Sat Aug 2 03:41:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 03:41:45 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup (forgot to commit this a while back) Message-ID: <20140802014146.017361C100F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72651:a6963d0635d1 Date: 2014-08-01 17:35 -0700 http://bitbucket.org/pypy/pypy/changeset/a6963d0635d1/ Log: cleanup (forgot to commit this a while back) diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. 
If @@ -17,12 +17,11 @@ if len(obj) == 0: return _caller_locals() elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" + raise TypeError("vars() takes at most 1 argument.") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,8 +37,7 @@ attributes of its class's base classes. """ if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = _caller_locals().keys() # 2 stackframes away if not isinstance(local_names, list): @@ -48,92 +46,70 @@ return local_names import types - obj = args[0] - - dir_meth = None if isinstance(obj, types.InstanceType): - try: - dir_meth = getattr(obj, "__dir__") - except AttributeError: - pass + dir_meth = getattr(obj, '__dir__', None) else: - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): + names = dir_meth() + if not isinstance(names, list): raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) - result.sort() - return result + type(names),)) + names.sort() + return names elif isinstance(obj, types.ModuleType): try: - result = list(obj.__dict__) - result.sort() - return result + return sorted(obj.__dict__) except AttributeError: return [] - elif isinstance(obj, (types.TypeType, types.ClassType)): - #Don't look at __class__, as metaclass methods would be confusing. 
- result = _classdir(obj).keys() - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass + # Don't look at __class__, as metaclass methods would be confusing. + return sorted(_classdir(obj)) + else: + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). ## XXX Would like this to go away someday; for now, it's ## XXX needed to get at im_self etc of method objects. */ - for attr in ['__members__','__methods__']: - try: - l = getattr(obj, attr) - if not isinstance(l, list): - continue - for item in l: - if isinstance(item, types.StringTypes): - Dict[item] = None - except (AttributeError, TypeError): - pass + for attr in '__members__', '__methods__': + l = getattr(obj, attr, None) + if not isinstance(l, list): + continue + names.extend(item for item in l if isinstance(item, str)) - result = Dict.keys() - result.sort() - return result + return sorted(names) def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. + """Return a set of the accessible attributes of class/type klass. - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. + This includes all attributes of klass and all of the base classes + recursively. """ - Dict = {} + names = set() try: - Dict.update(klass.__dict__) - except AttributeError: pass + names.update(klass.__dict__) + except AttributeError: + pass try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. 
+ # XXX - Use of .__mro__ would be suggested, if the existance of + # that attribute could be guarranted. bases = klass.__bases__ - except AttributeError: pass + except AttributeError: + pass else: try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant + # Note that since we are only interested in the keys, the + # order we merge classes is unimportant for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + names.update(_classdir(base)) + except TypeError: + pass + return names From noreply at buildbot.pypy.org Sat Aug 2 03:41:47 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 03:41:47 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140802014147.46D251C100F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72652:20b6589f4314 Date: 2014-08-01 18:05 -0700 http://bitbucket.org/pypy/pypy/changeset/20b6589f4314/ Log: merge default diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. If @@ -18,11 +18,10 @@ return _caller_locals() elif len(obj) != 1: raise TypeError("vars() takes at most 1 argument.") - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError("vars() argument must have __dict__ attribute") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,77 +37,74 @@ attributes of its class's base classes. 
""" if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = list(_caller_locals().keys()) # 2 stackframes away local_names.sort() return local_names import types - obj = args[0] - dir_meth = lookup_special(obj, "__dir__") if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): + names = dir_meth() + if not isinstance(names, list): raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) - result.sort() - return result + type(names),)) + names.sort() + return names elif isinstance(obj, types.ModuleType): try: - result = list(obj.__dict__) - result.sort() - return result + return sorted(obj.__dict__) except AttributeError: return [] + elif isinstance(obj, type): + # Don't look at __class__, as metaclass methods would be confusing. + return sorted(_classdir(obj)) + else: + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) - elif isinstance(obj, type): - #Don't look at __class__, as metaclass methods would be confusing. - result = list(_classdir(obj).keys()) - result.sort() - return result + ## Comment from object.c: + ## /* Merge in __members__ and __methods__ (if any). + ## XXX Would like this to go away someday; for now, it's + ## XXX needed to get at im_self etc of method objects. 
*/ + for attr in '__members__', '__methods__': + l = getattr(obj, attr, None) + if not isinstance(l, list): + continue + names.extend(item for item in l if isinstance(item, str)) - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass - result = list(Dict.keys()) - result.sort() - return result + return sorted(names) def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. + """Return a set of the accessible attributes of class/type klass. - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. + This includes all attributes of klass and all of the base classes + recursively. """ - Dict = {} + names = set() try: - Dict.update(klass.__dict__) - except AttributeError: pass + names.update(klass.__dict__) + except AttributeError: + pass try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. + # XXX - Use of .__mro__ would be suggested, if the existance of + # that attribute could be guarranted. 
bases = klass.__bases__ - except AttributeError: pass + except AttributeError: + pass else: try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant + # Note that since we are only interested in the keys, the + # order we merge classes is unimportant for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + names.update(_classdir(base)) + except TypeError: + pass + return names From noreply at buildbot.pypy.org Sat Aug 2 03:41:48 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 2 Aug 2014 03:41:48 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140802014148.7B4051C100F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72653:777f424bdd72 Date: 2014-08-01 18:11 -0700 http://bitbucket.org/pypy/pypy/changeset/777f424bdd72/ Log: merge py3k diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -394,13 +394,6 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class - def test_classmethod_im_class(self): - class Foo(object): - @classmethod - def bar(cls): - pass - assert Foo.bar.im_class is type - def test_func_closure(self): x = 2 def f(): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. 
If @@ -18,11 +18,10 @@ return _caller_locals() elif len(obj) != 1: raise TypeError("vars() takes at most 1 argument.") - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError("vars() argument must have __dict__ attribute") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,15 +37,13 @@ attributes of its class's base classes. """ if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = list(_caller_locals().keys()) # 2 stackframes away local_names.sort() return local_names obj = args[0] - dir_meth = lookup_special(obj, "__dir__") if dir_meth is not None: result = dir_meth() diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -1,3 +1,5 @@ +from pypy.interpreter import gateway + def negate(f): """Create a function which calls `f` and negates its result. 
When the result is ``space.w_NotImplemented``, ``space.w_NotImplemented`` is From noreply at buildbot.pypy.org Sat Aug 2 11:15:22 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Sat, 2 Aug 2014 11:15:22 +0200 (CEST) Subject: [pypy-commit] pyrepl default: fix this bug, which has been around unnoticed forever Message-ID: <20140802091522.E751F1C0548@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r258:cef132a7e464 Date: 2014-08-02 10:15 +0100 http://bitbucket.org/pypy/pyrepl/changeset/cef132a7e464/ Log: fix this bug, which has been around unnoticed forever diff --git a/pyrepl/completing_reader.py b/pyrepl/completing_reader.py --- a/pyrepl/completing_reader.py +++ b/pyrepl/completing_reader.py @@ -40,9 +40,8 @@ STRIPCOLOR_REGEX = re.compile(r"\x1B\[([0-9]{1,3}(;[0-9]{1,2})?)?[m|K]") - def stripcolor(s): - return STRIPCOLOR_REGEX.regexp.sub('', s) + return STRIPCOLOR_REGEX.sub('', s) def real_len(s): From noreply at buildbot.pypy.org Sat Aug 2 11:50:16 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 2 Aug 2014 11:50:16 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fix formatting and references Message-ID: <20140802095016.2A9631C038C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5384:08cb49bfcb5e Date: 2014-08-02 11:50 +0200 http://bitbucket.org/pypy/extradoc/changeset/08cb49bfcb5e/ Log: fix formatting and references diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index 7c5dd9b395b326795850964f603f80bf65a15bd3..1c5deed039b2d15eb371549260ac32539fc6eeb3 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -1098,6 +1098,7 @@ committed read anymore. +\vspace{1cm} %% formatting hack \section{Evaluation} @@ -1490,74 +1491,78 @@ \softraggedright \bibitem{cfbolz09} Carl Friedrich Bolz, Antonio Cuni, Maciej - Fijalkowski, and Armin Rigo. 2009. 
Tracing the meta-level: PyPy's - tracing JIT compiler. \emph{In Proceedings of the 4th workshop on the + Fijalkowski, and Armin Rigo. 2009. ``Tracing the meta-level: PyPy's + tracing JIT compiler.'' \emph{Proc. 4th workshop on the Implementation, Compilation, Optimization of Object-Oriented Languages - and Programming Systems} (ICOOOLPS '09). + and Programming Systems} (ICOOOLPS '09), 18-25 -\bibitem{kevin10} Kevin Millikin, Florian Schneider. 2010. A New - Crankshaft for V8. +\bibitem{kevin10} Kevin Millikin, Florian Schneider. 2010. ``A New + Crankshaft for V8.'' \url{http://blog.chromium.org/2010/12/new-crankshaft-for-v8.html} \bibitem{ionmonkey} IonMonkey from Mozilla. 2014. \url{https://wiki.mozilla.org/IonMonkey/Overview} -\bibitem{wayforward14} Remigius Meier, Armin Rigo. 2014. A Way Forward - in Parallelising Dynamic Languages. To appear in ICOOOLPS'14. +\bibitem{wayforward14} Remigius Meier, Armin Rigo. 2014. ``A Way Forward + in Parallelising Dynamic Languages.'' In \emph{Proc. 9th International + Workshop on Implementation, Compilation, Optimization of Object-Oriented + Languages, Programs and Systems PLE} (ICOOOLPS '14) \bibitem{cpython} CPython. \url{www.python.org} \bibitem{webjython} The Jython Project, \url{www.jython.org} \bibitem{ironpython} IronPython. \url{www.ironpython.net} \bibitem{pypy} PyPy Project. \url{www.pypy.org} -\bibitem{beazley10} Beazley, David. "Understanding the Python GIL." +\bibitem{beazley10} David Beazley. ``Understanding the Python GIL.'' \emph{PyCON Python Conference}. Atlanta, Georgia. 2010. -\bibitem{harris10} Harris, Tim, James Larus, and Ravi - Rajwar. "Transactional memory." \emph{Synthesis Lectures on Computer - Architecture 5.1} (2010) +\bibitem{harris10} Tim Harris, James Larus, and Ravi + Rajwar. 2010. ``Transactional memory.'' \emph{Synthesis Lectures on Computer + Architecture 5.1} \bibitem{guerraoui08} - Rachid Guerraoui and Michal Kapalka. 2008. On the correctness of - transactional memory. 
In \emph{Proceedings of the 13th ACM SIGPLAN - Symposium on Principles and practice of parallel programming} (PPoPP - '08). + Rachid Guerraoui and Michal Kapalka. 2008. ``On the correctness of + transactional memory.'' In \emph{Proc. 13th ACM SIGPLAN Symposium on + Principles and practice of parallel programming} (PPoPP '08), + 175-184 -\bibitem{blundell06} Blundell, Colin, E. Christopher Lewis, and Milo - Martin. "Unrestricted transactional memory: Supporting I/O and system - calls within transactions." (2006). +\bibitem{blundell06} Colin Blundell, E. Christopher Lewis, and Milo + Martin. 2006. ``Unrestricted transactional memory: Supporting I/O and system + calls within transactions.'' \bibitem{spear08} Michael F. Spear and Michael Silverman and Luke - Daless and Maged M. Michael and Michael L. Scott. 2008. "Implementing - and exploiting inevitability in software transactional memory." In + Daless and Maged M. Michael and Michael L. Scott. 2008. ``Implementing + and exploiting inevitability in software transactional memory.'' In \emph{Proc. 37th IEEE international conference on parallel processing} (ICPP '08), 59-66 -\bibitem{fergus02} Fergus Henderson. 2002. Accurate garbage collection - in an uncooperative environment. \emph{In Proceedings of the 3rd - international symposium on Memory management} (ISMM '02). +\bibitem{fergus02} Fergus Henderson. 2002. ``Accurate garbage collection + in an uncooperative environment.'' In \emph{Proc. 3rd + International Symposium on Memory management} (ISMM '02), 150-156 -\bibitem{stmupdate13} Armin Rigo, Remigius Meier. 2013. Update on - STM. \url{morepypy.blogspot.ch/2013/10/update-on-stm.html} +\bibitem{stmupdate13} Armin Rigo, Remigius Meier. 2013. ``Update on + STM.'' \url{morepypy.blogspot.ch/2013/10/update-on-stm.html} \bibitem{rajwar05} Ravi Rajwar, Maurice Herlihy, and Konrad - Lai. 2005. Virtualizing Transactional Memory. 
In \emph{Proceedings of - the 32nd annual international symposium on Computer Architecture} - (ISCA '05). + Lai. 2005. ``Virtualizing Transactional Memory.'' In \emph{Proc. + 32nd annual International Symposium on Computer Architecture} + (ISCA '05), 494-505 \bibitem{chung06} JaeWoong Chung, Chi Cao Minh, Austen McDonald, Travis Skare, Hassan Chafi, Brian D. Carlstrom, Christos Kozyrakis, - and Kunle Olukotun. 2006. Tradeoffs in transactional memory - virtualization. \emph{SIGOPS Oper. Syst. Rev.} 40, 5 (October 2006), - 371-381. + and Kunle Olukotun. 2006. ``Tradeoffs in transactional memory + virtualization.'' In \emph{Proc. 12th international conference on + Architectural support for programming languages and operating + systems} (ASPLOS XII), 371-381 \bibitem{martin09} Martín Abadi, Tim Harris, and Mojtaba - Mehrara. 2009. Transactional memory with strong atomicity using - off-the-shelf memory protection hardware. \emph{SIGPLAN Not.} 44, 4 - (February 2009), 185-196. + Mehrara. 2009. ``Transactional memory with strong atomicity using + off-the-shelf memory protection hardware.'' In \emph{Proc. 14th ACM + SIGPLAN symposium on Principles and practice of parallel + programming} (PPoPP '09), 185-196 \bibitem{pypybenchs} PyPy benchmarks repository. 2014. Revision - a26f2fb58413. \url{bitbucket.org/pypy/benchmarks} + fd2da4da8f33. \url{bitbucket.org/pypy/benchmarks} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -1570,43 +1575,44 @@ \bibitem{odaira14} - Rei Odaira, Jose G. Castanos, and Hisanobu Tomari. 2014. Eliminating + Rei Odaira, Jose G. Castanos, and Hisanobu Tomari. 2014. ``Eliminating global interpreter locks in Ruby through hardware transactional - memory. In \emph{Proc. 19th ACM SIGPLAN Symposium on - Principles and Practice of Parallel Programming} (PPoPP '14). + memory.'' In \emph{Proc. 
19th ACM SIGPLAN Symposium on + Principles and Practice of Parallel Programming} (PPoPP '14), 131-142 \bibitem{warmhoff13} - Jons-Tobias Wamhoff, Christof Fetzer, Pascal Felber, Etienne Rivière, - and Gilles Muller. 2013. FastLane: improving performance of software - transactional memory for low thread counts. \emph{SIGPLAN Not.} 48, 8 - (February 2013), 113-122. + Jons-Tobias Wamhoff, Christof Fetzer, Pascal Felber, Etienne + Rivière, and Gilles Muller. 2013. ``FastLane: improving performance + of software transactional memory for low thread counts.'' In + \emph{Proc. 18th ACM SIGPLAN symposium on Principles and practice of + parallel programming} (PPoPP '13), 113-122 \bibitem{drago11} Aleksandar Dragojević, Pascal Felber, Vincent - Gramoli, and Rachid Guerraoui. 2011. Why STM can be more than a - research toy. \emph{Commun. ACM} 54, 4 (April 2011), 70-77. + Gramoli, and Rachid Guerraoui. 2011. ``Why STM can be more than a + research toy.'' \emph{Commun. ACM} 54, 4 (April 2011), 70-77. \bibitem{cascaval08} Calin Cascaval, Colin Blundell, Maged Michael, Harold W. Cain, Peng - Wu, Stefanie Chiras, and Siddhartha Chatterjee. 2008. Software + Wu, Stefanie Chiras, and Siddhartha Chatterjee. 2008. ``Software transactional memory: why is it only a research - toy?. \emph{Commun. ACM} 51, 11 (November 2008), 40-46. + toy?.'' \emph{Commun. ACM} 51, 11 (November 2008), 40-46. \bibitem{nicholas06} - Nicholas Riley and Craig Zilles. 2006. Hardware transactional memory - support for lightweight dynamic language evolution. \emph{In + Nicholas Riley and Craig Zilles. 2006. ``Hardware transactional memory + support for lightweight dynamic language evolution.'' \emph{In Companion to the 21st ACM SIGPLAN symposium on Object-oriented programming systems, languages, and applications} (OOPSLA - '06). + '06), 998-1008 \bibitem{fuad10} - Fuad Tabba. 2010. Adding concurrency in Python using a commercial - processor's hardware transactional memory support. \emph{SIGARCH + Fuad Tabba. 2010. 
``Adding concurrency in Python using a commercial + processor's hardware transactional memory support.'' \emph{SIGARCH Comput. Archit. News 38}, 5 (April 2010), 12-19. \bibitem{felber07} Pascal Felber and Torvald Riegel and Christof Fetzer and Martin - Süßkraut and Ulrich Müller and Heiko Sturzrehm. 2007. Transactifying - applications using an open compiler framework. \emph{TRANSACT}, August + Süßkraut and Ulrich Müller and Heiko Sturzrehm. 2007. ``Transactifying + applications using an open compiler framework.'' \emph{TRANSACT}, August (2007): 4-6. % \bibitem{bill06} @@ -1618,55 +1624,55 @@ \bibitem{spear09} Luke Dalessandro, Dave Dice, Michael Scott, Nir Shavit, and Michael - Spear. 2010. Transactional mutex locks. In \emph{Proceedings of the + Spear. 2010. ``Transactional mutex locks.'' In \emph{Proc. 16th international Euro-Par conference on Parallel processing: Part II} (Euro-Par'10), Pasqua D'Ambra, Mario Guarracino, and Domenico Talia (Eds.). Springer-Verlag, Berlin, Heidelberg, 2-13. \bibitem{lamport79} - L. Lamport. 1979. How to Make a Multiprocessor Computer That Correctly - Executes Multiprocess Programs. \emph{IEEE Trans. Comput.} 28, 9 + L. Lamport. 1979. ``How to Make a Multiprocessor Computer That Correctly + Executes Multiprocess Programs.'' \emph{IEEE Trans. Comput.} 28, 9 (September 1979), 690-691. \bibitem{victor11} - Victor Pankratius and Ali-Reza Adl-Tabatabai. 2011. A study of - transactional memory vs. locks in practice. In \emph{Proc. - twenty-third annual ACM Symposium on Parallelism in Algorithms - and Architectures} (SPAA '11). + Victor Pankratius and Ali-Reza Adl-Tabatabai. 2011. ``A study of + transactional memory vs. locks in practice.'' In \emph{Proc. + 23rd annual ACM Symposium on Parallelism in Algorithms + and Architectures} (SPAA '11), 43-52 \bibitem{christopher10} Christopher J. Rossbach, Owen S. Hofmann, and Emmett - Witchel. 2010. Is transactional programming actually - easier?. \emph{Proc. 
15th ACM SIGPLAN Symposium on - Principles and Practice of Parallel Programming} (PPoPP '10). + Witchel. 2010. ``Is transactional programming actually + easier?.'' \emph{Proc. 15th ACM SIGPLAN Symposium on + Principles and Practice of Parallel Programming} (PPoPP '10), 47-56 \bibitem{tim03} - Tim Harris and Keir Fraser. 2003. Language support for lightweight - transactions. \emph{In Proceedings of the 18th annual ACM SIGPLAN + Tim Harris and Keir Fraser. 2003. ``Language support for lightweight + transactions.'' In \emph{Proc. 18th annual ACM SIGPLAN conference on Object-oriented programing, systems, languages, and applications} (OOPSLA '03), 388-402. \bibitem{tim05} Tim Harris, Simon Marlow, Simon Peyton Jones, and - Maurice Herlihy. 2008. Composable memory transactions. + Maurice Herlihy. 2008. ``Composable memory transactions.'' \emph{Commun. ACM} 51, 8 (August 2008), 91-100. \bibitem{shan08} - Shan Lu, Soyeon Park, Eunsoo Seo, and Yuanyuan Zhou. 2008. Learning + Shan Lu, Soyeon Park, Eunsoo Seo, and Yuanyuan Zhou. 2008. ``Learning from mistakes: a comprehensive study on real world concurrency bug - characteristics. \emph{SIGARCH Comput. Archit. News} 36, 1 (March 2008), + characteristics.'' \emph{SIGARCH Comput. Archit. News} 36, 1 (March 2008), 329-339. \bibitem{bennet10} Bennet Yee, David Sehr, Gregory Dardyk, J. Bradley Chen, Robert Muth, Tavis Ormandy, Shiki Okasaka, Neha Narula, and Nicholas - Fullagar. 2010. Native Client: a sandbox for portable, untrusted x86 - native code. \emph{Commun. ACM} 53, 1 (January 2010), 91-99. + Fullagar. 2010. ``Native Client: a sandbox for portable, untrusted x86 + native code.'' \emph{Commun. ACM} 53, 1 (January 2010), 91-99. % \bibitem{leis14} -% Leis, Viktor, Alfons Kemper, and Thomas Neumann. "Exploiting -% Hardware Transactional Memory in Main-Memory Databases." +% Leis, Viktor, Alfons Kemper, and Thomas Neumann. ``Exploiting +% Hardware Transactional Memory in Main-Memory Databases.'' % \emph{Proc. of ICDE}. 2014. 
% \bibitem{biased} From noreply at buildbot.pypy.org Sat Aug 2 15:00:21 2014 From: noreply at buildbot.pypy.org (codeZeilen) Date: Sat, 2 Aug 2014 15:00:21 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk default: README.md edited online with Bitbucket Message-ID: <20140802130021.52ECB1C0548@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: Changeset: r1007:7a6927034c24 Date: 2014-08-02 13:00 +0000 http://bitbucket.org/pypy/lang-smalltalk/changeset/7a6927034c24/ Log: README.md edited online with Bitbucket diff --git a/README.md b/README.md --- a/README.md +++ b/README.md @@ -1,69 +1,71 @@ -Spy -========= - -A Squeak VM written in RPython, called "SPy VM". - -Setup ----- -### Required Projects -You need three repositories: -* This one -* pypy/pypy -* pypy/rsdl - -### Required packages -You need the following packages on your OS. Install with your favorite package -manager: -* pypy (For faster translation of the SPY VM) -* libsdl-dev - -### Adjusting the PYTHONPATH -In order to allow the RPython toolchain to find the rsdl module you have to add -the rsdl folder to the PYTHONPATH. Note that you have to add the rsdl subfolder -of the rsdl repository to the PYTHONPATH. - -``` -export PYTHONPATH=${PYTHONPATH}:[path to rsdl repository]/rsdl -``` - -### Setting the SDL Driver -For testing the basic functionality of the VM it is currently best to disable -the UI. You can do so by setting the SDL_VIDEODRIVER environment variable to -dummy. -``` -export SDL_VIDEODRIVER=dummy -``` - -### Building -To build the VM enter the following: - -``` -[path to pypy repository]/rpython/bin/rpython [path to lang-smalltalk -repository]/targetimageloadingsmalltalk.py -``` - -To build the VM with enabled just-in-time compiler: -``` -[path to pypy repository]/rpython/bin/rpython -O jit [path to lang-smalltalk -repository]/targetimageloadingsmalltalk.py -``` - -### Starting an image -The build process will produce an executable e.g. called -targetimageloadingsmalltalk-c. 
Start it with the following: -``` -./targetimageloadingsmalltalk-c images/Squeak4.5-*.image -``` - -Setup for stm-enabled SPY ---- -There are two branches integrating the RPython STM into SPY: stm-c4, -storage-stm-c4. You have to change two things of the setup to build those -branches. - -1. Change your local pypy repository to the stm-c4 branch. -2. Build using the following command: -``` -[path to pypy repository]/rpython/bin/rpython --gc=stmgc [path to lang-smalltalk -repository]/targetimageloadingsmalltalk.py -``` +Spy +========= + +A Squeak VM written in RPython, called "SPy VM". + +Setup +---- +### Required Projects +You need three repositories: + +* This one +* pypy/pypy +* pypy/rsdl + +### Required packages +You need the following packages on your OS. Install with your favorite package +manager: +* pypy (For faster translation of the SPY VM) +* libsdl-dev + +### Adjusting the PYTHONPATH +In order to allow the RPython toolchain to find the rsdl module you have to add +the rsdl folder to the PYTHONPATH. Note that you have to add the rsdl subfolder +of the rsdl repository to the PYTHONPATH. + +``` +export PYTHONPATH=${PYTHONPATH}:[path to rsdl repository]/rsdl +``` + +### Setting the SDL Driver +For testing the basic functionality of the VM it is currently best to disable +the UI. You can do so by setting the SDL_VIDEODRIVER environment variable to +dummy. +``` +export SDL_VIDEODRIVER=dummy +``` + +### Building +To build the VM enter the following: + +``` +[path to pypy repository]/rpython/bin/rpython [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +To build the VM with enabled just-in-time compiler: +``` +[path to pypy repository]/rpython/bin/rpython -O jit [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` + +### Starting an image +The build process will produce an executable e.g. called +targetimageloadingsmalltalk-c. 
Start it with the following: +``` +./targetimageloadingsmalltalk-c images/Squeak4.5-*.image +``` + +Setup for stm-enabled SPY +--- +You can see the current state of the integration of the RPython STM in our stmgc-c7 branch. +Beware that you can only build this branch if you have a 64-bit linux. To build this branch you have to setup several things: + +1. Change your local pypy repository to the stm-gc7 branch, commit dd3c06b +2. Get a clang which has the patches from ([Clang patches](https://bitbucket.org/pypy/stmgc/src/d164a5bcad5e7615b4362b6a1a49d51e2e06de0c/c7/llvmfix/?at=default)). If you have a Debian-based OS you can use the following package: https://launchpad.net/~malte.swart/+archive/ubuntu/llvm-pypystm + +To build, use the following command: +``` +[path to pypy repository]/rpython/bin/rpython --gc=stmgc [path to lang-smalltalk +repository]/targetimageloadingsmalltalk.py +``` \ No newline at end of file From noreply at buildbot.pypy.org Sat Aug 2 16:47:06 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Sat, 2 Aug 2014 16:47:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some more adjustments Message-ID: <20140802144706.0C9201C0991@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: extradoc Changeset: r5385:f298bd328835 Date: 2014-08-02 16:47 +0200 http://bitbucket.org/pypy/extradoc/changeset/f298bd328835/ Log: some more adjustments diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index 1c5deed039b2d15eb371549260ac32539fc6eeb3..1b6090da505763b644a566a54df6a1b33bfeed25 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -267,8 +267,8 @@ \vspace{3mm} % XXX:HACK:TODO \item We integrate the system closely with a garbage collector (GC) to lower the overhead of STM. -\item This new STM system is used to replace the GIL in one - implementation of Python and is then evaluated. 
+\item This new STM system is used to replace the GIL in a Python + interpreter called PyPy and is then evaluated. \end{itemize} The STM described here is efficient and has low overhead and is @@ -599,11 +599,11 @@ \caption{Application programming interface\label{lst:api}} \end{code} -Our TM system is designed as a C library that covers all aspects around -transactions and object memory management. It is designed for -object-oriented dynamic language VMs as a replacement for the GIL. -The library exposes the functions in Listing~\ref{lst:api} as its -interface for use by a dynamic language interpreter. +Our TM system is designed as a C library~\cite{stmgc-c7} that covers +all aspects around transactions and object memory management. It is +designed for object-oriented dynamic language VMs as a replacement for +the GIL. The library exposes the functions in Listing~\ref{lst:api} +as its interface for use by a dynamic language interpreter. \lstinline!stm_start_transaction()! starts a transaction in the current thread. Internally, it uses \lstinline!setjmp()! to remember @@ -635,14 +635,13 @@ In the following sections, whenever we use SO, we go through the address translation to get to the actual contents of an object. This is also signified by the type \lstinline!object_t!. This type is -special as it causes the -compiler\footnote{We use Clang 3.5 with patches to deal with bugs in -its ``address-space 256'' feature. Patches are available from authors -until inclusion into the official clang.} to make all accesses through -it relative to the $\%gs$ register. With exceptions, nearly all -accesses to objects managed by the TM system use this type so -that the CPU will translate the reference to the right version of the -object. +special as it causes the compiler\footnote{We use Clang 3.5 with + patches to deal with bugs in its ``address-space 256'' + feature. 
Patches are available from authors until inclusion into the + official clang~\cite{stmgc-c7}.} to make all accesses through it +relative to the $\%gs$ register. With exceptions, nearly all accesses +to objects managed by the TM system use this type so that the CPU will +translate the reference to the right version of the object. \medskip % why, Latex, why?? @@ -1086,7 +1085,7 @@ \item prefer transactions that started earlier to younger transactions to increase the chance of long transactions succeeding \item to support \emph{inevitable} transactions, we always prefer them - to others since they cannot abort (similar to~\cite{blundell06}) + to others since they cannot abort~\cite{blundell06} \end{itemize} We can either simply abort a transaction to let the other one succeed, @@ -1336,6 +1335,12 @@ $1-2.5\times$, and we beat the GIL's single-thread performance in 5 out of 8 benchmarks. +Again we see that the GIL degrades performance on multiple threads. +On the other hand, STM gets faster with more threads, or at least +not much slower. Hence, any application using threads for concurrency +immediately benefits from running on STM. On 4 threads, its performance +is better than the GIL's in 7 out of 8 benchmarks by up to $4\times$. + We see that generally, the group of embarrassingly parallel benchmarks scales best. The other three benchmarks scale barely or not at all with the number of threads. The reason for this is likely again the @@ -1365,7 +1370,7 @@ \end{figure} - +\vspace{3mm} % formatting hack \section{Related Work} There have been several attempts at removing the GIL using TM. We @@ -1564,6 +1569,8 @@ \bibitem{pypybenchs} PyPy benchmarks repository. 2014. Revision fd2da4da8f33. \url{bitbucket.org/pypy/benchmarks} +\bibitem{stmgc-c7} STMGC-C7 library repository. 
2014 + \url{bitbucket.org/pypy/stmgc} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% From noreply at buildbot.pypy.org Sat Aug 2 23:40:20 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 2 Aug 2014 23:40:20 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes: wrap LONGLONG_MASK (fixes #1836) Message-ID: <20140802214020.CF8791C0250@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes Changeset: r72654:744ada0a9c08 Date: 2014-08-02 23:36 +0200 http://bitbucket.org/pypy/pypy/changeset/744ada0a9c08/ Log: wrap LONGLONG_MASK (fixes #1836) diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -13,6 +13,7 @@ FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) +R_LONGLONG_MASK = r_ulonglong(LONGLONG_MASK) eci = ExternalCompilationInfo( @@ -282,7 +283,7 @@ W_LZMADecompressor.__init__(self, space, format) if space.is_none(w_memlimit): - memlimit = r_ulonglong(LONGLONG_MASK) + memlimit = R_LONGLONG_MASK else: memlimit = space.r_ulonglong_w(w_memlimit) From noreply at buildbot.pypy.org Sat Aug 2 23:40:22 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 2 Aug 2014 23:40:22 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes (pull request #263) Message-ID: <20140802214022.1D9A61C0250@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: py3.3 Changeset: r72655:3d0db2b14b40 Date: 2014-08-02 14:39 -0700 http://bitbucket.org/pypy/pypy/changeset/3d0db2b14b40/ Log: Merged in numerodix/pypy/py3.3-fixes (pull request #263) wrap LONGLONG_MASK (fixes #1836) diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -13,6 +13,7 @@ FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) +R_LONGLONG_MASK = r_ulonglong(LONGLONG_MASK) eci = ExternalCompilationInfo( @@ -282,7 
+283,7 @@ W_LZMADecompressor.__init__(self, space, format) if space.is_none(w_memlimit): - memlimit = r_ulonglong(LONGLONG_MASK) + memlimit = R_LONGLONG_MASK else: memlimit = space.r_ulonglong_w(w_memlimit) From noreply at buildbot.pypy.org Sun Aug 3 01:04:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 01:04:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: follow up to 1f716034739d: use a set (actually a dict) to avoid potential Message-ID: <20140802230458.B928D1C0250@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72656:ae5b2d7eb9e3 Date: 2014-08-02 16:03 -0700 http://bitbucket.org/pypy/pypy/changeset/ae5b2d7eb9e3/ Log: follow up to 1f716034739d: use a set (actually a dict) to avoid potential duplicates in free_vars and ensure has_free is now set diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -7,7 +7,7 @@ from pypy.tool import stdlib_opcode as ops from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib import rfloat @@ -141,11 +141,12 @@ i += 1 return result -def _list_to_dict(l, offset=0): + at specialize.argtype(0) +def _iter_to_dict(iterable, offset=0): result = {} index = offset - for i in range(len(l)): - result[l[i]] = index + for item in iterable: + result[item] = index index += 1 return result @@ -161,10 +162,10 @@ self.first_block = self.new_block() self.use_block(self.first_block) self.names = {} - self.var_names = _list_to_dict(scope.varnames) + self.var_names = _iter_to_dict(scope.varnames) self.cell_vars = _make_index_dict_filter(scope.symbols, symtable.SCOPE_CELL) - self.free_vars = _list_to_dict(scope.free_vars, len(self.cell_vars)) + self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars)) self.w_consts 
= space.newdict() self.argcount = 0 self.kwonlyargcount = 0 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -37,7 +37,7 @@ self.roles = {} self.varnames = [] self.children = [] - self.free_vars = [] + self.free_vars = {} self.temp_name_counter = 1 self.has_free = False self.child_has_free = False @@ -136,8 +136,9 @@ err = "no binding for nonlocal '%s' found" % (name,) raise SyntaxError(err, self.lineno, self.col_offset) self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None free[name] = None + self.has_free = True elif flags & SYM_BOUND: self.symbols[name] = SCOPE_LOCAL local[name] = None @@ -147,7 +148,7 @@ pass elif bound and name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None free[name] = None self.has_free = True elif name in globs: @@ -204,7 +205,7 @@ except KeyError: if name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None else: if role_here & (SYM_BOUND | SYM_GLOBAL) and \ self._hide_bound_from_nested_scopes: @@ -213,7 +214,7 @@ # scope. We add the name to the class scope's list of free # vars, so it will be passed through by the interpreter, but # we leave the scope alone, so it can be local on its own. 
- self.free_vars.append(name) + self.free_vars[name] = None self._check_optimization() free.update(new_free) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -819,6 +819,17 @@ return test2()""" yield self.st, test, "test1(2)", 2 + def test_class_nonlocal_from_arg(self): + test = """if 1: + def f(x): + class c: + nonlocal x + x += 1 + def get(self): + return x + return c().get()""" + yield self.st, test, "f(3)", 4 + def test_lots_of_loops(self): source = "for x in y: pass\n" * 1000 compile_with_astcompiler(source, 'exec', self.space) From noreply at buildbot.pypy.org Sun Aug 3 03:46:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 03:46:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k: rekill this Message-ID: <20140803014640.6E1931C0250@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72657:10c00f1dd4c4 Date: 2014-08-02 18:30 -0700 http://bitbucket.org/pypy/pypy/changeset/10c00f1dd4c4/ Log: rekill this diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -69,17 +69,6 @@ klass = getattr(obj, '__class__', None) if klass is not None: names.update(_classdir(klass)) - - ## Comment from object.c: - ## /* Merge in __members__ and __methods__ (if any). - ## XXX Would like this to go away someday; for now, it's - ## XXX needed to get at im_self etc of method objects. 
*/ - for attr in '__members__', '__methods__': - l = getattr(obj, attr, None) - if not isinstance(l, list): - continue - names.extend(item for item in l if isinstance(item, str)) - return sorted(names) def _classdir(klass): From noreply at buildbot.pypy.org Sun Aug 3 03:46:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 03:46:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140803014641.CE6081C0250@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72658:a85416828246 Date: 2014-08-02 18:45 -0700 http://bitbucket.org/pypy/pypy/changeset/a85416828246/ Log: merge py3k diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -7,7 +7,7 @@ from pypy.tool import stdlib_opcode as ops from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib import rfloat @@ -141,11 +141,12 @@ i += 1 return result -def _list_to_dict(l, offset=0): + at specialize.argtype(0) +def _iter_to_dict(iterable, offset=0): result = {} index = offset - for i in range(len(l)): - result[l[i]] = index + for item in iterable: + result[item] = index index += 1 return result @@ -161,10 +162,10 @@ self.first_block = self.new_block() self.use_block(self.first_block) self.names = {} - self.var_names = _list_to_dict(scope.varnames) + self.var_names = _iter_to_dict(scope.varnames) self.cell_vars = _make_index_dict_filter(scope.symbols, symtable.SCOPE_CELL) - self.free_vars = _list_to_dict(scope.free_vars, len(self.cell_vars)) + self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars)) self.w_consts = space.newdict() self.argcount = 0 self.kwonlyargcount = 0 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- 
a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -37,7 +37,7 @@ self.roles = {} self.varnames = [] self.children = [] - self.free_vars = [] + self.free_vars = {} self.temp_name_counter = 1 self.has_free = False self.child_has_free = False @@ -136,8 +136,9 @@ err = "no binding for nonlocal '%s' found" % (name,) raise SyntaxError(err, self.lineno, self.col_offset) self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None free[name] = None + self.has_free = True elif flags & SYM_BOUND: self.symbols[name] = SCOPE_LOCAL local[name] = None @@ -147,7 +148,7 @@ pass elif bound and name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None free[name] = None self.has_free = True elif name in globs: @@ -204,7 +205,7 @@ except KeyError: if name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None else: if role_here & (SYM_BOUND | SYM_GLOBAL) and \ self._hide_bound_from_nested_scopes: @@ -213,7 +214,7 @@ # scope. We add the name to the class scope's list of free # vars, so it will be passed through by the interpreter, but # we leave the scope alone, so it can be local on its own. 
- self.free_vars.append(name) + self.free_vars[name] = None self._check_optimization() free.update(new_free) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -819,6 +819,17 @@ return test2()""" yield self.st, test, "test1(2)", 2 + def test_class_nonlocal_from_arg(self): + test = """if 1: + def f(x): + class c: + nonlocal x + x += 1 + def get(self): + return x + return c().get()""" + yield self.st, test, "f(3)", 4 + def test_lots_of_loops(self): source = "for x in y: pass\n" * 1000 compile_with_astcompiler(source, 'exec', self.space) From noreply at buildbot.pypy.org Sun Aug 3 03:46:43 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 03:46:43 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge upstream Message-ID: <20140803014643.3333E1C0250@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72659:7b27ed91a952 Date: 2014-08-02 18:46 -0700 http://bitbucket.org/pypy/pypy/changeset/7b27ed91a952/ Log: merge upstream diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -13,6 +13,7 @@ FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) +R_LONGLONG_MASK = r_ulonglong(LONGLONG_MASK) eci = ExternalCompilationInfo( @@ -282,7 +283,7 @@ W_LZMADecompressor.__init__(self, space, format) if space.is_none(w_memlimit): - memlimit = r_ulonglong(LONGLONG_MASK) + memlimit = R_LONGLONG_MASK else: memlimit = space.r_ulonglong_w(w_memlimit) From noreply at buildbot.pypy.org Sun Aug 3 18:14:39 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:39 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: add lzma to build-time dependencies (required for py3.3) Message-ID: 
<20140803161439.0294A1C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72660:cbb086cc9406 Date: 2014-08-02 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/cbb086cc9406/ Log: add lzma to build-time dependencies (required for py3.3) diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,6 +61,9 @@ bz2 libbz2 +lzma + liblzma + sqlite3 libsqlite3 @@ -79,12 +82,12 @@ On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev + liblzma-dev libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + xz-devel lib-sqlite3-devel ncurses-devel expat-devel openssl-devel On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to From noreply at buildbot.pypy.org Sun Aug 3 18:14:40 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:40 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix typos and awkward wording Message-ID: <20140803161440.483E21C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72661:b3db0567b334 Date: 2014-08-02 20:20 +0200 http://bitbucket.org/pypy/pypy/changeset/b3db0567b334/ Log: fix typos and awkward wording diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -14,14 +14,14 @@ * Because of the above, we are very serious about Test Driven Development. It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. 
Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. (It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture @@ -32,13 +32,13 @@ * :doc:`RPython ` is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. The RPython standard library is to be found in the ``rlib`` subdirectory. -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the :doc:`architecture ` document written about it. @@ -67,7 +67,7 @@ that turns it into machine code. Writing a new backend is a traditional way to get into the project. -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. 
:doc:`rpython:garbage-collection` is inserted during translation. Moreover, this is not reference counting; it is a real From noreply at buildbot.pypy.org Sun Aug 3 18:14:41 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:41 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: update link to bug tracker Message-ID: <20140803161441.76D171C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72662:6057597cf892 Date: 2014-08-03 12:17 +0200 http://bitbucket.org/pypy/pypy/changeset/6057597cf892/ Log: update link to bug tracker diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -110,7 +110,7 @@ .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _Development mailing list: http://mail.python.org/mailman/listinfo/pypy-dev .. _Commit mailing list: http://mail.python.org/mailman/listinfo/pypy-commit -.. _Development bug/feature tracker: https://bugs.pypy.org/ +.. _Development bug/feature tracker: https://bitbucket.org/pypy/pypy/issues?status=new&status=open Indices and tables From noreply at buildbot.pypy.org Sun Aug 3 18:14:42 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:42 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: remove obsolete paragraph about translation to Java/.NET Message-ID: <20140803161442.AA6431C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72663:2e6e8dfca126 Date: 2014-08-03 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/2e6e8dfca126/ Log: remove obsolete paragraph about translation to Java/.NET diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -75,9 +75,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - :source:`pypy/config/pypyoption.py` for details. 
- When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: From noreply at buildbot.pypy.org Sun Aug 3 18:14:43 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:43 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: move lzma instructions to dedicated Python 3.3 section Message-ID: <20140803161443.CD67C1C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72664:0c0a5730d7c7 Date: 2014-08-03 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/0c0a5730d7c7/ Log: move lzma instructions to dedicated Python 3.3 section diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,9 +61,6 @@ bz2 libbz2 -lzma - liblzma - sqlite3 libsqlite3 @@ -82,12 +79,12 @@ On Debian, this is the command to install all build-time dependencies:: apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ - liblzma-dev libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev + libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ - xz-devel lib-sqlite3-devel ncurses-devel expat-devel openssl-devel + lib-sqlite3-devel ncurses-devel expat-devel openssl-devel On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to @@ -96,6 +93,26 @@ xcode-select --install +Python 3.3 +~~~~~~~~~~ + +For versions of PyPy that implement Python 3.3 and later you will +also need: + +lzma + liblzma + +On Debian. 
install it using:: + + apt-get install liblzma-dev + +On Fedora:: + + yum install xz-devel + +TODO: Mac OS X + + Run the translation ------------------- From noreply at buildbot.pypy.org Sun Aug 3 18:14:45 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:45 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: restore lzma instructions to the common section Message-ID: <20140803161445.0A0341C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72665:1c82abf52290 Date: 2014-08-03 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/1c82abf52290/ Log: restore lzma instructions to the common section diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,6 +61,9 @@ bz2 libbz2 +lzma (required for Python 3.3 and later) + liblzma + sqlite3 libsqlite3 @@ -81,11 +84,15 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev +For Python 3.3 you will also need ``liblzma-dev``. + On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel +For Python 3.3 you will also need ``xz-devel``. + On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to find them you may need to run:: @@ -93,26 +100,6 @@ xcode-select --install -Python 3.3 -~~~~~~~~~~ - -For versions of PyPy that implement Python 3.3 and later you will -also need: - -lzma - liblzma - -On Debian. 
install it using:: - - apt-get install liblzma-dev - -On Fedora:: - - yum install xz-devel - -TODO: Mac OS X - - Run the translation ------------------- From noreply at buildbot.pypy.org Sun Aug 3 18:14:46 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:46 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: use a more robust (hopefully) bug tracker url Message-ID: <20140803161446.268401C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72666:ef9b286e5879 Date: 2014-08-03 17:59 +0200 http://bitbucket.org/pypy/pypy/changeset/ef9b286e5879/ Log: use a more robust (hopefully) bug tracker url diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -110,7 +110,7 @@ .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _Development mailing list: http://mail.python.org/mailman/listinfo/pypy-dev .. _Commit mailing list: http://mail.python.org/mailman/listinfo/pypy-commit -.. _Development bug/feature tracker: https://bitbucket.org/pypy/pypy/issues?status=new&status=open +.. 
_Development bug/feature tracker: https://bitbucket.org/pypy/pypy/issues Indices and tables From noreply at buildbot.pypy.org Sun Aug 3 18:14:47 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 3 Aug 2014 18:14:47 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: refer to PyPy3 rather than Python 3.3 Message-ID: <20140803161447.4764A1C3382@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72667:a8628eb1bb4f Date: 2014-08-03 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/a8628eb1bb4f/ Log: refer to PyPy3 rather than Python 3.3 diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,7 +61,7 @@ bz2 libbz2 -lzma (required for Python 3.3 and later) +lzma (required for PyPy3) liblzma sqlite3 @@ -84,14 +84,14 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev -For Python 3.3 you will also need ``liblzma-dev``. +For PyPy3 you will also need ``liblzma-dev``. On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel -For Python 3.3 you will also need ``xz-devel``. +For PyPy3 you will also need ``xz-devel``. On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. 
However, note that in order for the installation to From noreply at buildbot.pypy.org Sun Aug 3 18:14:48 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 3 Aug 2014 18:14:48 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Merged in numerodix/pypy/improve-docs-fixes (pull request #262) Message-ID: <20140803161448.6DCE41C3382@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72668:79d59d37eb63 Date: 2014-08-03 18:14 +0200 http://bitbucket.org/pypy/pypy/changeset/79d59d37eb63/ Log: Merged in numerodix/pypy/improve-docs-fixes (pull request #262) various minor doc fixes diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,6 +61,9 @@ bz2 libbz2 +lzma (required for PyPy3) + liblzma + sqlite3 libsqlite3 @@ -81,11 +84,15 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev +For PyPy3 you will also need ``liblzma-dev``. + On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel +For PyPy3 you will also need ``xz-devel``. + On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to find them you may need to run:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -75,9 +75,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - :source:`pypy/config/pypyoption.py` for details. - When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -110,7 +110,7 @@ .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. 
_Development mailing list: http://mail.python.org/mailman/listinfo/pypy-dev .. _Commit mailing list: http://mail.python.org/mailman/listinfo/pypy-commit -.. _Development bug/feature tracker: https://bugs.pypy.org/ +.. _Development bug/feature tracker: https://bitbucket.org/pypy/pypy/issues Indices and tables diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -14,14 +14,14 @@ * Because of the above, we are very serious about Test Driven Development. It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. (It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture @@ -32,13 +32,13 @@ * :doc:`RPython ` is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. 
The RPython standard library is to be found in the ``rlib`` subdirectory. -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the :doc:`architecture ` document written about it. @@ -67,7 +67,7 @@ that turns it into machine code. Writing a new backend is a traditional way to get into the project. -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. :doc:`rpython:garbage-collection` is inserted during translation. Moreover, this is not reference counting; it is a real From noreply at buildbot.pypy.org Sun Aug 3 18:14:57 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 3 Aug 2014 18:14:57 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: Close branch improve-docs-fixes Message-ID: <20140803161457.7082E1C3382@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs-fixes Changeset: r72669:fdf329dca5f0 Date: 2014-08-03 18:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fdf329dca5f0/ Log: Close branch improve-docs-fixes From noreply at buildbot.pypy.org Sun Aug 3 19:50:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 19:50:08 +0200 (CEST) Subject: [pypy-commit] pypy default: more cleanup Message-ID: <20140803175008.96F7A1C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72670:971aedb02438 Date: 2014-08-03 10:48 -0700 http://bitbucket.org/pypy/pypy/changeset/971aedb02438/ Log: more cleanup diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -94,22 +94,13 @@ recursively. 
""" names = set() - try: - names.update(klass.__dict__) - except AttributeError: - pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance of - # that attribute could be guarranted. - bases = klass.__bases__ - except AttributeError: - pass - else: - try: - # Note that since we are only interested in the keys, the - # order we merge classes is unimportant - for base in bases: - names.update(_classdir(base)) - except TypeError: - pass + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) return names From noreply at buildbot.pypy.org Sun Aug 3 19:50:09 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 19:50:09 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140803175009.C5BFD1C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72671:8afd884adfe3 Date: 2014-08-03 10:49 -0700 http://bitbucket.org/pypy/pypy/changeset/8afd884adfe3/ Log: merge default diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -78,22 +78,13 @@ recursively. """ names = set() - try: - names.update(klass.__dict__) - except AttributeError: - pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance of - # that attribute could be guarranted. 
- bases = klass.__bases__ - except AttributeError: - pass - else: - try: - # Note that since we are only interested in the keys, the - # order we merge classes is unimportant - for base in bases: - names.update(_classdir(base)) - except TypeError: - pass + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) return names From noreply at buildbot.pypy.org Sun Aug 3 19:50:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 19:50:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140803175010.EDF6D1C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72672:8c32f11d3e94 Date: 2014-08-03 10:49 -0700 http://bitbucket.org/pypy/pypy/changeset/8c32f11d3e94/ Log: merge py3k From noreply at buildbot.pypy.org Sun Aug 3 21:41:05 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 3 Aug 2014 21:41:05 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Make it clear that the lzma module is optional. Message-ID: <20140803194105.738041C3382@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72673:ef99392021e7 Date: 2014-08-03 21:29 +0200 http://bitbucket.org/pypy/pypy/changeset/ef99392021e7/ Log: Make it clear that the lzma module is optional. diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -61,7 +61,7 @@ bz2 libbz2 -lzma (required for PyPy3) +lzma (PyPy3 only) liblzma sqlite3 @@ -84,14 +84,14 @@ apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \ libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev -For PyPy3 you will also need ``liblzma-dev``. +For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. 
On Fedora:: yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \ lib-sqlite3-devel ncurses-devel expat-devel openssl-devel -For PyPy3 you will also need ``xz-devel``. +For the optional lzma module on PyPy3 you will also need ``xz-devel``. On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to From noreply at buildbot.pypy.org Sun Aug 3 21:41:06 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 3 Aug 2014 21:41:06 +0200 (CEST) Subject: [pypy-commit] pypy default: Redo some recent changes from the improve-docs branch. Message-ID: <20140803194106.C62AE1C3382@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r72674:78d5d873a260 Date: 2014-08-03 21:40 +0200 http://bitbucket.org/pypy/pypy/changeset/78d5d873a260/ Log: Redo some recent changes from the improve-docs branch. Original pull request by Martin Matusiak. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -74,9 +74,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - `pypy/config/pypyoption.py`_ for details. - When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -50,6 +50,8 @@ libz-dev libbz2-dev libncurses-dev libexpat1-dev \ libssl-dev libgc-dev python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. 
+ On a Fedora-16 box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ @@ -57,6 +59,8 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``xz-devel``. + On SLES11: $ sudo zypper install gcc make python-devel pkg-config \ @@ -74,6 +78,7 @@ * ``pkg-config`` (to help us locate libffi files) * ``libz-dev`` (for the optional ``zlib`` module) * ``libbz2-dev`` (for the optional ``bz2`` module) + * ``liblzma`` (for the optional ``lzma`` module, PyPy3 only) * ``libsqlite3-dev`` (for the optional ``sqlite3`` module via cffi) * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) * ``libexpat1-dev`` (for the optional ``pyexpat`` module) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -102,7 +102,7 @@ .. _Python: http://docs.python.org/index.html .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ -.. _`development bug/feature tracker`: https://bugs.pypy.org +.. _`development bug/feature tracker`: https://bitbucket.org/pypy/pypy/issues .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -15,14 +15,14 @@ * Because of the above, we are very serious about Test Driven Development. It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. 
(It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture ============ @@ -32,7 +32,7 @@ * `RPython`_ is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. @@ -40,7 +40,7 @@ .. _`RPython`: coding-guide.html#RPython -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. @@ -73,7 +73,7 @@ .. _`we have a tracing JIT`: jit/index.html -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. `Garbage collection in PyPy`_ is inserted during translation. 
Moreover, this is not reference counting; it is a real From noreply at buildbot.pypy.org Sun Aug 3 21:56:39 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 21:56:39 +0200 (CEST) Subject: [pypy-commit] pypy py3k: simplify Message-ID: <20140803195639.333361C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72675:895497f3e839 Date: 2014-08-03 10:56 -0700 http://bitbucket.org/pypy/pypy/changeset/895497f3e839/ Log: simplify diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -39,9 +39,7 @@ if len(args) > 1: raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: - local_names = list(_caller_locals().keys()) # 2 stackframes away - local_names.sort() - return local_names + return sorted(_caller_locals().keys()) # 2 stackframes away import types obj = args[0] From noreply at buildbot.pypy.org Sun Aug 3 21:56:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 21:56:40 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: integrate cleanup from default branch, simplify Message-ID: <20140803195640.9CD231C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72676:2015801d9e67 Date: 2014-08-03 11:25 -0700 http://bitbucket.org/pypy/pypy/changeset/2015801d9e67/ Log: integrate cleanup from default branch, simplify diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -129,4 +129,4 @@ if not space.isinstance_w(w_dict, space.w_dict): raise oefmt(space.w_TypeError, "%N.__dict__ is not a dictionary", self) - return space.newlist(space.listview(w_dict)) + return space.call_function(space.w_list, w_dict) diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- 
a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -44,12 +44,9 @@ return local_names obj = args[0] - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): - result = list(result) # Will throw TypeError if not iterable - result.sort() - return result - - return [] # we should never reach here since object.__dir__ exists + # Will throw TypeError if not iterable + return sorted(dir_meth()) + # we should never reach here since object.__dir__ exists + return [] diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -20,38 +20,8 @@ return w_obj.getrepr(space, u'%s object' % (classname,)) def descr__dir__(space, w_obj): - w_result = space.appexec([w_obj], """(obj): - def _classdir(klass): - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - bases = klass.__mro__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(base.__dict__) - except TypeError: pass - return Dict - - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass - result = list(Dict.keys()) - return result - """) - return w_result + from pypy.objspace.std.util import _objectdir + return space.call_function(space.w_list, _objectdir(space, w_obj)) def descr__str__(space, w_obj): w_type = space.type(w_obj) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -731,28 +731,8 @@ return space.get(w_result, space.w_None, w_type) def descr__dir(space, w_type): - 
w_result = space.appexec([w_type], """(obj): - def _classdir(klass): - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - bases = klass.__mro__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(base.__dict__) - except TypeError: pass - return Dict - - result = list(_classdir(obj).keys()) - return result - """) - return w_result + from pypy.objspace.std.util import _classdir + return space.call_function(space.w_list, _classdir(space, w_type)) def descr__flags(space, w_type): from copy_reg import _HEAPTYPE diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -24,3 +24,41 @@ where = length assert where >= 0 return where + +app = gateway.applevel(r''' + def _classdir(klass): + """__dir__ for type objects + + This includes all attributes of klass and all of the base + classes recursively. + """ + names = set() + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) + return names + + def _objectdir(obj): + """__dir__ for generic objects + + Returns __dict__, __class__ and recursively up the + __class__.__bases__ chain. 
+ """ + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) + return names +''', filename=__file__) + +_classdir = app.interphook('_classdir') +_objectdir = app.interphook('_objectdir') From noreply at buildbot.pypy.org Sun Aug 3 21:56:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 21:56:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140803195641.D79E81C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72677:7aed11f278e7 Date: 2014-08-03 11:25 -0700 http://bitbucket.org/pypy/pypy/changeset/7aed11f278e7/ Log: merge py3k diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -39,9 +39,7 @@ if len(args) > 1: raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: - local_names = list(_caller_locals().keys()) # 2 stackframes away - local_names.sort() - return local_names + return sorted(_caller_locals().keys()) # 2 stackframes away obj = args[0] dir_meth = lookup_special(obj, '__dir__') From noreply at buildbot.pypy.org Sun Aug 3 21:58:16 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 3 Aug 2014 21:58:16 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add more tests for dir(...) and .__dir__(). Message-ID: <20140803195816.0B4831C3382@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72678:0bfa6c3b47d3 Date: 2014-07-30 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/0bfa6c3b47d3/ Log: Add more tests for dir(...) and .__dir__(). 
diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -24,3 +24,84 @@ def __dir__(self): return 42 raises(TypeError, dir, Foo()) + + def test_dir_traceback(self): + """Test dir() of traceback.""" + import sys + + try: + raise IndexError + except: + assert len(dir(sys.exc_info()[2])) == 4 + + def test_dir_object_inheritance(self): + """Dir should behave the same regardless of inheriting from object.""" + class A: + pass + + class B(object): + pass + assert dir(A) == dir(B) + + def test_dir_sanity(self): + """Test that dir returns reasonable items.""" + class A(object): + a = 1 + + class B(A): + y = 2 + + b = B() + b.z = 1 + + names = dir(b) + for name in 'ayz': + assert name in names + + assert '__doc__' in names + assert '__module__' in names + assert '__dict__' in names + assert '__dir__' in names + assert '__weakref__' in names + assert '__class__' in names + assert '__format__' in names + # Not an exhaustive list, but will be enough if dir is very broken. + + def test_dir_module(self): + import sys + assert dir(sys) == list(sorted(sys.__dict__)) + + def test_dir_list(self): + """Check that dir([]) has methods from list and from object.""" + names = dir([]) + + dct = {} + dct.update(list.__dict__) + dct.update(object.__dict__) + + assert names == sorted(dct) + + def test_dir_builtins(self): + """Test that builtin objects have sane __dir__().""" + import sys + + def check_dir(obj): + print(dir(obj)) + assert sorted(obj.__dir__()) == dir(obj) + + for builtin in [sys, object(), [], {}, {1}, "", 1, (), sys, + map(ord, "abc"), filter(None, "abc"), zip([1, 2], [3, 4]), + compile('1', '', 'exec')]: + check_dir(builtin) + + def test_dir_type(self): + """Test .__dir__() and dir(...) behavior on types. + + * t.__dir__() throws a TypeError, + * dir(t) == sorted(t().__dir__()) + + This is the behavior that I observe with cpython3.3.2. 
+ """ + for t in [int, list, tuple, set, str]: + raises(TypeError, t.__dir__) + assert dir(t) == sorted(t().__dir__()) From noreply at buildbot.pypy.org Sun Aug 3 21:58:17 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 3 Aug 2014 21:58:17 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Implement custom __dir__ for tracebacks following cpython example. Message-ID: <20140803195817.7379B1C3382@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72679:2a7237fb5557 Date: 2014-08-02 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/2a7237fb5557/ Log: Implement custom __dir__ for tracebacks following cpython example. diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -50,6 +50,10 @@ self.lasti = space.int_w(w_lasti) self.next = space.interp_w(PyTraceback, w_next, can_be_None=True) + def descr__dir__(self, space): + return space.newtuple([space.wrap(n) for n in + ['tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno']]) + def record_application_traceback(space, operror, frame, last_instruction): if frame.pycode.hidden_applevel: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -910,6 +910,7 @@ PyTraceback.typedef = TypeDef("traceback", __reduce__ = interp2app(PyTraceback.descr__reduce__), __setstate__ = interp2app(PyTraceback.descr__setstate__), + __dir__ = interp2app(PyTraceback.descr__dir__), tb_frame = interp_attrproperty('frame', cls=PyTraceback), tb_lasti = interp_attrproperty('lasti', cls=PyTraceback), tb_lineno = GetSetProperty(PyTraceback.descr_tb_lineno), diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -32,7 +32,9 @@ try: raise IndexError except: - assert len(dir(sys.exc_info()[2])) == 4 + 
tb_dir = dir(sys.exc_info()[2]) + assert tb_dir == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next'] + def test_dir_object_inheritance(self): """Dir should behave the same regardless of inheriting from object.""" From noreply at buildbot.pypy.org Sun Aug 3 21:58:18 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 3 Aug 2014 21:58:18 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Upgrade the warning from format(object, fmt_str) with non-empty format string. Message-ID: <20140803195818.A8C231C3382@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72680:3b7ff7cf933b Date: 2014-08-02 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/3b7ff7cf933b/ Log: Upgrade the warning from format(object, fmt_str) with non-empty format string. diff --git a/pypy/module/__builtin__/test/test_format.py b/pypy/module/__builtin__/test/test_format.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_format.py @@ -0,0 +1,38 @@ +class AppTestFormat: + + def test_format(self): + """Test deprecation warnings from format(object(), 'nonempty')""" + + import warnings + + def test_deprecated(obj, fmt_str, should_raise_warning): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", DeprecationWarning) + format(obj, fmt_str) + if should_raise_warning: + assert len(w) == 1 + assert isinstance(w[0].message, DeprecationWarning) + assert 'object.__format__ with a non-empty format string '\ + in str(w[0].message) + else: + assert len(w) == 0 + + fmt_strs = ['', 's'] + + class A: + def __format__(self, fmt_str): + return format('', fmt_str) + + for fmt_str in fmt_strs: + test_deprecated(A(), fmt_str, False) + + class B: + pass + + class C(object): + pass + + for cls in [object, B, C]: + for fmt_str in fmt_strs: + print(cls, fmt_str) + test_deprecated(cls(), fmt_str, len(fmt_str) != 0) diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ 
b/pypy/objspace/std/objecttype.py @@ -154,7 +154,7 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) if space.len_w(w_format_spec) > 0: msg = "object.__format__ with a non-empty format string is deprecated" - space.warn(space.wrap(msg), space.w_PendingDeprecationWarning) + space.warn(space.wrap(msg), space.w_DeprecationWarning) return space.format(w_as_str, w_format_spec) def descr___subclasshook__(space, __args__): From noreply at buildbot.pypy.org Sun Aug 3 21:58:20 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 3 Aug 2014 21:58:20 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Fix test_open_default_encoding in lib-python/3/test/test_builtin.py. Message-ID: <20140803195820.016291C3382@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72681:bd4891f611bd Date: 2014-08-02 15:49 +0200 http://bitbucket.org/pypy/pypy/changeset/bd4891f611bd/ Log: Fix test_open_default_encoding in lib-python/3/test/test_builtin.py. diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -287,7 +287,8 @@ try: w_locale = space.call_method(space.builtin, '__import__', space.wrap('locale')) - w_encoding = space.call_method(w_locale, 'getpreferredencoding') + w_encoding = space.call_method(w_locale, 'getpreferredencoding', + space.w_False) except OperationError as e: # getpreferredencoding() may also raise ImportError if not e.match(space, space.w_ImportError): From noreply at buildbot.pypy.org Sun Aug 3 21:58:21 2014 From: noreply at buildbot.pypy.org (kvas) Date: Sun, 3 Aug 2014 21:58:21 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Remove debugging help in the test. Message-ID: <20140803195821.3BA3A1C3382@cobra.cs.uni-duesseldorf.de> Author: Vasily Kuznetsov Branch: py3.3 Changeset: r72682:8e99253b142e Date: 2014-08-02 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/8e99253b142e/ Log: Remove debugging help in the test. 
diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -87,14 +87,10 @@ """Test that builtin objects have sane __dir__().""" import sys - def check_dir(obj): - print(dir(obj)) - assert sorted(obj.__dir__()) == dir(obj) - for builtin in [sys, object(), [], {}, {1}, "", 1, (), sys, map(ord, "abc"), filter(None, "abc"), zip([1, 2], [3, 4]), compile('1', '', 'exec')]: - check_dir(builtin) + assert sorted(builtin.__dir__()) == dir(builtin) def test_dir_type(self): """Test .__dir__() and dir(...) behavior on types. From noreply at buildbot.pypy.org Sun Aug 3 21:58:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 21:58:22 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in kvas/pypy/py3.3 (pull request #261) Message-ID: <20140803195822.6F0181C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72683:799debfe0709 Date: 2014-08-03 12:57 -0700 http://bitbucket.org/pypy/pypy/changeset/799debfe0709/ Log: Merged in kvas/pypy/py3.3 (pull request #261) Fixes for remaining test failures in test_builtin.py diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -50,6 +50,10 @@ self.lasti = space.int_w(w_lasti) self.next = space.interp_w(PyTraceback, w_next, can_be_None=True) + def descr__dir__(self, space): + return space.newtuple([space.wrap(n) for n in + ['tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno']]) + def record_application_traceback(space, operror, frame, last_instruction): if frame.pycode.hidden_applevel: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -910,6 +910,7 @@ PyTraceback.typedef = TypeDef("traceback", __reduce__ = interp2app(PyTraceback.descr__reduce__), __setstate__ = 
interp2app(PyTraceback.descr__setstate__), + __dir__ = interp2app(PyTraceback.descr__dir__), tb_frame = interp_attrproperty('frame', cls=PyTraceback), tb_lasti = interp_attrproperty('lasti', cls=PyTraceback), tb_lineno = GetSetProperty(PyTraceback.descr_tb_lineno), diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -24,3 +24,82 @@ def __dir__(self): return 42 raises(TypeError, dir, Foo()) + + def test_dir_traceback(self): + """Test dir() of traceback.""" + import sys + + try: + raise IndexError + except: + tb_dir = dir(sys.exc_info()[2]) + assert tb_dir == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next'] + + + def test_dir_object_inheritance(self): + """Dir should behave the same regardless of inheriting from object.""" + class A: + pass + + class B(object): + pass + assert dir(A) == dir(B) + + def test_dir_sanity(self): + """Test that dir returns reasonable items.""" + class A(object): + a = 1 + + class B(A): + y = 2 + + b = B() + b.z = 1 + + names = dir(b) + for name in 'ayz': + assert name in names + + assert '__doc__' in names + assert '__module__' in names + assert '__dict__' in names + assert '__dir__' in names + assert '__weakref__' in names + assert '__class__' in names + assert '__format__' in names + # Not an exhaustive list, but will be enough if dir is very broken. 
+ + def test_dir_module(self): + import sys + assert dir(sys) == list(sorted(sys.__dict__)) + + def test_dir_list(self): + """Check that dir([]) has methods from list and from object.""" + names = dir([]) + + dct = {} + dct.update(list.__dict__) + dct.update(object.__dict__) + + assert names == sorted(dct) + + def test_dir_builtins(self): + """Test that builtin objects have sane __dir__().""" + import sys + + for builtin in [sys, object(), [], {}, {1}, "", 1, (), sys, + map(ord, "abc"), filter(None, "abc"), zip([1, 2], [3, 4]), + compile('1', '', 'exec')]: + assert sorted(builtin.__dir__()) == dir(builtin) + + def test_dir_type(self): + """Test .__dir__() and dir(...) behavior on types. + + * t.__dir__() throws a TypeError, + * dir(t) == sorted(t().__dir__()) + + This is the behavior that I observe with cpython3.3.2. + """ + for t in [int, list, tuple, set, str]: + raises(TypeError, t.__dir__) + assert dir(t) == sorted(t().__dir__()) diff --git a/pypy/module/__builtin__/test/test_format.py b/pypy/module/__builtin__/test/test_format.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_format.py @@ -0,0 +1,38 @@ +class AppTestFormat: + + def test_format(self): + """Test deprecation warnings from format(object(), 'nonempty')""" + + import warnings + + def test_deprecated(obj, fmt_str, should_raise_warning): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", DeprecationWarning) + format(obj, fmt_str) + if should_raise_warning: + assert len(w) == 1 + assert isinstance(w[0].message, DeprecationWarning) + assert 'object.__format__ with a non-empty format string '\ + in str(w[0].message) + else: + assert len(w) == 0 + + fmt_strs = ['', 's'] + + class A: + def __format__(self, fmt_str): + return format('', fmt_str) + + for fmt_str in fmt_strs: + test_deprecated(A(), fmt_str, False) + + class B: + pass + + class C(object): + pass + + for cls in [object, B, C]: + for fmt_str in fmt_strs: + print(cls, fmt_str) + 
test_deprecated(cls(), fmt_str, len(fmt_str) != 0) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -287,7 +287,8 @@ try: w_locale = space.call_method(space.builtin, '__import__', space.wrap('locale')) - w_encoding = space.call_method(w_locale, 'getpreferredencoding') + w_encoding = space.call_method(w_locale, 'getpreferredencoding', + space.w_False) except OperationError as e: # getpreferredencoding() may also raise ImportError if not e.match(space, space.w_ImportError): diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py --- a/pypy/objspace/std/objecttype.py +++ b/pypy/objspace/std/objecttype.py @@ -124,7 +124,7 @@ raise OperationError(space.w_TypeError, space.wrap(msg)) if space.len_w(w_format_spec) > 0: msg = "object.__format__ with a non-empty format string is deprecated" - space.warn(space.wrap(msg), space.w_PendingDeprecationWarning) + space.warn(space.wrap(msg), space.w_DeprecationWarning) return space.format(w_as_str, w_format_spec) def descr___subclasshook__(space, __args__): From noreply at buildbot.pypy.org Sun Aug 3 22:05:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 22:05:28 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: might as well return a list to match cpython Message-ID: <20140803200528.4BEB41C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72684:2c1d4539a787 Date: 2014-08-03 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/2c1d4539a787/ Log: might as well return a list to match cpython diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -51,8 +51,8 @@ self.next = space.interp_w(PyTraceback, w_next, can_be_None=True) def descr__dir__(self, space): - return space.newtuple([space.wrap(n) for n in - ['tb_frame', 'tb_next', 'tb_lasti', 
'tb_lineno']]) + return space.newlist([space.wrap(n) for n in + ('tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno')]) def record_application_traceback(space, operror, frame, last_instruction): From noreply at buildbot.pypy.org Sun Aug 3 22:05:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 22:05:29 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: cleanup Message-ID: <20140803200529.A619B1C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72685:ea9862aff3ad Date: 2014-08-03 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/ea9862aff3ad/ Log: cleanup diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -27,15 +27,12 @@ def test_dir_traceback(self): """Test dir() of traceback.""" - import sys - try: raise IndexError - except: - tb_dir = dir(sys.exc_info()[2]) + except Exception as e: + tb_dir = dir(e.__traceback__) assert tb_dir == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next'] - def test_dir_object_inheritance(self): """Dir should behave the same regardless of inheriting from object.""" class A: diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -288,7 +288,7 @@ w_locale = space.call_method(space.builtin, '__import__', space.wrap('locale')) w_encoding = space.call_method(w_locale, 'getpreferredencoding', - space.w_False) + space.w_False) except OperationError as e: # getpreferredencoding() may also raise ImportError if not e.match(space, space.w_ImportError): From noreply at buildbot.pypy.org Sun Aug 3 22:07:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 3 Aug 2014 22:07:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: no longer a need for this workaround Message-ID: <20140803200711.A64531C3382@cobra.cs.uni-duesseldorf.de> Author: Philip 
Jenvey Branch: py3.3 Changeset: r72686:531fb5ccd8c9 Date: 2014-08-03 13:06 -0700 http://bitbucket.org/pypy/pypy/changeset/531fb5ccd8c9/ Log: no longer a need for this workaround diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -424,9 +424,7 @@ try: raise IndexError except: - methods = [meth for meth in dir(sys.exc_info()[2]) - if not meth.startswith('_')] - self.assertEqual(len(methods), 4) + self.assertEqual(len(dir(sys.exc_info()[2])), 4) # test that object has a __dir__() self.assertEqual(sorted([].__dir__()), dir([])) From noreply at buildbot.pypy.org Mon Aug 4 01:35:11 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 4 Aug 2014 01:35:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: issue1835: (numerodix) fix dir(None) Message-ID: <20140803233511.258241C3382@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72687:b8010ea183a7 Date: 2014-08-03 16:01 -0700 http://bitbucket.org/pypy/pypy/changeset/b8010ea183a7/ Log: issue1835: (numerodix) fix dir(None) diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -44,7 +44,9 @@ obj = args[0] dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: + # obscure: lookup_special won't bind None.__dir__! 
+ result = dir_meth(obj) if obj is None else dir_meth() # Will throw TypeError if not iterable - return sorted(dir_meth()) + return sorted(result) # we should never reach here since object.__dir__ exists return [] diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -100,3 +100,6 @@ for t in [int, list, tuple, set, str]: raises(TypeError, t.__dir__) assert dir(t) == sorted(t().__dir__()) + + def test_dir_none(self): + assert dir(None) == sorted(None.__dir__()) From noreply at buildbot.pypy.org Mon Aug 4 01:55:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 4 Aug 2014 01:55:52 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix tests with py2 metaclass syntax that's broken on py3 Message-ID: <20140803235552.4774F1D2488@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72688:437cea3ad956 Date: 2014-08-03 16:54 -0700 http://bitbucket.org/pypy/pypy/changeset/437cea3ad956/ Log: fix tests with py2 metaclass syntax that's broken on py3 diff --git a/pypy/interpreter/test/test_raise.py b/pypy/interpreter/test/test_raise.py --- a/pypy/interpreter/test/test_raise.py +++ b/pypy/interpreter/test/test_raise.py @@ -256,6 +256,7 @@ fail("Did not raise") def test_obscure_bases(self): + """ # this test checks bug-to-bug cpython compatibility e = ValueError() e.__bases__ = (5,) @@ -267,20 +268,21 @@ # explodes on CPython and py.test, not sure why flag = False - class A(BaseException): - class __metaclass__(type): - def __getattribute__(self, name): - if flag and name == '__bases__': - fail("someone read bases attr") - else: - return type.__getattribute__(self, name) - + class metaclass(type): + def __getattribute__(self, name): + if flag and name == '__bases__': + fail("someone read bases attr") + else: + return type.__getattribute__(self, name) + class A(BaseException, metaclass=metaclass): + pass try: a 
= A() flag = True raise a except A: pass + """ def test_new_returns_bad_instance(self): class MyException(Exception): diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -224,13 +224,15 @@ assert dir(Foo("a_mod")) == ["blah"] def test_dir_custom_lookup(self): + """ class M(type): def __dir__(self, *args): return ["14"] - class X(object): - __metaclass__ = M + class X(metaclass=M): + pass x = X() x.__dir__ = lambda x: ["14"] assert dir(x) != ["14"] + """ def test_format(self): assert format(4) == "4" diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -852,9 +852,11 @@ assert res == (0, 1, 0) def test_custom_metaclass(self): - class A(object): - class __metaclass__(type): - pass + """ + class metaclass(type): + pass + class A(metaclass=metaclass): + pass a = A() a.x = 42 def f(): @@ -866,6 +868,7 @@ assert res == (0, 1, 0) res = self.check(f, 'x') assert res == (0, 1, 0) + """ def test_old_style_base(self): skip('py3k no longer has old style classes') diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -19,8 +19,8 @@ class Meta(type): def __subclasscheck__(mcls, cls): return False - class Base: - __metaclass__ = Meta + class Base(metaclass=Meta): + pass class Sub(Base): pass return Base, Sub""") @@ -306,17 +306,20 @@ raises(TypeError, operate, A()) def test_missing_getattribute(self): + """ class X(object): pass - class Y(X): - class __metaclass__(type): - def mro(cls): - return [cls, X] + class metaclass(type): + def mro(cls): + return [cls, X] + class Y(X, metaclass=metaclass): + pass x = X() x.__class__ = Y raises(AttributeError, 
getattr, x, 'a') + """ def test_unordeable_types(self): class A(object): pass From noreply at buildbot.pypy.org Mon Aug 4 06:48:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 4 Aug 2014 06:48:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix translation Message-ID: <20140804044836.AD94C1C038C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72689:a4092f8a43f8 Date: 2014-08-03 21:48 -0700 http://bitbucket.org/pypy/pypy/changeset/a4092f8a43f8/ Log: fix translation diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -52,7 +52,7 @@ def descr__dir__(self, space): return space.newlist([space.wrap(n) for n in - ('tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno')]) + ['tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno']]) def record_application_traceback(space, operror, frame, last_instruction): From noreply at buildbot.pypy.org Mon Aug 4 16:31:23 2014 From: noreply at buildbot.pypy.org (waedt) Date: Mon, 4 Aug 2014 16:31:23 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Backout 92302cdd34ec - magic methods turned out to be unpopular Message-ID: <20140804143123.969D81C0588@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72690:4ad5118d6c66 Date: 2014-07-31 04:24 -0500 http://bitbucket.org/pypy/pypy/changeset/4ad5118d6c66/ Log: Backout 92302cdd34ec - magic methods turned out to be unpopular diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -718,19 +718,6 @@ thistype = pairtype(SomeInstance, SomeInstance) return super(thistype, pair(ins1, ins2)).improve() - def eq((s_obj1, s_obj2)): - if s_obj1.classdef.classdesc.lookup('__eq__'): - return s_obj1._emulate_call("__eq__", s_obj2) - elif s_obj2.classdef.classdesc.lookup('__eq__'): - return s_obj2._emulate_call("__eq__", s_obj1) - return s_Bool 
- - def ne((s_obj1, s_obj2)): - if s_obj1.classdef.classdesc.lookup('__ne__'): - return s_obj1._emulate_call("__ne__", s_obj2) - elif s_obj2.classdef.classdesc.lookup('__ne__'): - return s_obj2._emulate_call("__ne__", s_obj1) - return s_Bool class __extend__(pairtype(SomeInstance, SomeObject)): def getitem((s_ins, s_idx)): @@ -739,33 +726,6 @@ def setitem((s_ins, s_idx), s_value): return s_ins._emulate_call("__setitem__", s_idx, s_value) - def add((s_ins, s_other)): - return s_ins._emulate_call("__add__", s_other) - - def mul((s_ins, s_other)): - return s_ins._emulate_call("__mul__", s_other) - - def eq((s_ins, s_obj)): - if s_ins.classdef.classdesc.lookup('__eq__'): - return s_ins._emulate_call("__eq__", s_obj) - return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).eq() - - def ne((s_ins, s_obj)): - if s_ins.classdef.classdesc.lookup('__ne__'): - return s_ins._emulate_call("__ne__", s_obj) - return super(pairtype(SomeInstance, SomeObject), pair(s_ins, s_obj)).ne() - -class __extend__(pairtype(SomeObject, SomeInstance)): - def eq((s_obj, s_ins)): - if s_ins.classdef.classdesc.lookup('__eq__'): - return s_ins._emulate_call("__eq__", s_obj) - return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).eq() - - def ne((s_obj, s_ins)): - if s_ins.classdef.classdesc.lookup('__ne__'): - return s_ins._emulate_call("__ne__", s_obj) - return super(pairtype(SomeObject, SomeInstance), pair(s_obj, s_ins)).ne() - class __extend__(pairtype(SomeIterator, SomeIterator)): diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -476,17 +476,6 @@ if self.pyobj not in classdef.FORCE_ATTRIBUTES_INTO_CLASSES: self.all_enforced_attrs = [] # no attribute allowed - if (self.lookup('__eq__') and - not all(b.lookup('__eq__') for b in self.getallbases())): - raise AnnotatorError("A class may only define a __eq__ method if " - "the class at the base of its 
heirarchy also " - "has a __eq__ method.") - if (self.lookup('__ne__') and - not all(b.lookup('__ne__') for b in self.getallbases())): - raise AnnotatorError("A class may only define a __ne__ method if " - "the class at the base of its heirarchy also " - "has a __ne__ method.") - def add_source_attribute(self, name, value, mixin=False): if isinstance(value, types.FunctionType): # for debugging diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2780,42 +2780,6 @@ s = a.build_types(f, []) assert s.knowntype == int - def test__eq__in_sub_class(self): - class Base(object): - pass - class A(Base): - def __eq__(self, other): - return True - - def f(a): - if a: - o = Base() - else: - o = A() - - return o == Base() - - a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) - - def test__ne__in_sub_class(self): - class Base(object): - pass - class A(Base): - def __ne__(self, other): - return True - - def f(a): - if a: - o = Base() - else: - o = A() - - return o != Base() - - a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) - def test_chr_out_of_bounds(self): def g(n, max): if n < max: diff --git a/rpython/rtyper/lltypesystem/rclass.py b/rpython/rtyper/lltypesystem/rclass.py --- a/rpython/rtyper/lltypesystem/rclass.py +++ b/rpython/rtyper/lltypesystem/rclass.py @@ -657,8 +657,10 @@ r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor) return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop) - def _rtype_ne(rpair, hop): - v = rpair.rtype_is_(hop) + rtype_eq = rtype_is_ + + def rtype_ne(rpair, hop): + v = rpair.rtype_eq(hop) return hop.genop("bool_not", [v], resulttype=Bool) # ____________________________________________________________ diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py 
+++ b/rpython/rtyper/rclass.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.lltype import Void from rpython.rtyper.rmodel import Repr, getgcflavor, inputconst from rpython.rlib.objectmodel import UnboxedValue -from rpython.tool.pairtype import pair, pairtype +from rpython.tool.pairtype import pairtype class FieldListAccessor(object): @@ -471,77 +471,14 @@ break -def create_forwarding_func(name): - def f((r_ins, r_obj), hop): - return r_ins._emulate_call(hop, name) - return f +class __extend__(pairtype(AbstractInstanceRepr, Repr)): + def rtype_getitem((r_ins, r_obj), hop): + return r_ins._emulate_call(hop, "__getitem__") -class __extend__(pairtype(AbstractInstanceRepr, Repr)): - rtype_getitem = create_forwarding_func('__getitem__') - rtype_setitem = create_forwarding_func('__setitem__') - rtype_add = create_forwarding_func('__add__') - rtype_mul = create_forwarding_func('__mul__') + def rtype_setitem((r_ins, r_obj), hop): + return r_ins._emulate_call(hop, "__setitem__") - rtype_inplace_add = rtype_add - rtype_inplace_mul = rtype_mul - def rtype_eq((r_ins, r_other), hop): - if r_ins.classdef.classdesc.lookup('__eq__'): - return r_ins._emulate_call(hop, '__eq__') - return super(pairtype(AbstractInstanceRepr, Repr), - pair(r_ins, r_other)).rtype_eq(hop) - - def rtype_ne((r_ins, r_other), hop): - if r_ins.classdef.classdesc.lookup('__ne__'): - return r_ins._emulate_call(hop, '__ne__') - return super(pairtype(AbstractInstanceRepr, Repr), - pair(r_ins, r_other)).rtype_ne(hop) - -class __extend__(pairtype(AbstractInstanceRepr, AbstractInstanceRepr)): - def rtype_eq((r_ins, r_other), hop): - if r_ins.classdef.classdesc.lookup('__eq__'): - return r_ins._emulate_call(hop, '__eq__') - elif r_other.classdef.classdesc.lookup('__eq__'): - # Reverse the order of the arguments before the call to __eq__ - hop2 = hop.copy() - hop2.args_r = hop.args_r[::-1] - hop2.args_s = hop.args_s[::-1] - hop2.args_v = hop.args_v[::-1] - return r_other._emulate_call(hop2, '__eq__') - return 
pair(r_ins, r_other).rtype_is_(hop) - - def rtype_ne((r_ins, r_other), hop): - if r_ins.classdef.classdesc.lookup('__ne__'): - return r_ins._emulate_call(hop, '__ne__') - elif r_other.classdef.classdesc.lookup('__ne__'): - # Reverse the order of the arguments before the call to __ne__ - hop2 = hop.copy() - hop2.args_r = hop.args_r[::-1] - hop2.args_s = hop.args_s[::-1] - hop2.args_v = hop.args_v[::-1] - return r_other._emulate_call(hop2, '__ne__') - return pair(r_ins, r_other)._rtype_ne(hop) - -class __extend__(pairtype(Repr, AbstractInstanceRepr)): - def rtype_eq((r_other, r_ins), hop): - if r_ins.classdef.classdesc.lookup('__eq__'): - hop2 = hop.copy() - hop2.args_r = hop.args_r[::-1] - hop2.args_s = hop.args_s[::-1] - hop2.args_v = hop.args_v[::-1] - return r_ins._emulate_call(hop2, '__eq__') - return super(pairtype(Repr, AbstractInstanceRepr), - pair(r_other, r_ins)).rtype_eq(hop) - - def rtype_ne((r_other, r_ins), hop): - if r_ins.classdef.classdesc.lookup('__ne__'): - hop2 = hop.copy() - hop2.args_r = hop.args_r[::-1] - hop2.args_s = hop.args_s[::-1] - hop2.args_v = hop.args_v[::-1] - return r_ins._emulate_call(hop2, '__ne__') - return super(pairtype(Repr, AbstractInstanceRepr), - pair(r_other, r_ins)).rtype_ne(hop) # ____________________________________________________________ diff --git a/rpython/rtyper/test/test_rclass.py b/rpython/rtyper/test/test_rclass.py --- a/rpython/rtyper/test/test_rclass.py +++ b/rpython/rtyper/test/test_rclass.py @@ -1271,87 +1271,3 @@ return cls[k](a, b).b assert self.interpret(f, [1, 4, 7]) == 7 - - def test_overriding_eq(self): - class Base(object): - def __eq__(self, other): - return self is other - class A(Base): - def __eq__(self, other): - return True - - def f(a): - if a: - o = Base() - else: - o = A() - - return o == Base() - - assert self.interpret(f, [0]) == f(0) - assert self.interpret(f, [1]) == f(1) - - def test_eq_reversed(self): - class A(object): - def __eq__(self, other): - return not bool(other) - - def f(a): - 
return (a == A()) == (A() == a) - assert self.interpret(f, [0]) == f(0) - assert self.interpret(f, [1]) == f(1) - - def test_eq_without_ne(self): - class A(object): - def __eq__(self, other): - return False - - def f(): - a = A() - return a != A() - - assert self.interpret(f, []) == f() - - def test_overriding_ne(self): - class Base(object): - def __ne__(self, other): - return self is other - class A(Base): - def __ne__(self, other): - return True - - def f(a): - if a: - o = Base() - else: - o = A() - - return o != Base() - - assert self.interpret(f, [0]) == f(0) - assert self.interpret(f, [1]) == f(1) - - def test_ne_reversed(self): - class A(object): - def __ne__(self, other): - return not bool(other) - - def f(a): - return (a != A()) == (A() != a) - assert self.interpret(f, [0]) == f(0) - assert self.interpret(f, [1]) == f(1) - - def test_arithmetic_ops(self): - class A(object): - def __add__(self, other): - return other + other - - def __mul__(self, other): - return other * other - - def f(a): - o = A() - return (o + a) + (o * a) - - for i in range(10): - assert self.interpret(f, [i]) == f(i) From noreply at buildbot.pypy.org Mon Aug 4 16:31:25 2014 From: noreply at buildbot.pypy.org (waedt) Date: Mon, 4 Aug 2014 16:31:25 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: WIP fixing translation Message-ID: <20140804143125.0C9191C0588@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72691:8a2f88e6348d Date: 2014-08-04 09:26 -0500 http://bitbucket.org/pypy/pypy/changeset/8a2f88e6348d/ Log: WIP fixing translation diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -243,24 +243,4 @@ rffi.free_wcharp(wcharp) -def test_translate_utf8(): - def f(): - s = build_utf8str() - s *= 10 - s += Utf8Str('one') - return len(s) - assert interpret(f, []) == f() - - def f(): - one = Utf8Str("one") - two = Utf8Str("one") - - 
return int(one == two) + int(not (one != two)) - assert interpret(f, []) == f() - - def f(): - one = Utf8Str("one") - - return one == None - assert interpret(f, []) == f() diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -1,9 +1,11 @@ from rpython.rlib.rstring import StringBuilder -from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rlib.objectmodel import ( + we_are_translated, specialize, import_from_mixin) from rpython.rlib.runicode import utf8_code_length from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rlib.rarithmetic import r_uint, intmask, base_int from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.tool.sourcetools import func_with_new_name wchar_rint = rffi.r_uint @@ -26,21 +28,24 @@ codepoint_length = utf8_code_length[ord(bytes[start])] if codepoint_length == 1: - return ord(bytes[start]) + res = ord(bytes[start]) elif codepoint_length == 2: - return ((ord(bytes[start]) & 0x1F) << 6 | - (ord(bytes[start + 1]) & 0x3F)) + res = ((ord(bytes[start]) & 0x1F) << 6 | + (ord(bytes[start + 1]) & 0x3F)) elif codepoint_length == 3: - return ((ord(bytes[start]) & 0xF) << 12 | - (ord(bytes[start + 1]) & 0x3F) << 6 | - (ord(bytes[start + 2]) & 0x3F)) + res = ((ord(bytes[start]) & 0xF) << 12 | + (ord(bytes[start + 1]) & 0x3F) << 6 | + (ord(bytes[start + 2]) & 0x3F)) else: assert codepoint_length == 4 - return ((ord(bytes[start]) & 0xF) << 18 | - (ord(bytes[start + 1]) & 0x3F) << 12 | - (ord(bytes[start + 2]) & 0x3F) << 6 | - (ord(bytes[start + 3]) & 0x3F)) + res = ((ord(bytes[start]) & 0xF) << 18 | + (ord(bytes[start + 1]) & 0x3F) << 12 | + (ord(bytes[start + 2]) & 0x3F) << 6 | + (ord(bytes[start + 3]) & 0x3F)) + + assert res >= 0 + return res def utf8ord(ustr, start=0): start = ustr.index_of_char(start) @@ -53,6 +58,45 @@ else: return ord(s[pos]) + at specialize.argtype(0) +def EQ(s1, s2): + if s1 is None: + return 
s1 is s2 + if isinstance(s1, Utf8Str): + return s1.__eq__(s2) + else: + return s1 == s2 + + at specialize.argtype(0) +def NE(s1, s2): + if s1 is None: + return s1 is not s2 + if isinstance(s1, Utf8Str): + return s1.__ne__(s2) + else: + return s1 != s2 + + at specialize.argtype(0) +def ADD(s1, s2): + if isinstance(s1, Utf8Str): + return s1.__add__(s2) + else: + return s1 + s2 + + at specialize.argtype(0) +def MUL(s1, s2): + if isinstance(s1, Utf8Str): + return s1.__mul__(s2) + else: + return s1 * s2 + + at specialize.argtype(0, 1) +def IN(s1, s2): + if isinstance(s1, Utf8Str): + return s2.__contains__(s1) + else: + return s1 in s2 + class Utf8Str(object): _immutable_fields_ = ['bytes', '_is_ascii', '_len'] @@ -69,7 +113,6 @@ self._len = length else: if not is_ascii: - #self._len = -1 self._calc_length() else: self._len = len(data) @@ -112,14 +155,22 @@ char_pos += self._len return self[char_pos:char_pos+1] + @specialize.argtype(1, 2) def __getslice__(self, start, stop): + if start is None: + start = 0 + if stop is None: + stop = len(self) + + assert start >= 0 assert start <= stop + if start == stop: return Utf8Str('') - # TODO: If start > _len or stop >= _len, then raise exception if stop > len(self): stop = len(self) + assert stop >= 0 if self._is_ascii: return Utf8Str(self.bytes[start:stop], True) @@ -155,6 +206,7 @@ return Utf8Str(self.bytes * count, self._is_ascii) def __len__(self): + assert self._len >= 0 return self._len def __hash__(self): @@ -252,13 +304,12 @@ else: end = self.index_of_char(end) - assert start >= 0 return start, end - @specialize.argtype(2, 3) + @specialize.argtype(1, 2, 3) def find(self, other, start=None, end=None): start, end = self._bound_check(start, end) - if start == -1: + if start < 0: return -1 if isinstance(other, Utf8Str): @@ -275,17 +326,18 @@ return self.char_index_of_byte(pos) - @specialize.argtype(2, 3) + @specialize.argtype(1, 2, 3) def rfind(self, other, start=None, end=None): start, end = self._bound_check(start, end) - 
if start == -1: + if start < 0: return -1 if isinstance(other, Utf8Str): pos = self.bytes.rfind(other.bytes, start, end) elif isinstance(other, unicode): return unicode(self.bytes, 'utf8').rfind(other, start, end) - elif isinstance(other, str): + else: + assert isinstance(other, str) pos = self.bytes.rfind(other, start, end) if pos == -1: @@ -293,17 +345,18 @@ return self.char_index_of_byte(pos) - @specialize.argtype(2, 3) + @specialize.argtype(1, 2, 3) def count(self, other, start=None, end=None): start, end = self._bound_check(start, end) - if start == -1: + if start < 0: return 0 if isinstance(other, Utf8Str): count = self.bytes.count(other.bytes, start, end) elif isinstance(other, unicode): return unicode(self.bytes, 'utf8').count(other, start, end) - elif isinstance(other, str): + else: + assert isinstance(other, str) count = self.bytes.count(other, start, end) if count == -1: @@ -319,7 +372,8 @@ if other is not None: if isinstance(other, str): other_bytes = other - if isinstance(other, Utf8Str): + else: + assert isinstance(other, Utf8Str) other_bytes = other.bytes return [Utf8Str(s) for s in self.bytes.split(other_bytes, maxsplit)] @@ -334,6 +388,7 @@ break start_byte = iter.byte_pos + assert start_byte >= 0 if maxsplit == 0: res.append(Utf8Str(self.bytes[start_byte:len(self.bytes)], @@ -349,8 +404,9 @@ self._is_ascii)) break - res.append(Utf8Str(self.bytes[start_byte:iter.byte_pos], - self._is_ascii)) + end = iter.byte_pos + assert end >= 0 + res.append(Utf8Str(self.bytes[start_byte:end], self._is_ascii)) maxsplit -= 1 return res @@ -360,7 +416,8 @@ if other is not None: if isinstance(other, str): other_bytes = other - if isinstance(other, Utf8Str): + else: + assert isinstance(other, Utf8Str) other_bytes = other.bytes return [Utf8Str(s) for s in self.bytes.rsplit(other_bytes, maxsplit)] @@ -397,21 +454,22 @@ res.reverse() return res - @specialize.argtype(1) + #@specialize.argtype(1) def join(self, other): if len(other) == 0: return Utf8Str('') if 
isinstance(other[0], Utf8Str): - return Utf8Str( - self.bytes.join([s.bytes for s in other]), - self._is_ascii and all(s._is_ascii for s in other) - ) + is_ascii = self._is_ascii + if is_ascii: + for s in other: + if not s._is_ascii: + is_ascii = False + break + return Utf8Str(self.bytes.join([s.bytes for s in other]), is_ascii) else: - return Utf8Str( - self.bytes.join([s for s in other]), - self._is_ascii and all(s._is_ascii for s in other) - ) + return Utf8Str(self.bytes.join([s for s in other])) + join._annspecialcase_ = 'specialize:arglistitemtype(1)' def as_unicode(self): """NOT_RPYTHON""" @@ -423,6 +481,7 @@ return Utf8Str(u.encode('utf-8')) def next_char(self, byte_pos): + assert byte_pos >= 0 return byte_pos + utf8_code_length[ord(self.bytes[byte_pos])] def prev_char(self, byte_pos): @@ -558,6 +617,7 @@ else: self._builder = StringBuilder(init_size) self._is_ascii = True + self._length = 0 @specialize.argtype(1) @@ -566,9 +626,11 @@ self._builder.append(c.bytes) if not c._is_ascii: self._is_ascii = False - elif isinstance(c, int) or isinstance(c, r_uint): - if isinstance(c, base_int): - c = intmask(c) + self._length += len(c) + + elif isinstance(c, int) or isinstance(c, base_int): + c = intmask(c) + if c < 0x80: self._builder.append(chr(c)) elif c < 0x800: @@ -588,12 +650,19 @@ self._is_ascii = False else: raise ValueError("Invalid unicode codepoint > 0x10FFFF.") - else: + self._length += 1 + elif isinstance(c, str): # TODO: Remove this check? 
if len(c) == 1: assert ord(c) < 128 self._builder.append(c) + # XXX The assumption here is that the bytes being appended are + # ASCII, ie 1:1 byte:char + self._length += len(c) + else: + raise TypeError() + @specialize.argtype(1) def append_slice(self, s, start, end): if isinstance(s, str): @@ -604,6 +673,7 @@ else: raise TypeError("Invalid type '%s' for Utf8Str.append_slice" % type(s)) + self._length += end - start @specialize.argtype(1) def append_multiple_char(self, c, count): @@ -613,12 +683,14 @@ self._builder.append_multiple_char(chr(c), count) return - if len(c) > 1: - import pdb; pdb.set_trace() if isinstance(c, str): self._builder.append_multiple_char(c, count) else: self._builder.append_multiple_char(c.bytes, count) + self._length += count + + def getlength(self): + return self._length def build(self): return Utf8Str(self._builder.build(), self._is_ascii) @@ -746,9 +818,10 @@ return iter def make_iterator(name, base, calc_value, default): - class C(base): + class C(object): + import_from_mixin(base, ['__init__', '__iter__']) _default = default - _value = calc_value + _value = func_with_new_name(calc_value, '_value') C.__name__ = name return C @@ -780,3 +853,5 @@ del ForwardIterBase del ReverseIterBase + + diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -6,7 +6,8 @@ from rpython.rlib.unicodedata import unicodedb from rpython.rlib.runicode import utf8_code_length -from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8chr, utf8ord, ORD +from pypy.interpreter import utf8 +from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8chr, utf8ord BYTEORDER = sys.byteorder @@ -416,7 +417,7 @@ result.append(rs) continue for ch in ru: - cd = ORD(ch, 0) + cd = utf8.ORD(ch, 0) if cd < limit: result.append(chr(cd)) else: @@ -1293,7 +1294,7 @@ ch = s[pos] c = mapping.get(ch, ERROR_CHAR) - if c == ERROR_CHAR: + if utf8.EQ(c, ERROR_CHAR): r, pos = 
errorhandler(errors, "charmap", "character maps to ", s, pos, pos + 1) @@ -1543,7 +1544,7 @@ # py3k only errorhandler('strict', 'decimal', msg, s, collstart, collend) for i in range(len(ru)): - ch = ORD(ru, i) + ch = utf8.ORD(ru, i) if unicodedb.isspace(ch): result.append(' ') continue @@ -1571,16 +1572,16 @@ if errors == 'replace': return _unicode_error_replacement, endingpos if errors == 'ignore': - return '', endingpos + return Utf8Str(''), endingpos raise UnicodeDecodeError(encoding, s, startingpos, endingpos, msg) _unicode_error_replacement = Utf8Str.from_unicode(u'\ufffd') def default_unicode_error_encode(errors, encoding, msg, u, startingpos, endingpos): if errors == 'replace': - return '?', None, endingpos + return Utf8Str('?'), None, endingpos if errors == 'ignore': - return '', None, endingpos + return Utf8Str(''), None, endingpos if we_are_translated(): # The constructor for UnicodeEncodeError requires an actual unicode diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -151,7 +151,7 @@ return utf8ord(s, 0) if (isinstance(w_ob, cdataobj.W_CData) and isinstance(w_ob.ctype, W_CTypePrimitiveUniChar)): - return rffi.cast(utf8.WCHAR_INTP, w_ob._cdata)[0] + return intmask(rffi.cast(utf8.WCHAR_INTP, w_ob._cdata)[0]) raise self._convert_error("unicode string of length 1", w_ob) def convert_from_object(self, cdata, w_ob): diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -3,6 +3,7 @@ from rpython.rlib.rstring import UnicodeBuilder from rpython.rlib.runicode import code_to_unichr, MAXUNICODE +from pypy.interpreter import utf8 from pypy.interpreter.utf8 import Utf8Builder, Utf8Str, utf8chr, utf8ord from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, 
unwrap_spec, WrappedDefault @@ -213,7 +214,7 @@ text = utf8chr(0xfffd) return space.newtuple([space.wrap(text), w_end]) elif space.isinstance_w(w_exc, space.w_UnicodeTranslateError): - text = utf8chr(0xfffd) * size + text = utf8.MUL(utf8chr(0xfffd), size) return space.newtuple([space.wrap(text), w_end]) else: raise oefmt(space.w_TypeError, @@ -264,7 +265,7 @@ lnum = len(num) nb = zeros + 2 - lnum # num starts with '0x' if nb > 0: - builder.append_multiple_char(u'0', nb) + builder.append_multiple_char('0', nb) builder.append_slice(num, 2, lnum) pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) @@ -678,7 +679,7 @@ string = space.readbuf_w(w_string).as_str() if len(string) == 0: - return space.newtuple([space.wrap(u''), space.wrap(0)]) + return space.newtuple([space.wrap(Utf8Str('')), space.wrap(0)]) final = True state = space.fromcache(CodecState) diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -111,7 +111,8 @@ def resize_buffer(self, newlength): if len(self.buf) > newlength: - self.buf = self.buf[:newlength] + assert newlength >= 0 + self.buf = self.buf[0:newlength] if len(self.buf) < newlength: self.buf.extend([Utf8Str('\0')] * (newlength - len(self.buf))) @@ -190,8 +191,9 @@ endpos += start else: endpos = end + self.pos = endpos + assert start >= 0 assert endpos >= 0 - self.pos = endpos return space.wrap(Utf8Str("").join(self.buf[start:endpos])) @unwrap_spec(pos=int, mode=int) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -6,6 +6,7 @@ from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, generic_new_descr, interp_attrproperty, interp_attrproperty_w) +from pypy.interpreter import utf8 from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8ord from pypy.module._codecs import interp_codecs from 
pypy.module._io.interp_iobase import W_IOBase, convert_size, trap_eintr @@ -76,7 +77,7 @@ output = space.unicode_w(w_output) output_len = len(output) if self.pendingcr and (final or output_len): - output = Utf8Str('\r') + output + output = utf8.ADD(Utf8Str('\r'), output) self.pendingcr = False output_len += 1 @@ -85,7 +86,7 @@ if not final and output_len > 0: last = output_len - 1 assert last >= 0 - if output[last] == Utf8Str('\r'): + if utf8ord(output, last) == ord('\r'): output = output[:last] self.pendingcr = True output_len -= 1 @@ -101,7 +102,7 @@ # for the \r only_lf = False if seennl == SEEN_LF or seennl == 0: - only_lf = (output.find(Utf8Str('\r')) < 0) + only_lf = (output.find('\r') < 0) if only_lf: # If not already seen, quick scan for a possible "\n" character. @@ -371,8 +372,9 @@ newline = None else: newline = space.unicode_w(w_newline) - if newline and newline not in (Utf8Str('\n'), Utf8Str('\r\n'), - Utf8Str('\r')): + if newline and not (utf8.EQ(newline, Utf8Str('\n')) or + utf8.EQ(newline, Utf8Str('\r\n')) or + utf8.EQ(newline, Utf8Str('\r'))): r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( "illegal newline value: %s" % (r,))) @@ -386,7 +388,7 @@ self.writetranslate = (newline is None or len(newline) == 0) if not self.readuniversal: self.writenl = self.readnl - if self.writenl == Utf8Str('\n'): + if utf8.EQ(self.writenl, Utf8Str('\n')): self.writenl = None elif _WINDOWS: self.writenl = Utf8Str("\r\n") @@ -662,7 +664,7 @@ offset_to_buffer = 0 else: assert self.decoded_chars_used == 0 - line = remaining + self.decoded_chars + line = utf8.ADD(remaining, self.decoded_chars) start = 0 offset_to_buffer = len(remaining) remaining = None diff --git a/pypy/module/_locale/interp_locale.py b/pypy/module/_locale/interp_locale.py --- a/pypy/module/_locale/interp_locale.py +++ b/pypy/module/_locale/interp_locale.py @@ -3,6 +3,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import 
unwrap_spec +from pypy.interpreter.utf8 import Utf8Str from rpython.rlib import rlocale from pypy.module.exceptions.interp_exceptions import _new_exception, W_Exception @@ -136,8 +137,8 @@ s1, s2 = space.unicode_w(w_s1), space.unicode_w(w_s2) - s1_c = rffi.unicode2wcharp(s1) - s2_c = rffi.unicode2wcharp(s2) + s1_c = Utf8Str.copy_to_new_wcharp(s1) + s2_c = Utf8Str.copy_to_new_wcharp(s2) try: result = _wcscoll(s1_c, s2_c) finally: diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -6,6 +6,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.interpreter import utf8 +from pypy.interpreter.utf8 import Utf8Str from pypy.module._codecs.interp_codecs import CodecState @@ -87,7 +89,7 @@ def _initialize(self): self.encodebuf = c_codecs.pypy_cjk_enc_new(self.codec) - self.pending = u"" + self.pending = Utf8Str("") def _free(self): self.pending = None @@ -100,7 +102,7 @@ space = self.space state = space.fromcache(CodecState) if len(self.pending) > 0: - object = self.pending + object + object = utf8.ADD(self.pending, object) try: output = c_codecs.encodeex(self.encodebuf, object, self.errors, state.encode_error_handler, self.name, diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -1,9 +1,10 @@ import sys from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize -from rpython.rlib import rfloat, runicode +from rpython.rlib import rfloat from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.utf8 import utf8chr from 
pypy.interpreter import unicodehelper OVF_DIGITS = len(str(sys.maxint)) @@ -30,6 +31,7 @@ No bound checking is done, use carefully. """ + ''' from rpython.rtyper.annlowlevel import llstr, hlunicode from rpython.rtyper.lltypesystem.rstr import malloc, UNICODE from rpython.rtyper.lltypesystem.lltype import cast_primitive, UniChar @@ -41,6 +43,12 @@ ch = ll_s.chars[start+i] ll_res.chars[i] = cast_primitive(UniChar, ch) return hlunicode(ll_res) + ''' + # TODO: Actually do this without slicing + from pypy.interpreter.utf8_codecs import str_decode_latin_1 + assert start >= 0 + assert end >= 0 + return str_decode_latin_1(s[start:end], end - start, 'strict')[0] TYPE_UNKNOWN = 0 TYPE_STRING = 1 @@ -369,7 +377,7 @@ return # help the annotator to know that we'll never go beyond # this point # - uchr = runicode.code_to_unichr(val) # may be a surrogate pair again + uchr = utf8chr(val) # may be a surrogate pair again utf8_ch = unicodehelper.encode_utf8(self.space, uchr) builder.append(utf8_ch) return i diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -415,7 +415,6 @@ "Expected unicode string of length one as wide character")) val = utf8ord(s) - #val = 0 if rffi.sizeof(rffi.WCHAR_T) == 2 and val > 0xFFFF: # Utf-16 must be used on systems with a 2 byte wchar_t to # encode codepoints > 0xFFFF @@ -597,7 +596,7 @@ def wcharp2rawunicode(space, address, maxlength=-1): if maxlength == -1: return wcharp2unicode(space, address) - s = rffi.wcharpsize2unicode(rffi.cast(rffi.CWCHARP, address), maxlength) + s = Utf8Str.from_wcharpsize(rffi.cast(rffi.CWCHARP, address), maxlength) return space.wrap(s) @unwrap_spec(address=r_uint, newcontent='bufferstr') diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -5,6 +5,7 @@ from pypy.interpreter.typedef import 
make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError +from pypy.interpreter import utf8 from pypy.interpreter.utf8 import Utf8Str, utf8ord from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit @@ -121,6 +122,8 @@ pos = len(unicodestr) if endpos > len(unicodestr): endpos = len(unicodestr) + assert pos >= 0 + assert endpos >= 0 return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) else: @@ -232,7 +235,7 @@ else: if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) - literal = u'\\' not in filter_as_unicode + literal = utf8.IN('\\', filter_as_unicode) else: try: filter_as_string = space.str_w(w_ptemplate) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -1,5 +1,5 @@ from pypy.interpreter.error import OperationError -from pypy.interpreter.utf8 import Utf8Str +from pypy.interpreter.utf8 import Utf8Str, utf8chr from pypy.interpreter import utf8_codecs from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.unicodedata import unicodedb @@ -138,17 +138,17 @@ @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOLOWER(space, ch): """Return the character ch converted to lower case.""" - return unichr(unicodedb.tolower(ord(ch))) + return utf8chr(unicodedb.tolower(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOUPPER(space, ch): """Return the character ch converted to upper case.""" - return unichr(unicodedb.toupper(ord(ch))) + return utf8chr(unicodedb.toupper(ord(ch))) @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL) def Py_UNICODE_TOTITLE(space, ch): """Return the character ch converted to title case.""" - return unichr(unicodedb.totitle(ord(ch))) + return 
utf8chr(unicodedb.totitle(ord(ch))) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) def Py_UNICODE_TODECIMAL(space, ch): @@ -331,7 +331,7 @@ Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if wchar_p: - s = rffi.Utf8Str.from_wcharpsize(wchar_p, length) + s = Utf8Str.from_wcharpsize(wchar_p, length) return make_ref(space, space.wrap(s)) else: return rffi.cast(PyObject, new_empty_unicode(space, length)) diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -77,6 +77,7 @@ descr_set_dict, descr_del_dict) from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError +from pypy.interpreter.utf8 import Utf8Str from rpython.rlib import rwin32 @@ -126,7 +127,7 @@ return space.call_function(space.w_unicode, w_as_str) lgt = len(self.args_w) if lgt == 0: - return space.wrap(u"") + return space.wrap(Utf8Str("")) if lgt == 1: return space.call_function(space.w_unicode, self.args_w[0]) else: diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -2,6 +2,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.utf8 import ORD from rpython.rlib import rgc, jit from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.tool import rffi_platform @@ -589,8 +590,8 @@ "multi-byte encodings are not supported") for i in range(256): - c = translationmap[i] - if c == u'\ufffd': + c = ORD(translationmap, i) + if c == 0xFFFD: info.c_map[i] = rffi.cast(rffi.INT, -1) else: info.c_map[i] = rffi.cast(rffi.INT, c) diff --git 
a/pypy/module/unicodedata/interp_ucd.py b/pypy/module/unicodedata/interp_ucd.py --- a/pypy/module/unicodedata/interp_ucd.py +++ b/pypy/module/unicodedata/interp_ucd.py @@ -6,7 +6,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import TypeDef, interp_attrproperty -from pypy.interpreter.utf8 import utf8chr +from pypy.interpreter.utf8 import Utf8Str, utf8chr from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.unicodedata import unicodedb_5_2_0, unicodedb_3_2_0 @@ -225,10 +225,12 @@ result[0] = ch if not composed: # If decomposed normalization we are done - return space.wrap(u''.join([unichr(i) for i in result[:j]])) + return space.wrap(Utf8Str('').join( + [utf8chr(i) for i in result[:j]])) if j <= 1: - return space.wrap(u''.join([unichr(i) for i in result[:j]])) + return space.wrap(Utf8Str('').join( + [utf8chr(i) for i in result[:j]])) current = result[0] starter_pos = 0 @@ -275,7 +277,8 @@ result[starter_pos] = current - return space.wrap(u''.join([unichr(i) for i in result[:next_insert]])) + return space.wrap(Utf8Str('').join( + [utf8chr(i) for i in result[:next_insert]])) methods = {} diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -9,7 +9,7 @@ from rpython.rlib.rarithmetic import INT_MAX from rpython.tool.sourcetools import func_with_new_name from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.utf8 import Utf8Builder, ORD +from pypy.interpreter.utf8 import Utf8Builder, ORD, utf8chr class BaseStringFormatter(object): @@ -156,11 +156,6 @@ # to build two subclasses of the BaseStringFormatter class, # each one getting its own subtle differences and RPython types. 
- if do_unicode: - const = unicode - else: - const = str - class StringFormatter(BaseStringFormatter): def __init__(self, space, fmt, values_w, w_valuedict): BaseStringFormatter.__init__(self, space, values_w, w_valuedict) @@ -365,6 +360,7 @@ return if prec >= 0 and prec < length: length = prec # ignore the end of the string if too long + result = self.result padding = self.width - length if padding < 0: @@ -475,7 +471,7 @@ n = space.int_w(w_value) if do_unicode: try: - c = unichr(n) + c = utf8chr(n) except ValueError: raise OperationError(space.w_OverflowError, space.wrap("unicode character code out of range")) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -681,8 +681,12 @@ buf.append(c) for i in range(d_state - 1, d_state - n_chars - 1, -1): buf.append(digits[i]) + + zero = "0" + if self.is_unicode: + zero = Utf8Str("0") for i in range(n_zeros): - buf.append("0") + buf.append(zero) def _group_digits(self, spec, digits): buf = [] @@ -727,9 +731,12 @@ def _upcase_string(self, s): buf = [] for c in s: - index = ord(c) + index = ORD(c, 0) if ord("a") <= index <= ord("z"): - c = chr(index - 32) + if self.is_unicode: + c = utf8chr(index - 32) + else: + c = chr(index - 32) buf.append(c) return self.empty.join(buf) @@ -1061,7 +1068,7 @@ tmp_align = self._align tmp_width = self._width self._fill_char = ord("\0") - self._align = "<" + self._align = ord("<") self._width = -1 #determine if we have remainder, might include dec or exponent or both diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -9,7 +9,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, unwrap_spec -from pypy.interpreter.utf8 import ORD +from pypy.interpreter import utf8 from pypy.objspace.std import slicetype from 
pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice @@ -29,6 +29,8 @@ lenself = len(value) start, end = slicetype.unwrap_start_stop( space, lenself, w_start, w_end, upper_bound=upper_bound) + assert start >= 0 + assert end >= 0 return (value, start, end) def _multi_chr(self, c): @@ -64,7 +66,7 @@ if e.match(space, space.w_TypeError): return space.w_NotImplemented raise - return self._new(self._val(space) + other) + return self._new(utf8.ADD(self._val(space), other)) # Bytearray overrides this method, CPython doesn't support contacting # buffers and strs, and unicodes are always handled above @@ -80,8 +82,9 @@ if times <= 0: return self._empty() if self._len() == 1: - return self._new(self._multi_chr(self._val(space)[0]) * times) - return self._new(self._val(space) * times) + return self._new(utf8.MUL(self._multi_chr(self._val(space)[0]), + times)) + return self._new(utf8.MUL(self._val(space), times)) descr_rmul = descr_mul @@ -142,7 +145,9 @@ if d > 0: offset = d//2 + (d & width & 1) fillchar = self._multi_chr(fillchar[0]) - centered = fillchar * offset + value + fillchar * (d - offset) + #centered = fillchar * offset + value + fillchar * (d - offset) + centered = utf8.ADD(utf8.ADD(utf8.MUL(fillchar, offset), value), + utf8.MUL(fillchar, (d - offset))) else: centered = value @@ -204,8 +209,11 @@ expanded = oldtoken = splitted.pop(0) for token in splitted: - expanded += self._multi_chr(' ') * self._tabindent(oldtoken, - tabsize) + token + #expanded += self._multi_chr(' ') * self._tabindent(oldtoken, + # tabsize) + token + m = utf8.MUL(self._multi_chr(' '), + self._tabindent(oldtoken, tabsize)) + expanded = utf8.ADD(expanded, utf8.ADD(m, token)) oldtoken = token return self._new(expanded) @@ -219,8 +227,8 @@ offset = len(token) while 1: - if (ORD(token, offset-1) == ord("\n") or - ORD(token, offset-1) == ord("\r")): + if (utf8.ORD(token, offset-1) == ord("\n") or + utf8.ORD(token, offset-1) == ord("\r")): break distance += 1 offset -= 1 @@ 
-457,7 +465,8 @@ d = width - len(value) if d > 0: fillchar = self._multi_chr(fillchar[0]) - value += fillchar * d + #value += fillchar * d + value = utf8.ADD(value, utf8.MUL(fillchar, d)) return self._new(value) @@ -471,7 +480,8 @@ d = width - len(value) if d > 0: fillchar = self._multi_chr(fillchar[0]) - value = fillchar * d + value + #value = fillchar * d + value + value = utf8.ADD(utf8.MUL(fillchar, d), value) return self._new(value) @@ -606,8 +616,8 @@ eol = pos pos += 1 # read CRLF as one line break - if (pos < length and ORD(value, eol) == ord('\r') and - ORD(value, pos) == ord('\n')): + if (pos < length and utf8.ORD(value, eol) == ord('\r') and + utf8.ORD(value, pos) == ord('\n')): pos += 1 if keepends: eol = pos @@ -768,15 +778,16 @@ def descr_zfill(self, space, width): selfval = self._val(space) if len(selfval) == 0: - return self._new(self._multi_chr('0') * width) + #return self._new(self._multi_chr('0') * width) + return self._new(utf8.MUL(self._multi_chr('0'), width)) num_zeros = width - len(selfval) if num_zeros <= 0: # cannot return self, in case it is a subclass of str return self._new(selfval) builder = self._builder(width) - if len(selfval) > 0 and (ORD(selfval, 0) == ord('+') or - ORD(selfval, 0) == ord('-')): + if len(selfval) > 0 and (utf8.ORD(selfval, 0) == ord('+') or + utf8.ORD(selfval, 0) == ord('-')): # copy sign to first position builder.append(selfval[0]) start = 1 diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -8,6 +8,7 @@ from pypy.interpreter import unicodehelper from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter import utf8 from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8chr, utf8ord from pypy.interpreter.utf8_codecs import ( make_unicode_escape_function, str_decode_ascii, str_decode_utf_8, @@ -91,7 +92,7 @@ return W_UnicodeObject(value) def _new_from_list(self, value): - return 
W_UnicodeObject(u''.join(value)) + return W_UnicodeObject(Utf8Str('').join(value)) def _empty(self): return W_UnicodeObject.EMPTY @@ -109,12 +110,21 @@ @staticmethod def _op_val(space, w_other): + if space.isinstance_w(w_other, space.w_str): + w_other = unicode_from_string(space, w_other) + elif not isinstance(w_other, W_UnicodeObject): + w_other = unicode_from_encoded_object( + space, w_other, None, "strict") + assert isinstance(w_other, W_UnicodeObject) + return w_other._value + ''' if isinstance(w_other, W_UnicodeObject): return w_other._value if space.isinstance_w(w_other, space.w_str): return unicode_from_string(space, w_other)._value return unicode_from_encoded_object( space, w_other, None, "strict")._value + ''' def _chr(self, char): assert len(char) == 1 @@ -228,7 +238,7 @@ def descr_eq(self, space, w_other): try: - res = self._val(space) == self._op_val(space, w_other) + res = self._val(space).__eq__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -244,7 +254,7 @@ def descr_ne(self, space, w_other): try: - res = self._val(space) != self._op_val(space, w_other) + res = self._val(space).__ne__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -260,7 +270,7 @@ def descr_lt(self, space, w_other): try: - res = self._val(space) < self._op_val(space, w_other) + res = self._val(space).__lt__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -269,7 +279,7 @@ def descr_le(self, space, w_other): try: - res = self._val(space) <= self._op_val(space, w_other) + res = self._val(space).__le__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -278,7 +288,7 @@ def descr_gt(self, space, w_other): try: - res = self._val(space) > self._op_val(space, w_other) + res 
= self._val(space).__gt__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented @@ -287,7 +297,7 @@ def descr_ge(self, space, w_other): try: - res = self._val(space) >= self._op_val(space, w_other) + res = self._val(space).__ge__(self._op_val(space, w_other)) except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented diff --git a/pypy/tool/ann_override.py b/pypy/tool/ann_override.py --- a/pypy/tool/ann_override.py +++ b/pypy/tool/ann_override.py @@ -21,12 +21,16 @@ def specialize__wrap(pol, funcdesc, args_s): from pypy.interpreter.baseobjspace import W_Root + from pypy.interpreter.utf8 import Utf8Str from rpython.annotator.classdef import ClassDef W_Root_def = funcdesc.bookkeeper.getuniqueclassdef(W_Root) typ = args_s[1].knowntype if isinstance(typ, ClassDef): - assert typ.issubclass(W_Root_def) - typ = W_Root + if typ.issubclass(W_Root_def): + typ = W_Root + else: + assert typ.classdesc.pyobj is Utf8Str + typ = Utf8Str else: assert not issubclass(typ, W_Root) assert typ != tuple, "space.wrap(tuple) forbidden; use newtuple()" From noreply at buildbot.pypy.org Mon Aug 4 21:07:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Aug 2014 21:07:18 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: start to handle signature Message-ID: <20140804190718.A37581C3382@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72692:08c63a7da2da Date: 2014-08-04 22:00 +0300 http://bitbucket.org/pypy/pypy/changeset/08c63a7da2da/ Log: start to handle signature diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -214,6 +214,9 @@ assert res.get_scalar_value().imag == 4. def test_Ufunc_FromFuncAndDataAndSignature(self. 
space, api): + PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; + char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; + void *array_data[] = {NULL, NULL}; ufunc = api._PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, nin, nout, identity, doc, check_return, signature) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -16,6 +16,8 @@ def decode_w_dtype(space, w_dtype): if space.is_none(w_dtype): return None + if isinstance(w_dtype, W_Dtype): + return w_dtype return space.interp_w( W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -150,16 +150,17 @@ def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) if cfuncs: + print 'cfuncs.int_times2',cfuncs.int_times2 def int_times2(space, __args__): args, kwargs = __args__.unpack() arr = map(space.unwrap, args) # Assume arr is contiguous - addr = cfuncs.new('char *[2]') + addr = ffi.new('char *[3]') addr[0] = arr[0].data addr[1] = arr[1].data - dims = cfuncs.new('int *[1]') + dims = ffi.new('int *[1]') dims[0] = arr[0].size - steps = cfuncs.new('int *[1]') + steps = ffi.new('int *[1]') steps[0] = arr[0].strides[-1] cfuncs.int_times2(addr, dims, steps, 0) def double_times2(space, __args__): @@ -175,10 +176,11 @@ steps[0] = arr[0].strides[-1] cfuncs.double_times2(addr, dims, steps, 0) if option.runappdirect: - times2 = cls.space.wrap([double_times2, int_times2]) + times2 = cls.space.wrap([int_times2, double_times2]) else: - times2 = cls.space.wrap([interp2app(double_times2), - interp2app(int_times2)]) + times2 = cls.space.wrap([interp2app(int_times2), + interp2app(double_times2), + ]) else: times2 = None cls.w_times2 = 
cls.space.wrap(times2) @@ -233,13 +235,15 @@ def test_from_cffi_func(self): import sys - if '__pypy__' not in sys.builtin_module_names: - skip('pypy-only test') + #if '__pypy__' not in sys.builtin_module_names: + # skip('pypy-only test') from numpy import frompyfunc, dtype, arange if self.times2 is None: skip('cffi not available') ufunc = frompyfunc(self.times2, 1, 1, signature='()->()', - dtypes=[dtype(float), dtype(float), dtype(int), dtype(int)], + dtypes=[dtype(int), dtype(int), + dtype(float), dtype(float) + ] ) f = arange(10, dtype=int) f2 = ufunc(f) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -542,10 +542,21 @@ outargs[i] = out index = self.type_resolver(space, inargs, outargs) self.alloc_outargs(space, index, inargs, outargs) - # XXX handle inner-loop indexing new_shape = inargs[0].get_shape() assert isinstance(outargs[0], W_NDimArray) res_dtype = outargs[0].get_dtype() + # XXX handle inner-loop indexing + sign_parts = self.signature.split('->') + if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \ + and sign_parts[1].strip() == '()': + + arglist = space.newlist(inargs + outargs) + func = self.funcs[index] + # XXXX TODO in test_ufuncs's test_from_cffi_func, + # XXXX func is an app-level python function, + # XXXX how do we call it? 
+ assert False + return if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, self.funcs[index], res_dtype, inargs, outargs[0]) @@ -968,7 +979,6 @@ if not space.is_true(space.callable(w_func)): raise oefmt(space.w_TypeError, 'func must be callable') func = [w_func] - match_dtypes = False if space.is_none(w_dtypes) and not signature: raise oefmt(space.w_NotImplementedError, @@ -976,13 +986,15 @@ elif (space.isinstance_w(w_dtypes, space.w_tuple) or space.isinstance_w(w_dtypes, space.w_list)): _dtypes = space.listview(w_dtypes) - if space.str_w(_dtypes[0]) == 'match': + if space.isinstance_w(_dtypes[0], space.w_str) and space.str_w(_dtypes[0]) == 'match': dtypes = [] match_dtypes = True else: dtypes = [None]*len(_dtypes) for i in range(len(dtypes)): + print 'decoding',_dtypes[i] dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i]) + print 'got',dtypes[i] else: raise oefmt(space.w_ValueError, 'dtypes must be None or a list of dtypes') From noreply at buildbot.pypy.org Mon Aug 4 22:52:58 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Aug 2014 22:52:58 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: simplify test Message-ID: <20140804205258.0DDFB1C0588@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72693:f6a9b2e384d6 Date: 2014-08-04 23:22 +0300 http://bitbucket.org/pypy/pypy/changeset/f6a9b2e384d6/ Log: simplify test diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -6,69 +6,6 @@ from pypy.conftest import option -try: - import cffi - ffi = cffi.FFI() - if ffi.sizeof('int *') == ffi.sizeof('long'): - intp = 'long' - elif ffi.sizeof('int *') == ffi.sizeof('int'): - intp = 'int' - else: - raise ValueError('unknown size of int *') - ffi.cdef(''' - void double_times2(char **args, {0} *dimensions, - {0} * steps, void* data); - void int_times2(char **args, {0} 
**dimensions, - {0} **steps, void* data); - '''.format(intp) - ) - cfuncs = ffi.verify(''' - void double_times2(char **args, {0} *dimensions, - {0} * steps, void* data) - {{ - {0} i; - {0} n = dimensions[0]; - char *in = args[0], *out = args[1]; - {0} in_step = steps[0], out_step = steps[1]; - - double tmp; - - for (i = 0; i < n; i++) {{ - /*BEGIN main ufunc computation*/ - tmp = *(double *)in; - tmp *=2.0; - *((double *)out) = tmp; - /*END main ufunc computation*/ - - in += in_step; - out += out_step; - }} - }}; - void int_times2(char **args, {0} *dimensions, - {0} * steps, void* data) - {{ - {0} i; - {0} n = dimensions[0]; - char *in = args[0], *out = args[1]; - {0} in_step = steps[0], out_step = steps[1]; - - int tmp; - - for (i = 0; i < n; i++) {{ - /*BEGIN main ufunc computation*/ - tmp = *(int *)in; - tmp *=2.0; - *((int *)out) = tmp; - /*END main ufunc computation*/ - - in += in_step; - out += out_step; - }} - }} - '''.format(intp)) -except ImportError: - cfuncs = None - class TestUfuncCoercion(object): def test_binops(self, space): bool_dtype = get_dtype_cache(space).w_booldtype @@ -147,44 +84,6 @@ assert find_unaryop_result_dtype(space, bool_dtype, promote_bools=True) is int8_dtype class AppTestUfuncs(BaseNumpyAppTest): - def setup_class(cls): - BaseNumpyAppTest.setup_class.im_func(cls) - if cfuncs: - print 'cfuncs.int_times2',cfuncs.int_times2 - def int_times2(space, __args__): - args, kwargs = __args__.unpack() - arr = map(space.unwrap, args) - # Assume arr is contiguous - addr = ffi.new('char *[3]') - addr[0] = arr[0].data - addr[1] = arr[1].data - dims = ffi.new('int *[1]') - dims[0] = arr[0].size - steps = ffi.new('int *[1]') - steps[0] = arr[0].strides[-1] - cfuncs.int_times2(addr, dims, steps, 0) - def double_times2(space, __args__): - args, kwargs = __args__.unpack() - arr = map(space.unwrap, args) - # Assume arr is contiguous - addr = cfuncs.new('char *[2]') - addr[0] = arr[0].data - addr[1] = arr[1].data - dims = cfuncs.new('int *[1]') - dims[0] 
= arr[0].size - steps = cfuncs.new('int *[1]') - steps[0] = arr[0].strides[-1] - cfuncs.double_times2(addr, dims, steps, 0) - if option.runappdirect: - times2 = cls.space.wrap([int_times2, double_times2]) - else: - times2 = cls.space.wrap([interp2app(int_times2), - interp2app(double_times2), - ]) - else: - times2 = None - cls.w_times2 = cls.space.wrap(times2) - def test_constants(self): import numpy as np assert np.FLOATING_POINT_SUPPORT == 1 @@ -198,7 +97,7 @@ assert add.__name__ == 'add' raises(TypeError, ufunc) - def test_frompyfunc(self): + def test_frompyfunc_innerloop(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): return a+b @@ -233,21 +132,32 @@ assert isinstance(res, tuple) assert (res[0] == arange(10)).all() - def test_from_cffi_func(self): - import sys - #if '__pypy__' not in sys.builtin_module_names: - # skip('pypy-only test') + def test_frompyfunc_outerloop(self): + def int_times2(in_array, out_array): + assert in_array.dtype == int + in_flat = in_array.flat + out_flat = out_array.flat + for i in range(in_array.size): + out_flat[i] = in_flat[i] * 2 + def double_times2(space, __args__): + assert in_array.dtype == float + in_flat = in_array.flat + out_flat = out_array.flat + for i in range(in_array.size): + out_flat[i] = in_flat[i] * 2 from numpy import frompyfunc, dtype, arange - if self.times2 is None: - skip('cffi not available') - ufunc = frompyfunc(self.times2, 1, 1, signature='()->()', - dtypes=[dtype(int), dtype(int), - dtype(float), dtype(float) + ufunc = frompyfunc([int_times2, double_times2], 1, 1, + signature='()->()', + dtypes=[dtype(int), dtype(int), + dtype(float), dtype(float) ] ) - f = arange(10, dtype=int) - f2 = ufunc(f) - assert f2 + ai = arange(10, dtype=int) + ai2 = ufunc(ai) + assert all(ai2 == ai * 2) + af = arange(10, dtype=float) + af2 = ufunc(af) + assert all(af2 == af * 2) def test_ufunc_attrs(self): from numpy import add, multiply, sin diff --git a/pypy/module/micronumpy/ufuncs.py 
b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -552,11 +552,10 @@ arglist = space.newlist(inargs + outargs) func = self.funcs[index] - # XXXX TODO in test_ufuncs's test_from_cffi_func, - # XXXX func is an app-level python function, - # XXXX how do we call it? - assert False - return + space.call_function(func, *(inargs + outargs)) + if len(outargs) < 2: + return outargs[0] + return outargs if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, self.funcs[index], res_dtype, inargs, outargs[0]) From noreply at buildbot.pypy.org Mon Aug 4 22:52:59 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Aug 2014 22:52:59 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: remove signature since upstream postponed generalized universal ufuncs until after 1.9 Message-ID: <20140804205259.52E481C0588@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72694:ca3b82260c3a Date: 2014-08-04 23:46 +0300 http://bitbucket.org/pypy/pypy/changeset/ca3b82260c3a/ Log: remove signature since upstream postponed generalized universal ufuncs until after 1.9 diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -139,7 +139,7 @@ out_flat = out_array.flat for i in range(in_array.size): out_flat[i] = in_flat[i] * 2 - def double_times2(space, __args__): + def double_times2(in_array, out_array): assert in_array.dtype == float in_flat = in_array.flat out_flat = out_array.flat @@ -147,7 +147,6 @@ out_flat[i] = in_flat[i] * 2 from numpy import frompyfunc, dtype, arange ufunc = frompyfunc([int_times2, double_times2], 1, 1, - signature='()->()', dtypes=[dtype(int), dtype(int), dtype(float), dtype(float) ] diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ 
b/pypy/module/micronumpy/ufuncs.py @@ -482,22 +482,20 @@ class W_UfuncGeneric(W_Ufunc): ''' - Handle a number of python functions, each with a signature and dtypes. - The signature can specify how to create the inner loop, i.e. - (i,j),(j,k)->(i,k) for a dot-like matrix multiplication, and the dtypes - can specify the input, output args for the function. When called, the actual - function used will be resolved by examining the input arg's dtypes. + Handle a number of python functions, each with a dtypes. + The dtypes can specify the input, output args for the function. + When called, the actual function used will be resolved by examining + the input arg's dtypes. If dtypes == 'match', only one argument is provided and the output dtypes will match the input dtype (not cpython numpy compatible) ''' _immutable_fields_ = ["funcs", "dtypes", "data"] - def __init__(self, space, funcs, name, identity, nin, nout, dtypes, signature, match_dtypes=False): - # XXX make sure funcs, signature, dtypes, nin, nout are consistent + def __init__(self, space, funcs, name, identity, nin, nout, dtypes, match_dtypes=False): + # XXX make sure funcs, dtypes, nin, nout are consistent - # These don't matter, we use the signature and dtypes for determining - # output dtype + # These don't matter, we use the dtypes for determining output dtype promote_to_largest = promote_to_float = promote_bools = False allow_bool = allow_complex = True int_only = complex_to_float = False @@ -514,7 +512,6 @@ raise oefmt(space.w_ValueError, "generic ufunc with %d functions, %d arguments, but %d dtypes", len(funcs), self.nargs, len(dtypes)) - self.signature = signature def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None, cumulative=False): @@ -545,27 +542,30 @@ new_shape = inargs[0].get_shape() assert isinstance(outargs[0], W_NDimArray) res_dtype = outargs[0].get_dtype() - # XXX handle inner-loop indexing - sign_parts = self.signature.split('->') - if len(sign_parts) == 2 and 
sign_parts[0].strip() == '()' \ - and sign_parts[1].strip() == '()': - - arglist = space.newlist(inargs + outargs) + if not self.match_dtypes: func = self.funcs[index] space.call_function(func, *(inargs + outargs)) if len(outargs) < 2: return outargs[0] return outargs + # XXX TODO handle more complicated signatures, + # for now, assume (i) -> (i) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, self.funcs[index], res_dtype, inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, self.funcs[index], res_dtype, inargs, outargs) - def type_resolver(self, space, index, outargs): + def type_resolver(self, space, inargs, outargs): # Find a match for the inargs.dtype in self.dtypes, like # linear_search_type_resolver in numy ufunc_type_resolutions.c - return 0 + for i in range(0, len(self.dtypes), self.nargs): + if inargs[0].get_dtype() == self.dtypes[i]: + break + else: + raise oefmt(space.w_TypeError, + 'input dtype %r did not match any known dtypes', inargs[0].get_dtype()) + return i / self.nargs def alloc_outargs(self, space, index, inargs, outargs): # Any None outarg should be allocated here @@ -911,12 +911,12 @@ def get(space): return space.fromcache(UfuncState) - at unwrap_spec(nin=int, nout=int, signature=str, w_identity=WrappedDefault(None), + at unwrap_spec(nin=int, nout=int, w_identity=WrappedDefault(None), name=str, doc=str) -def frompyfunc(space, w_func, nin, nout, w_dtypes=None, signature='', +def frompyfunc(space, w_func, nin, nout, w_dtypes=None, w_identity=None, name='', doc=''): ''' frompyfunc(func, nin, nout) #cpython numpy compatible - frompyfunc(func, nin, nout, dtypes=None, signature='', + frompyfunc(func, nin, nout, dtypes=None, identity=None, name='', doc='') Takes an arbitrary Python function and returns a ufunc. @@ -934,9 +934,6 @@ The number of arrays returned by `func`. dtypes: None or [dtype, ...] 
of the input, output args for each function, or 'match' to force output to exactly match input dtype - signature*: str, default='' - The mapping of input args to output args, defining the - inner-loop indexing identity*: None (default) or int For reduce-type ufuncs, the default value name: str, default='' @@ -951,7 +948,7 @@ Notes ----- - If the signature and out_dtype are both missing, the returned ufunc always + If the signature and dtypes are both missing, the returned ufunc always returns PyObject arrays (cpython numpy compatability). Examples @@ -979,7 +976,7 @@ raise oefmt(space.w_TypeError, 'func must be callable') func = [w_func] match_dtypes = False - if space.is_none(w_dtypes) and not signature: + if space.is_none(w_dtypes): raise oefmt(space.w_NotImplementedError, 'object dtype requested but not implemented') elif (space.isinstance_w(w_dtypes, space.w_tuple) or @@ -991,9 +988,7 @@ else: dtypes = [None]*len(_dtypes) for i in range(len(dtypes)): - print 'decoding',_dtypes[i] dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i]) - print 'got',dtypes[i] else: raise oefmt(space.w_ValueError, 'dtypes must be None or a list of dtypes') @@ -1004,7 +999,7 @@ identity = \ descriptor.get_dtype_cache(space).w_longdtype.box(w_identity) - w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature, + w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, match_dtypes=match_dtypes) if doc: w_ret.w_doc = space.wrap(doc) From noreply at buildbot.pypy.org Mon Aug 4 22:53:02 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 4 Aug 2014 22:53:02 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: merge default into branch Message-ID: <20140804205302.DA5F91C0588@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72695:5e27940eacc0 Date: 2014-08-04 23:48 +0300 http://bitbucket.org/pypy/pypy/changeset/5e27940eacc0/ Log: merge default into branch diff too long, truncating to 2000 out of 2595 lines diff --git 
a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! 
It's probably not going to work well.) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -435,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -74,9 +74,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - `pypy/config/pypyoption.py`_ for details. 
- When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: @@ -328,7 +325,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. 
_`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -50,6 +50,8 @@ libz-dev libbz2-dev libncurses-dev libexpat1-dev \ libssl-dev libgc-dev python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. + On a Fedora-16 box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ @@ -57,6 +59,8 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``xz-devel``. + On SLES11: $ sudo zypper install gcc make python-devel pkg-config \ @@ -74,6 +78,7 @@ * ``pkg-config`` (to help us locate libffi files) * ``libz-dev`` (for the optional ``zlib`` module) * ``libbz2-dev`` (for the optional ``bz2`` module) + * ``liblzma`` (for the optional ``lzma`` module, PyPy3 only) * ``libsqlite3-dev`` (for the optional ``sqlite3`` module via cffi) * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) * ``libexpat1-dev`` (for the optional ``pyexpat`` module) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -102,7 +102,7 @@ .. _Python: http://docs.python.org/index.html .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ -.. _`development bug/feature tracker`: https://bugs.pypy.org +.. _`development bug/feature tracker`: https://bitbucket.org/pypy/pypy/issues .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. 
_`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -137,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. 
+ + + User Guide ========== @@ -489,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. - Reference to implementation details ----------------------------------- diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -15,14 +15,14 @@ * Because of the above, we are very serious about Test Driven Development. It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. (It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture ============ @@ -32,7 +32,7 @@ * `RPython`_ is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. @@ -40,7 +40,7 @@ .. 
_`RPython`: coding-guide.html#RPython -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. @@ -73,7 +73,7 @@ .. _`we have a tracing JIT`: jit/index.html -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -665,8 +665,11 @@ else: # translated case follows. self.threadlocals is either from # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. - # the result is assumed to be non-null: enter_thread() was called. - return self.threadlocals.get_ec() + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). 
+ ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -1498,9 +1501,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): return w_obj.str_w(self) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -616,7 +616,8 @@ def descr_classmethod_get(self, space, w_obj, w_klass=None): if space.is_none(w_klass): w_klass = space.type(w_obj) - return space.wrap(Method(space, self.w_function, w_klass, space.w_None)) + return space.wrap(Method(space, self.w_function, w_klass, + space.type(w_klass))) def descr_classmethod__new__(space, w_subtype, w_function): instance = space.allocate_instance(ClassMethod, w_subtype) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -200,7 +200,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -17,7 +17,7 @@ yield 1 assert g.gi_running g = f() - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' assert g.gi_frame is not None assert not g.gi_running @@ -26,7 +26,7 @@ raises(StopIteration, g.next) assert not g.gi_running assert g.gi_frame is None - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' def test_generator3(self): @@ -286,13 +286,13 @@ w_co = space.appexec([], '''(): def g(x): yield x + 5 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == False w_co = space.appexec([], '''(): def g(x): yield x + 5 yield x + 6 - return g.func_code + return g.__code__ ''') assert should_not_inline(w_co) == True diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -388,6 +388,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + 
assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. If @@ -17,12 +17,11 @@ if len(obj) == 0: return _caller_locals() elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" + raise TypeError("vars() takes at most 1 argument.") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,8 +37,7 @@ attributes of its class's base classes. 
""" if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = _caller_locals().keys() # 2 stackframes away if not isinstance(local_names, list): @@ -48,92 +46,61 @@ return local_names import types - obj = args[0] - - dir_meth = None if isinstance(obj, types.InstanceType): - try: - dir_meth = getattr(obj, "__dir__") - except AttributeError: - pass + dir_meth = getattr(obj, '__dir__', None) else: - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): + names = dir_meth() + if not isinstance(names, list): raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) - result.sort() - return result + type(names),)) + names.sort() + return names elif isinstance(obj, types.ModuleType): try: - result = list(obj.__dict__) - result.sort() - return result + return sorted(obj.__dict__) except AttributeError: return [] - elif isinstance(obj, (types.TypeType, types.ClassType)): - #Don't look at __class__, as metaclass methods would be confusing. - result = _classdir(obj).keys() - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass + # Don't look at __class__, as metaclass methods would be confusing. + return sorted(_classdir(obj)) + else: + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). 
## XXX Would like this to go away someday; for now, it's ## XXX needed to get at im_self etc of method objects. */ - for attr in ['__members__','__methods__']: - try: - l = getattr(obj, attr) - if not isinstance(l, list): - continue - for item in l: - if isinstance(item, types.StringTypes): - Dict[item] = None - except (AttributeError, TypeError): - pass + for attr in '__members__', '__methods__': + l = getattr(obj, attr, None) + if not isinstance(l, list): + continue + names.extend(item for item in l if isinstance(item, str)) - result = Dict.keys() - result.sort() - return result + return sorted(names) def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. + """Return a set of the accessible attributes of class/type klass. - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. + This includes all attributes of klass and all of the base classes + recursively. """ - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. 
- bases = klass.__bases__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + names = set() + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) + return names diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -73,13 +73,12 @@ 'builtinify' : 'interp_magic.builtinify', 'lookup_special' : 'interp_magic.lookup_special', 'do_what_I_mean' : 'interp_magic.do_what_I_mean', - 'list_strategy' : 'interp_magic.list_strategy', 'validate_fd' : 'interp_magic.validate_fd', 'resizelist_hint' : 'interp_magic.resizelist_hint', 'newlist_hint' : 'interp_magic.newlist_hint', 'add_memory_pressure' : 'interp_magic.add_memory_pressure', 'newdict' : 'interp_dict.newdict', - 'dictstrategy' : 'interp_dict.dictstrategy', + 'strategy' : 'interp_magic.strategy', # dict,set,list 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', } diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,7 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) def newdict(space, type): @@ -31,13 +30,3 @@ return space.newdict(strdict=True) else: raise oefmt(space.w_TypeError, "unknown type of dict %s", type) - -def dictstrategy(space, w_obj): - """ dictstrategy(dict) - - show 
the underlaying strategy used by a dict object - """ - if not isinstance(w_obj, W_DictMultiObject): - raise OperationError(space.w_TypeError, - space.wrap("expecting dict object")) - return space.wrap('%r' % (w_obj.strategy,)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -2,7 +2,9 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.setobject import W_BaseSetObject from pypy.objspace.std.typeobject import MethodCache from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -70,12 +72,23 @@ def do_what_I_mean(space): return space.wrap(42) -def list_strategy(space, w_list): - if isinstance(w_list, W_ListObject): - return space.wrap(w_list.strategy._applevel_repr) + +def strategy(space, w_obj): + """ strategy(dict or list or set) + + Return the underlying strategy currently used by a dict, list or set object + """ + if isinstance(w_obj, W_DictMultiObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_ListObject): + name = w_obj.strategy.__class__.__name__ + elif isinstance(w_obj, W_BaseSetObject): + name = w_obj.strategy.__class__.__name__ else: - w_msg = space.wrap("Can only get the list strategy of a list") - raise OperationError(space.w_TypeError, w_msg) + raise OperationError(space.w_TypeError, + space.wrap("expecting dict or list or set object")) + return space.wrap(name) + @unwrap_spec(fd='c_int') def validate_fd(space, fd): diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -46,26 +46,42 @@ 
assert x == 42 def test_list_strategy(self): - from __pypy__ import list_strategy + from __pypy__ import strategy l = [1, 2, 3] - assert list_strategy(l) == "int" + assert strategy(l) == "IntegerListStrategy" l = ["a", "b", "c"] - assert list_strategy(l) == "bytes" + assert strategy(l) == "BytesListStrategy" l = [u"a", u"b", u"c"] - assert list_strategy(l) == "unicode" + assert strategy(l) == "UnicodeListStrategy" l = [1.1, 2.2, 3.3] - assert list_strategy(l) == "float" + assert strategy(l) == "FloatListStrategy" l = range(3) - assert list_strategy(l) == "simple_range" + assert strategy(l) == "SimpleRangeListStrategy" l = range(1, 2) - assert list_strategy(l) == "range" + assert strategy(l) == "RangeListStrategy" l = [1, "b", 3] - assert list_strategy(l) == "object" + assert strategy(l) == "ObjectListStrategy" l = [] - assert list_strategy(l) == "empty" + assert strategy(l) == "EmptyListStrategy" o = 5 - raises(TypeError, list_strategy, 5) + raises(TypeError, strategy, 5) + + def test_dict_strategy(self): + from __pypy__ import strategy + + d = {} + assert strategy(d) == "EmptyDictStrategy" + d = {1: None, 5: None} + assert strategy(d) == "IntDictStrategy" + + def test_set_strategy(self): + from __pypy__ import strategy + + s = set() + assert strategy(s) == "EmptySetStrategy" + s = set([2, 3, 4]) + assert strategy(s) == "IntegerSetStrategy" class AppTestJitFeatures(object): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } interpleveldefs = { - '__version__': 'space.wrap("0.8.2")', + '__version__': 'space.wrap("0.8.6")', 'load_library': 'libraryobj.load_library', diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3188,4 +3188,4 
@@ def test_version(): # this test is here mostly for PyPy - assert __version__ == "0.8.2" + assert __version__ == "0.8.6" diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,14 +1,12 @@ -from __future__ import with_statement -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib import rpoll, rsocket +from rpython.rlib.rarithmetic import intmask +from rpython.rlib.ropenssl import * +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.gateway import interp2app, unwrap_spec - -from rpython.rlib.rarithmetic import intmask -from rpython.rlib import rpoll, rsocket -from rpython.rlib.ropenssl import * - from pypy.module._socket import interp_socket @@ -83,19 +81,15 @@ Mix string into the OpenSSL PRNG state. entropy (a float) is a lower bound on the entropy contained in string.""" - - buf = rffi.str2charp(string) - try: + with rffi.scoped_str2charp(string) as buf: libssl_RAND_add(buf, len(string), entropy) - finally: - rffi.free_charp(buf) def RAND_status(space): """RAND_status() -> 0 or 1 - Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not. - It is necessary to seed the PRNG with RAND_add() on some platforms before - using the ssl() function.""" + Returns 1 if the OpenSSL PRNG has been seeded with enough data + and 0 if not. It is necessary to seed the PRNG with RAND_add() + on some platforms before using the ssl() function.""" res = libssl_RAND_status() return space.wrap(res) @@ -107,16 +101,12 @@ Queries the entropy gather daemon (EGD) on socket path. Returns number of bytes read. 
Raises socket.sslerror if connection to EGD fails or if it does provide enough data to seed PRNG.""" - - socket_path = rffi.str2charp(path) - try: + with rffi.scoped_str2charp(path) as socket_path: bytes = libssl_RAND_egd(socket_path) - finally: - rffi.free_charp(socket_path) if bytes == -1: - msg = "EGD connection failed or EGD did not return" - msg += " enough data to seed the PRNG" - raise ssl_error(space, msg) + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") return space.wrap(bytes) @@ -127,17 +117,19 @@ self.ctx = lltype.nullptr(SSL_CTX.TO) self.ssl = lltype.nullptr(SSL.TO) self.peer_cert = lltype.nullptr(X509.TO) - self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') + self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, + flavor='raw') self._server[0] = '\0' - self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw') + self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, + flavor='raw') self._issuer[0] = '\0' self.shutdown_seen_zero = False - def server(self): - return self.space.wrap(rffi.charp2str(self._server)) + def server(self, space): + return space.wrap(rffi.charp2str(self._server)) - def issuer(self): - return self.space.wrap(rffi.charp2str(self._issuer)) + def issuer(self, space): + return space.wrap(rffi.charp2str(self._issuer)) def __del__(self): self.enqueue_for_destruction(self.space, SSLObject.destructor, @@ -155,21 +147,20 @@ lltype.free(self._issuer, flavor='raw') @unwrap_spec(data='bufferstr') - def write(self, data): + def write(self, space, data): """write(s) -> len Writes the string s into the SSL object. 
Returns the number of bytes written.""" - self._refresh_nonblocking(self.space) + self._refresh_nonblocking(space) - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, "Underlying socket too large for select().") num_bytes = 0 while True: @@ -179,18 +170,16 @@ err = libssl_SSL_get_error(self.ssl, num_bytes) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_HAS_BEEN_CLOSED: - raise ssl_error(self.space, "Underlying socket has been closed.") + raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -200,38 +189,38 @@ break if num_bytes > 0: - return self.space.wrap(num_bytes) + return space.wrap(num_bytes) else: - raise _ssl_seterror(self.space, self, num_bytes) + raise _ssl_seterror(space, self, num_bytes) - def pending(self): + def pending(self, space): """pending() -> count Returns the number of already decrypted bytes available for read, pending on the 
connection.""" count = libssl_SSL_pending(self.ssl) if count < 0: - raise _ssl_seterror(self.space, self, count) - return self.space.wrap(count) + raise _ssl_seterror(space, self, count) + return space.wrap(count) @unwrap_spec(num_bytes=int) - def read(self, num_bytes=1024): + def read(self, space, num_bytes=1024): """read([len]) -> string Read up to len bytes from the SSL socket.""" - count = libssl_SSL_pending(self.ssl) if not count: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(self.space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_HAS_BEEN_CLOSED: if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN: - return self.space.wrap('') - raise ssl_error(self.space, "Socket closed without SSL shutdown handshake") + return space.wrap('') + raise ssl_error(space, + "Socket closed without SSL shutdown handshake") with rffi.scoped_alloc_buffer(num_bytes) as buf: while True: @@ -241,19 +230,17 @@ err = libssl_SSL_get_error(self.ssl, count) if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout(self.space, - self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) elif (err == SSL_ERROR_ZERO_RETURN and libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN): - return self.space.wrap("") + return space.wrap("") else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: - raise ssl_error(self.space, "The read operation timed out") + 
raise ssl_error(space, "The read operation timed out") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -263,11 +250,11 @@ break if count <= 0: - raise _ssl_seterror(self.space, self, count) + raise _ssl_seterror(space, self, count) result = buf.str(count) - return self.space.wrap(result) + return space.wrap(result) def _refresh_nonblocking(self, space): # just in case the blocking state of the socket has been changed @@ -286,11 +273,9 @@ err = libssl_SSL_get_error(self.ssl, ret) # XXX PyErr_CheckSignals() if err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - space, self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: sockstate = SOCKET_OPERATION_OK if sockstate == SOCKET_HAS_TIMED_OUT: @@ -298,7 +283,8 @@ elif sockstate == SOCKET_HAS_BEEN_CLOSED: raise ssl_error(space, "Underlying socket has been closed.") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate == SOCKET_IS_NONBLOCKING: break @@ -330,7 +316,6 @@ self._refresh_nonblocking(space) zeros = 0 - while True: # Disable read-ahead so that unwrap can work correctly. 
# Otherwise OpenSSL might read in too much data, @@ -360,21 +345,20 @@ # Possibly retry shutdown until timeout or failure ssl_err = libssl_SSL_get_error(self.ssl, ret) if ssl_err == SSL_ERROR_WANT_READ: - sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, False) + sockstate = checkwait(space, self.w_socket, False) elif ssl_err == SSL_ERROR_WANT_WRITE: - sockstate = check_socket_and_wait_for_timeout( - self.space, self.w_socket, True) + sockstate = checkwait(space, self.w_socket, True) else: break if sockstate == SOCKET_HAS_TIMED_OUT: if ssl_err == SSL_ERROR_WANT_READ: - raise ssl_error(self.space, "The read operation timed out") + raise ssl_error(space, "The read operation timed out") else: - raise ssl_error(self.space, "The write operation timed out") + raise ssl_error(space, "The write operation timed out") elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT: - raise ssl_error(space, "Underlying socket too large for select().") + raise ssl_error(space, + "Underlying socket too large for select().") elif sockstate != SOCKET_OPERATION_OK: # Retain the SSL error code break @@ -392,37 +376,31 @@ return space.w_None name = libssl_SSL_CIPHER_get_name(current) - if name: - w_name = space.wrap(rffi.charp2str(name)) - else: - w_name = space.w_None + w_name = space.wrap(rffi.charp2str(name)) if name else space.w_None proto = libssl_SSL_CIPHER_get_version(current) - if proto: - w_proto = space.wrap(rffi.charp2str(proto)) - else: - w_proto = space.w_None + w_proto = space.wrap(rffi.charp2str(proto)) if proto else space.w_None bits = libssl_SSL_CIPHER_get_bits(current, lltype.nullptr(rffi.INTP.TO)) w_bits = space.newint(bits) - return space.newtuple([w_name, w_proto, w_bits]) @unwrap_spec(der=bool) - def peer_certificate(self, der=False): + def peer_certificate(self, space, der=False): """peer_certificate([der=False]) -> certificate - Returns the certificate for the peer. If no certificate was provided, - returns None. 
If a certificate was provided, but not validated, returns - an empty dictionary. Otherwise returns a dict containing information - about the peer certificate. + Returns the certificate for the peer. If no certificate was + provided, returns None. If a certificate was provided, but not + validated, returns an empty dictionary. Otherwise returns a + dict containing information about the peer certificate. - If the optional argument is True, returns a DER-encoded copy of the - peer certificate, or None if no certificate was provided. This will - return the certificate even if it wasn't validated.""" + If the optional argument is True, returns a DER-encoded copy of + the peer certificate, or None if no certificate was provided. + This will return the certificate even if it wasn't validated. + """ if not self.peer_cert: - return self.space.w_None + return space.w_None if der: # return cert in DER-encoded format @@ -430,20 +408,19 @@ buf_ptr[0] = lltype.nullptr(rffi.CCHARP.TO) length = libssl_i2d_X509(self.peer_cert, buf_ptr) if length < 0: - raise _ssl_seterror(self.space, self, length) + raise _ssl_seterror(space, self, length) try: # this is actually an immutable bytes sequence - return self.space.wrap(rffi.charpsize2str(buf_ptr[0], - length)) + return space.wrap(rffi.charpsize2str(buf_ptr[0], length)) finally: libssl_OPENSSL_free(buf_ptr[0]) else: verification = libssl_SSL_CTX_get_verify_mode( libssl_SSL_get_SSL_CTX(self.ssl)) if not verification & SSL_VERIFY_PEER: - return self.space.newdict() + return space.newdict() else: - return _decode_certificate(self.space, self.peer_cert) + return _decode_certificate(space, self.peer_cert) def _decode_certificate(space, certificate, verbose=False): w_retval = space.newdict() @@ -580,15 +557,16 @@ name = libssl_sk_GENERAL_NAME_value(names, j) gntype = intmask(name[0].c_type) if gntype == GEN_DIRNAME: - # we special-case DirName as a tuple of tuples of attributes + # we special-case DirName as a tuple of tuples of + # 
attributes dirname = libssl_pypy_GENERAL_NAME_dirn(name) w_t = space.newtuple([ space.wrap("DirName"), _create_tuple_for_X509_NAME(space, dirname) ]) elif gntype in (GEN_EMAIL, GEN_DNS, GEN_URI): - # GENERAL_NAME_print() doesn't handle NULL bytes in ASN1_string - # correctly, CVE-2013-4238 + # GENERAL_NAME_print() doesn't handle NULL bytes in + # ASN1_string correctly, CVE-2013-4238 if gntype == GEN_EMAIL: v = space.wrap("email") elif gntype == GEN_DNS: @@ -665,26 +643,11 @@ sock_fd = space.int_w(space.call_method(w_sock, "fileno")) w_timeout = space.call_method(w_sock, "gettimeout") - if space.is_none(w_timeout): - has_timeout = False - else: - has_timeout = True - if space.is_none(w_key_file): - key_file = None - else: - key_file = space.str_w(w_key_file) - if space.is_none(w_cert_file): - cert_file = None - else: - cert_file = space.str_w(w_cert_file) - if space.is_none(w_cacerts_file): - cacerts_file = None - else: - cacerts_file = space.str_w(w_cacerts_file) - if space.is_none(w_ciphers): - ciphers = None - else: - ciphers = space.str_w(w_ciphers) + has_timeout = not space.is_none(w_timeout) + key_file = space.str_or_None_w(w_key_file) + cert_file = space.str_or_None_w(w_cert_file) + cacerts_file = space.str_or_None_w(w_cacerts_file) + ciphers = space.str_or_None_w(w_ciphers) if side == PY_SSL_SERVER and (not key_file or not cert_file): raise ssl_error(space, "Both the key & certificate files " @@ -747,8 +710,8 @@ libssl_SSL_set_fd(ss.ssl, sock_fd) # set the socket for SSL # The ACCEPT_MOVING_WRITE_BUFFER flag is necessary because the address # of a str object may be changed by the garbage collector. 
- libssl_SSL_set_mode(ss.ssl, - SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) + libssl_SSL_set_mode( + ss.ssl, SSL_MODE_AUTO_RETRY | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) # If the socket is in non-blocking mode or timeout mode, set the BIO # to non-blocking mode (blocking is the default) @@ -765,7 +728,7 @@ ss.w_socket = w_sock return ss -def check_socket_and_wait_for_timeout(space, w_sock, writing): +def checkwait(space, w_sock, writing): """If the socket has a timeout, do a select()/poll() on the socket. The argument writing indicates the direction. Returns one of the possibilities in the timeout_state enum (above).""" diff --git a/pypy/module/_winreg/interp_winreg.py b/pypy/module/_winreg/interp_winreg.py --- a/pypy/module/_winreg/interp_winreg.py +++ b/pypy/module/_winreg/interp_winreg.py @@ -667,11 +667,6 @@ space.wrap(nValues[0]), space.wrap(l)]) -def str_or_None_w(space, w_obj): - if space.is_w(w_obj, space.w_None): - return None - return space.str_w(w_obj) - def ConnectRegistry(space, w_machine, w_hkey): """key = ConnectRegistry(computer_name, key) @@ -683,7 +678,7 @@ The return value is the handle of the opened key. If the function fails, an EnvironmentError exception is raised.""" - machine = str_or_None_w(space, w_machine) + machine = space.str_or_None_w(w_machine) hkey = hkey_w(w_hkey, space) with lltype.scoped_alloc(rwinreg.PHKEY.TO, 1) as rethkey: ret = rwinreg.RegConnectRegistry(machine, hkey, rethkey) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -228,8 +228,11 @@ Convert the array to an array of machine values and return the string representation. 
""" + size = self.len + if size == 0: + return space.wrap('') cbuf = self._charbuf_start() - s = rffi.charpsize2str(cbuf, self.len * self.itemsize) + s = rffi.charpsize2str(cbuf, size * self.itemsize) self._charbuf_stop() return self.space.wrap(s) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -418,6 +418,10 @@ assert self.array('u', unicode('hello')).tounicode() == \ unicode('hello') + def test_empty_tostring(self): + a = self.array('l') + assert a.tostring() == b'' + def test_buffer(self): a = self.array('h', 'Hi') buf = buffer(a) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -511,6 +511,9 @@ return space.wrap(dtype.itemtype.to_str(read_val)) return read_val + def descr_iter(self, space): + return space.newseqiter(self) + def descr_setitem(self, space, w_item, w_value): if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) @@ -782,6 +785,7 @@ __new__ = interp2app(W_VoidBox.descr__new__.im_func), __getitem__ = interp2app(W_VoidBox.descr_getitem), __setitem__ = interp2app(W_VoidBox.descr_setitem), + __iter__ = interp2app(W_VoidBox.descr_iter), ) W_CharacterBox.typedef = TypeDef("numpy.character", W_FlexibleBox.typedef, diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -75,7 +75,7 @@ self.base = subdtype.base def __repr__(self): - if self.fields is not None: + if self.fields: return '' % self.fields return '' % self.itemtype @@ -258,7 +258,7 @@ def _compute_hash(self, space, x): from rpython.rlib.rarithmetic import intmask - if self.fields is None and self.subdtype is None: + if not self.fields and self.subdtype is None: endian = self.byteorder if endian == NPY.NATIVE: endian = 
NPY.NATBYTE @@ -271,7 +271,7 @@ if self.is_flexible(): y = intmask((1000003 * y) ^ self.alignment) return intmask((1000003 * x) ^ y) - if self.fields is not None: + if self.fields: for name, (offset, subdtype) in self.fields.iteritems(): assert isinstance(subdtype, W_Dtype) y = intmask(1000003 * (0x345678 ^ compute_hash(name))) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -428,6 +428,8 @@ for t in [np.int_, np.float_]: dt = np.dtype(t) dt1 = dt.newbyteorder().newbyteorder() + assert dt.isbuiltin + assert not dt1.isbuiltin dt2 = dt.newbyteorder("<") dt3 = dt.newbyteorder(">") assert dt.byteorder != dt1.byteorder diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -403,3 +403,18 @@ if wr() is not None: import gc; gc.collect() assert wr() is None # 'data' does not leak + + def test_windows_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + p_freq = ffi.new("LONGLONG *") + res = m.QueryPerformanceFrequency(p_freq) + assert res != 0 + assert p_freq[0] != 0 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -6,18 +6,20 @@ if '_cffi_backend' in sys.builtin_module_names: py.test.skip("this is embedded version") -BACKEND_VERSIONS = { - '0.4.2': '0.4', # did not change - '0.7.1': '0.7', 
# did not change - '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change (essentially) - } +#BACKEND_VERSIONS = { +# '0.4.2': '0.4', # did not change +# '0.7.1': '0.7', # did not change +# '0.7.2': '0.7', # did not change +# '0.8.1': '0.8', # did not change (essentially) +# '0.8.4': '0.8.3', # did not change +# } def test_version(): v = cffi.__version__ version_info = '.'.join(str(i) for i in cffi.__version_info__) assert v == version_info - assert BACKEND_VERSIONS.get(v, v) == _cffi_backend.__version__ + #v = BACKEND_VERSIONS.get(v, v) + assert v == _cffi_backend.__version__ def test_doc_version(): parent = os.path.dirname(os.path.dirname(__file__)) @@ -48,5 +50,5 @@ v = cffi.__version__ p = os.path.join(parent, 'c', 'test_c.py') content = open(p).read() - assert (('assert __version__ == "%s"' % BACKEND_VERSIONS.get(v, v)) - in content) + #v = BACKEND_VERSIONS.get(v, v) + assert (('assert __version__ == "%s"' % v) in content) diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -41,6 +41,8 @@ return ''.join(self.data) def _new(self, value): + if value is self.data: + value = value[:] return W_BytearrayObject(value) def _new_from_buffer(self, buffer): diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. 
- """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -842,8 +842,6 @@ W_Lists do not switch back to EmptyListStrategy when becoming empty again. """ - _applevel_repr = "empty" - def __init__(self, space): ListStrategy.__init__(self, space) @@ -1102,8 +1100,6 @@ method providing only positive length. The storage is a one element tuple with positive integer storing length.""" - _applevel_repr = "simple_range" - erase, unerase = rerased.new_erasing_pair("simple_range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1176,8 +1172,6 @@ destroying the range (inserting, appending non-ints) the strategy is switched to IntegerListStrategy.""" - _applevel_repr = "range" - erase, unerase = rerased.new_erasing_pair("range") erase = staticmethod(erase) unerase = staticmethod(unerase) @@ -1555,7 +1549,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "object" def unwrap(self, w_obj): return w_obj @@ -1590,7 +1583,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0 - _applevel_repr = "int" def wrap(self, intval): return self.space.wrap(intval) @@ -1644,7 +1636,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = 0.0 - _applevel_repr = "float" def wrap(self, floatval): return self.space.wrap(floatval) @@ -1677,7 +1668,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "bytes" def wrap(self, stringval): return self.space.wrap(stringval) @@ -1710,7 +1700,6 @@ import_from_mixin(AbstractUnwrappedStrategy) _none_value = None - _applevel_repr = "unicode" def wrap(self, stringval): return self.space.wrap(stringval) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- 
a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1060,10 +1060,14 @@ return storage, strategy def symmetric_difference(self, w_set, w_other): + if w_other.length() == 0: + return w_set.copy_real() storage, strategy = self._symmetric_difference_base(w_set, w_other) return w_set.from_storage_and_strategy(storage, strategy) def symmetric_difference_update(self, w_set, w_other): + if w_other.length() == 0: + return storage, strategy = self._symmetric_difference_base(w_set, w_other) w_set.strategy = strategy w_set.sstorage = storage @@ -1181,7 +1185,8 @@ d_other = self.unerase(w_other.sstorage) d_set.update(d_other) return - + if w_other.length() == 0: + return w_set.switch_to_object_strategy(self.space) w_set.update(w_other) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -455,7 +455,7 @@ d = width - len(value) if d > 0: fillchar = self._multi_chr(fillchar[0]) - value += d * fillchar + value = value + fillchar * d return self._new(value) diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py --- a/pypy/objspace/std/test/test_bytearrayobject.py +++ b/pypy/objspace/std/test/test_bytearrayobject.py @@ -223,6 +223,20 @@ check(bytearray('abc').rstrip(memoryview('c')), 'ab') check(bytearray('aba').strip('a'), 'b') + def test_xjust_no_mutate(self): + # a previous regression + b = bytearray(b'') + assert b.ljust(1) == bytearray(b' ') + assert not len(b) + + b2 = b.ljust(0) + b2 += b' ' + assert not len(b) + + b2 = b.rjust(0) + b2 += b' ' + assert not len(b) + def test_split(self): # methods that should return a sequence of bytearrays def check(result, expected): diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -960,3 +960,35 @@ # did not 
work before because of an optimization that swaps both # operands when the first set is larger than the second assert type(frozenset([1, 2]) & set([2])) is frozenset + + def test_update_bug_strategy(self): + from __pypy__ import strategy + s = set([1, 2, 3]) + assert strategy(s) == "IntegerSetStrategy" + s.update(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s |= set() + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).difference(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s.difference_update(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).symmetric_difference(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]) + s.symmetric_difference_update(set()) + assert strategy(s) == "IntegerSetStrategy" + # + s = set([1, 2, 3]).intersection(set()) + assert strategy(s) == "EmptySetStrategy" + # + s = set([1, 2, 3]) + s.intersection_update(set()) + assert strategy(s) == "EmptySetStrategy" diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4276,6 +4276,15 @@ py.test.raises(annmodel.AnnotatorError, a.build_types, f, [annmodel.s_None]) + def test_class___name__(self): + class Abc(object): + pass + def f(): + return Abc().__class__.__name__ + a = self.RPythonAnnotator() + s = a.build_types(f, []) + assert isinstance(s, annmodel.SomeString) + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -745,6 +745,11 @@ class __extend__(SomePBC): def getattr(self, s_attr): + assert s_attr.is_constant() + if s_attr.const == '__name__': + from rpython.annotator.description import ClassDesc + if self.getKind() is ClassDesc: + return SomeString() bookkeeper = getbookkeeper() return 
bookkeeper.pbc_getattr(self, s_attr) getattr.can_only_throw = [] diff --git a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). """ - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) 
# + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): + self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -112,11 +112,7 @@ self.vtable_counter += 1 S = self.get_random_structure_type(r, with_vtable=vtable, cache=False) name = S._name - vtable.name = lltype.malloc(lltype.Array(lltype.Char), len(name)+1, - immortal=True) - for i in range(len(name)): - vtable.name[i] = name[i] - vtable.name[len(name)] = '\x00' + vtable.name = rclass.alloc_array_name(name) self.structure_types_and_vtables.append((S, vtable)) # heaptracker.register_known_gctype(self.cpu, vtable, S) diff --git a/rpython/jit/codewriter/assembler.py b/rpython/jit/codewriter/assembler.py --- a/rpython/jit/codewriter/assembler.py +++ b/rpython/jit/codewriter/assembler.py @@ -248,7 +248,7 @@ if isinstance(TYPE, lltype.FuncType): name = value._obj._name elif TYPE == rclass.OBJECT_VTABLE: - name = ''.join(value.name).rstrip('\x00') + name = ''.join(value.name.chars) else: return addr = llmemory.cast_ptr_to_adr(value) diff --git a/rpython/jit/codewriter/heaptracker.py 
b/rpython/jit/codewriter/heaptracker.py --- a/rpython/jit/codewriter/heaptracker.py +++ b/rpython/jit/codewriter/heaptracker.py @@ -66,11 +66,7 @@ def set_testing_vtable_for_gcstruct(GCSTRUCT, vtable, name): # only for tests that need to register the vtable of their malloc'ed # structures in case they are GcStruct inheriting from OBJECT. - namez = name + '\x00' - vtable.name = lltype.malloc(rclass.OBJECT_VTABLE.name.TO, len(namez), - immortal=True) - for i in range(len(namez)): - vtable.name[i] = namez[i] + vtable.name = rclass.alloc_array_name(name) testing_gcstruct2vtable[GCSTRUCT] = vtable testing_gcstruct2vtable = {} diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE @@ -331,7 +331,7 @@ def get_name_from_address(self, addr): # hack try: - return "".join(addr.ptr.name)[:-1] # remove \x00 + return "".join(addr.ptr.name.chars) except AttributeError: return "" diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1377,49 +1377,53 @@ def do_residual_call(self, funcbox, argboxes, descr, pc, assembler_call=False, assembler_call_jd=None): - # First build allboxes: it may need some reordering from the - # list provided in argboxes, depending on the order in which - # the arguments are expected by the function - # - allboxes = self._build_allboxes(funcbox, argboxes, descr) - effectinfo = descr.get_extra_info() - if (assembler_call or - 
effectinfo.check_forces_virtual_or_virtualizable()): - # residual calls require attention to keep virtualizables in-sync - self.metainterp.clear_exception() - if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: - resbox = self._do_jit_force_virtual(allboxes, descr, pc) + debug_start("jit-residual-call") + try: + # First build allboxes: it may need some reordering from the + # list provided in argboxes, depending on the order in which + # the arguments are expected by the function + # + allboxes = self._build_allboxes(funcbox, argboxes, descr) + effectinfo = descr.get_extra_info() + if (assembler_call or + effectinfo.check_forces_virtual_or_virtualizable()): + # residual calls require attention to keep virtualizables in-sync + self.metainterp.clear_exception() + if effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: + resbox = self._do_jit_force_virtual(allboxes, descr, pc) + if resbox is not None: + return resbox + self.metainterp.vable_and_vrefs_before_residual_call() + resbox = self.metainterp.execute_and_record_varargs( + rop.CALL_MAY_FORCE, allboxes, descr=descr) + if effectinfo.is_call_release_gil(): + self.metainterp.direct_call_release_gil() + self.metainterp.vrefs_after_residual_call() + vablebox = None + if assembler_call: + vablebox = self.metainterp.direct_assembler_call( + assembler_call_jd) if resbox is not None: - return resbox - self.metainterp.vable_and_vrefs_before_residual_call() - resbox = self.metainterp.execute_and_record_varargs( - rop.CALL_MAY_FORCE, allboxes, descr=descr) - if effectinfo.is_call_release_gil(): - self.metainterp.direct_call_release_gil() - self.metainterp.vrefs_after_residual_call() - vablebox = None - if assembler_call: - vablebox = self.metainterp.direct_assembler_call( - assembler_call_jd) - if resbox is not None: - self.make_result_of_lastop(resbox) - self.metainterp.vable_after_residual_call(funcbox) - self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) - if vablebox is not None: - 
self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) - self.metainterp.handle_possible_exception() - # XXX refactor: direct_libffi_call() is a hack - if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: - self.metainterp.direct_libffi_call() - return resbox - else: - effect = effectinfo.extraeffect - if effect == effectinfo.EF_LOOPINVARIANT: - return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, - descr, False, False) - exc = effectinfo.check_can_raise() - pure = effectinfo.check_is_elidable() - return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + self.make_result_of_lastop(resbox) + self.metainterp.vable_after_residual_call(funcbox) + self.metainterp.generate_guard(rop.GUARD_NOT_FORCED, None) + if vablebox is not None: + self.metainterp.history.record(rop.KEEPALIVE, [vablebox], None) + self.metainterp.handle_possible_exception() + # XXX refactor: direct_libffi_call() is a hack + if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + self.metainterp.direct_libffi_call() + return resbox + else: + effect = effectinfo.extraeffect + if effect == effectinfo.EF_LOOPINVARIANT: + return self.execute_varargs(rop.CALL_LOOPINVARIANT, allboxes, + descr, False, False) + exc = effectinfo.check_can_raise() + pure = effectinfo.check_is_elidable() + return self.execute_varargs(rop.CALL, allboxes, descr, exc, pure) + finally: + debug_stop("jit-residual-call") def do_conditional_call(self, condbox, funcbox, argboxes, descr, pc): if isinstance(condbox, ConstInt) and condbox.value == 0: diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = 
['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -34,7 +34,7 @@ # def check_call(op, fname): assert op.opname == 'direct_call' - assert op.args[0].value._obj._name == fname + assert op.args[0].value._obj._name.startswith(fname) # ops = [op for block, op in graph.iterblockops()] check_call(ops[-3], 'virtual_ref') diff --git a/rpython/jit/tool/oparser_model.py b/rpython/jit/tool/oparser_model.py --- a/rpython/jit/tool/oparser_model.py +++ b/rpython/jit/tool/oparser_model.py @@ -124,6 +124,15 @@ class ExtendedTreeLoop(model.TreeLoop): + def as_json(self): + return { + 'comment': self.comment, + 'name': self.name, + 'operations': [op.as_json() for op in self.operations], + 'inputargs': self.inputargs, + 'last_offset': self.last_offset + } + def getboxes(self): def opboxes(operations): for op in operations: diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -340,6 +340,7 @@ # ____________________________________________________________ # VRefs + at specialize.argtype(0) def virtual_ref(x): """Creates a 'vref' object that contains a reference to 'x'. Calls to virtual_ref/virtual_ref_finish must be properly nested. 
The idea @@ -351,6 +352,7 @@ return DirectJitVRef(x) virtual_ref.oopspec = 'virtual_ref(x)' + at specialize.argtype(1) def virtual_ref_finish(vref, x): """See docstring in virtual_ref(x)""" keepalive_until_here(x) # otherwise the whole function call is removed diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -42,7 +42,7 @@ return ': '.join([str(x) for x in self.args]) def type_name(etype): - return ''.join(etype.name).rstrip('\x00') + return ''.join(etype.name.chars) class LLInterpreter(object): """ low level interpreter working with concrete values. """ @@ -145,7 +145,7 @@ assert isinstance(exc, LLException) klass, inst = exc.args[0], exc.args[1] for cls in enumerate_exceptions_top_down(): - if "".join(klass.name).rstrip("\0") == cls.__name__: + if "".join(klass.name.chars) == cls.__name__: return cls raise ValueError("couldn't match exception, maybe it" " has RPython attributes like OSError?") diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1154,7 +1154,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot From noreply at buildbot.pypy.org Tue Aug 5 10:59:53 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 5 Aug 2014 10:59:53 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Add support for str.join([]) Message-ID: <20140805085953.3F6F61C0588@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72696:9661a2f7dd0f Date: 2014-08-04 14:36 -0500 http://bitbucket.org/pypy/pypy/changeset/9661a2f7dd0f/ Log: Add support for str.join([]) diff --git a/rpython/rtyper/lltypesystem/rstr.py 
b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -820,6 +820,32 @@ i += 1 return result + def ll_join_chars_with_str(s, length, chars): + s_chars = s.chars + s_len = len(s_chars) + num_chars = length + if num_chars == 0: + return s.empty() + + try: + seplen = ovfcheck(s_len * (num_chars - 1)) + except OverflowError: + raise MemoryError + + # a single '+' at the end is allowed to overflow: it gets + # a negative result, and the gc will complain + result = s.malloc(num_chars + seplen) + res_index = 1 + result.chars[0] = chars[0] + i = 1 + while i < num_chars: + s.copy_contents(s, result, 0, res_index, s_len) + res_index += s_len + result.chars[res_index] = chars[i] + res_index += 1 + i += 1 + return result + @jit.oopspec('stroruni.slice(s1, start, stop)') @signature(types.any(), types.int(), types.int(), returns=types.any()) @jit.elidable diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -235,6 +235,8 @@ else: if r_lst.item_repr == rstr.repr: llfn = self.ll.ll_join + elif r_lst.item_repr == char_repr: + llfn = self.ll.ll_join_chars_with_str else: raise TyperError("sep.join() of non-string list: %r" % r_lst) return hop.gendirectcall(llfn, v_str, v_length, v_items) diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -516,6 +516,9 @@ res = self.interpret(lambda: const('.').join([const('abc'), const('def')]), []) assert self.ll_to_string(res) == const('abc.def') + res = self.interpret(lambda: const(' ').join([const('a'), const('b'), const('c')]), []) + assert self.ll_to_string(res) == const('a b c') + def fn(i, j): s1 = [ const(''), const(','), const(' and ')] s2 = [ [], [const('foo')], [const('bar'), const('baz'), const('bazz')]] From noreply at buildbot.pypy.org Tue Aug 5 10:59:54 2014 From: noreply at 
buildbot.pypy.org (waedt) Date: Tue, 5 Aug 2014 10:59:54 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fixed translation Message-ID: <20140805085954.853BB1C0588@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72697:1ae88439bc79 Date: 2014-08-05 02:54 -0500 http://bitbucket.org/pypy/pypy/changeset/1ae88439bc79/ Log: Fixed translation diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -53,6 +53,7 @@ @specialize.argtype(0) def ORD(s, pos): + assert s is not None if isinstance(s, Utf8Str): return utf8ord(s, pos) else: @@ -76,8 +77,17 @@ else: return s1 != s2 + at specialize.argtype(0, 1) +def LT(s1, s2): + assert s1 is not None + if isinstance(s1, Utf8Str): + return s1.__lt__(s2) + else: + return s1 < s2 + @specialize.argtype(0) def ADD(s1, s2): + assert s1 is not None if isinstance(s1, Utf8Str): return s1.__add__(s2) else: @@ -85,14 +95,17 @@ @specialize.argtype(0) def MUL(s1, s2): + assert s1 is not None if isinstance(s1, Utf8Str): return s1.__mul__(s2) else: + assert not isinstance(s1, Utf8Str) return s1 * s2 @specialize.argtype(0, 1) def IN(s1, s2): - if isinstance(s1, Utf8Str): + assert s1 is not None + if isinstance(s2, Utf8Str): return s2.__contains__(s1) else: return s1 in s2 @@ -468,6 +481,7 @@ break return Utf8Str(self.bytes.join([s.bytes for s in other]), is_ascii) else: + assert isinstance(other[0], str) return Utf8Str(self.bytes.join([s for s in other])) join._annspecialcase_ = 'specialize:arglistitemtype(1)' @@ -652,9 +666,6 @@ raise ValueError("Invalid unicode codepoint > 0x10FFFF.") self._length += 1 elif isinstance(c, str): - # TODO: Remove this check? 
- if len(c) == 1: - assert ord(c) < 128 self._builder.append(c) # XXX The assumption here is that the bytes being appended are diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, generic_new_descr, GetSetProperty) +from pypy.interpreter import utf8 from pypy.interpreter.utf8 import Utf8Str, utf8ord from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.module._io.interp_textio import W_TextIOBase, W_IncrementalNewlineDecoder @@ -28,7 +29,9 @@ newline = space.unicode_w(w_newline) if (newline is not None and len(newline) != 0 and - newline not in (Utf8Str('\n'), Utf8Str('\r\n'), Utf8Str('\r'))): + utf8.NE(newline, Utf8Str('\n')) and + utf8.NE(newline, Utf8Str('\r\n')) and + utf8.NE(newline, Utf8Str('\r'))): # Not using oefmt() because I don't know how to ues it # with unicode raise OperationError(space.w_ValueError, diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -521,7 +521,6 @@ if self.decoded_chars_used > 0 or size < available: start = self.decoded_chars_used end = self.decoded_chars_used + size - assert start >= 0 assert end >= 0 chars = self.decoded_chars[start:end] else: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -16,6 +16,7 @@ interp2app) from pypy.interpreter.generator import GeneratorIterator from pypy.interpreter.signature import Signature +from pypy.interpreter import utf8 from pypy.objspace.std import slicetype from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floatobject import W_FloatObject @@ -1807,7 +1808,7 @@ class 
UnicodeSort(UnicodeBaseTimSort): def lt(self, a, b): - return a < b + return utf8.LT(a, b) class CustomCompareSort(SimpleSort): diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -663,11 +663,11 @@ rpos = len(value) if left: - while lpos < rpos and value[lpos] in chars: + while lpos < rpos and utf8.IN(value[lpos], chars): lpos += 1 if right: - while rpos > lpos and value[rpos - 1] in chars: + while rpos > lpos and utf8.IN(value[rpos - 1], chars): rpos -= 1 assert rpos >= lpos # annotator hint, don't remove From noreply at buildbot.pypy.org Tue Aug 5 10:59:55 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 5 Aug 2014 10:59:55 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Don't pass r_uint's to Utf8Builder.append Message-ID: <20140805085955.B9CCE1C0588@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72698:965da5f2a8a3 Date: 2014-08-05 03:27 -0500 http://bitbucket.org/pypy/pypy/changeset/965da5f2a8a3/ Log: Don't pass r_uint's to Utf8Builder.append diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -642,9 +642,7 @@ self._is_ascii = False self._length += len(c) - elif isinstance(c, int) or isinstance(c, base_int): - c = intmask(c) - + elif isinstance(c, int): if c < 0x80: self._builder.append(chr(c)) elif c < 0x800: @@ -665,14 +663,13 @@ else: raise ValueError("Invalid unicode codepoint > 0x10FFFF.") self._length += 1 - elif isinstance(c, str): + else: + assert isinstance(c, str) self._builder.append(c) # XXX The assumption here is that the bytes being appended are # ASCII, ie 1:1 byte:char self._length += len(c) - else: - raise TypeError() @specialize.argtype(1) def append_slice(self, s, start, end): diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ 
b/pypy/interpreter/utf8_codecs.py @@ -163,7 +163,7 @@ else: # when we get here, chr is a 32-bit unicode character if chr <= MAXUNICODE: - builder.append(chr) + builder.append(intmask(chr)) pos += digits else: From noreply at buildbot.pypy.org Tue Aug 5 18:06:37 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 5 Aug 2014 18:06:37 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Split shadow.py into 3 storage*.py files. Message-ID: <20140805160637.D443F1D37DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1008:a42e8c9ffe86 Date: 2014-07-28 15:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a42e8c9ffe86/ Log: Split shadow.py into 3 storage*.py files. diff too long, truncating to 2000 out of 3376 lines diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,6 +1,6 @@ import os -from spyvm.shadow import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext +from spyvm.storage_contexts import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes, error from rpython.rlib import jit, rstackovf, unroll diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -1,5 +1,6 @@ -from spyvm.shadow import ContextPartShadow, ClassShadow +from spyvm.storage_contexts import ContextPartShadow +from spyvm.storage_classes import ClassShadow from spyvm import model, primitives, wrapper, error from spyvm.tool.bitmanipulation import splitter from rpython.rlib import objectmodel, unroll, jit diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py --- a/spyvm/interpreter_debugging.py +++ b/spyvm/interpreter_debugging.py @@ -1,6 +1,6 @@ import pdb -from spyvm.shadow import ContextPartShadow +from spyvm.storage_contexts import 
ContextPartShadow from spyvm import model, constants, primitives # This module patches up the interpreter and adds breakpoints at certain execution points. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -544,9 +544,9 @@ return "? (no class)" def invariant(self): - from spyvm import shadow + from spyvm import storage_classes return (W_AbstractObjectWithIdentityHash.invariant(self) and - isinstance(self.w_class.shadow, shadow.ClassShadow)) + isinstance(self.w_class.shadow, storage_classes.ClassShadow)) def _become(self, w_other): assert isinstance(w_other, W_AbstractObjectWithClassReference) @@ -578,7 +578,7 @@ self.initialize_storage(space, size, weak) def initialize_storage(self, space, size, weak=False): - from spyvm.shadow import empty_storage + from spyvm.storage import empty_storage storage = empty_storage(space, self, size, weak) self.store_shadow(storage) self.log_storage("Initialized") @@ -590,18 +590,18 @@ g_obj.fillin(space) pointers = g_self.get_pointers() # TODO -- Also handle weak objects loaded from images. 
- from spyvm.shadow import find_storage_for_objects + from spyvm.storage import find_storage_for_objects storage = find_storage_for_objects(space, pointers, g_self.isweak())(space, self, len(pointers)) self.store_shadow(storage) self.store_all(space, pointers) self.log_storage("Filledin", log_classname=False) def is_weak(self): - from shadow import WeakListStorageShadow + from storage import WeakListStorageShadow return isinstance(self.shadow, WeakListStorageShadow) def is_class(self, space): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow if isinstance(self.shadow, ClassShadow): return True return W_AbstractObjectWithClassReference.is_class(self, space) @@ -701,23 +701,23 @@ return shadow def get_shadow(self, space): - from spyvm.shadow import AbstractShadow + from spyvm.storage import AbstractShadow return self.as_special_get_shadow(space, AbstractShadow) def as_class_get_shadow(self, space): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow return jit.promote(self.as_special_get_shadow(space, ClassShadow)) def as_blockcontext_get_shadow(self, space): - from spyvm.shadow import BlockContextShadow + from spyvm.storage_contexts import BlockContextShadow return self.as_special_get_shadow(space, BlockContextShadow) def as_methodcontext_get_shadow(self, space): - from spyvm.shadow import MethodContextShadow + from spyvm.storage_contexts import MethodContextShadow return self.as_special_get_shadow(space, MethodContextShadow) def as_context_get_shadow(self, space): - from spyvm.shadow import ContextPartShadow + from spyvm.storage_contexts import ContextPartShadow if not isinstance(self.shadow, ContextPartShadow): if self.getclass(space).is_same_object(space.w_BlockContext): return self.as_blockcontext_get_shadow(space) @@ -727,15 +727,15 @@ return self.as_special_get_shadow(space, ContextPartShadow) def as_methoddict_get_shadow(self, space): - from spyvm.shadow import MethodDictionaryShadow + 
from spyvm.storage_classes import MethodDictionaryShadow return self.as_special_get_shadow(space, MethodDictionaryShadow) def as_cached_object_get_shadow(self, space): - from spyvm.shadow import CachedObjectShadow + from spyvm.storage import CachedObjectShadow return self.as_special_get_shadow(space, CachedObjectShadow) def as_observed_get_shadow(self, space): - from spyvm.shadow import ObserveeShadow + from spyvm.storage import ObserveeShadow return self.as_special_get_shadow(space, ObserveeShadow) def has_shadow(self): @@ -1267,7 +1267,7 @@ return True def create_frame(self, space, receiver, arguments=[]): - from spyvm.shadow import MethodContextShadow + from spyvm.storage_contexts import MethodContextShadow assert len(arguments) == self.argsize return MethodContextShadow(space, w_method=self, w_receiver=receiver, arguments=arguments) @@ -1313,7 +1313,7 @@ # methods in order to avoid side effects that prevent translation. w_class = self.safe_compiled_in() if isinstance(w_class, W_PointersObject): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow s_class = w_class.shadow if isinstance(s_class, ClassShadow): return "%s >> #%s" % (s_class.getname(), self.lookup_selector) diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, model_display, shadow, wrapper, version, display +from spyvm import constants, model, model_display, wrapper, version, display from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,6 +1,6 @@ from spyvm import model_display, model from spyvm.error import PrimitiveFailedError -from spyvm.shadow import AbstractCachingShadow +from spyvm.storage 
import AbstractCachingShadow from spyvm.plugins.plugin import Plugin from rpython.rlib import jit, objectmodel diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,7 +2,7 @@ import inspect import math import operator -from spyvm import model, model_display, shadow, error, constants, display +from spyvm import model, model_display, storage_contexts, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper @@ -947,7 +947,7 @@ def func(interp, s_frame, w_rcvr): # This takes a long time (at least in interpreted mode), and is not really necessary. # We are monitoring changes to MethodDictionaries, so there is no need for the image to tell us. - #walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) + #walk_gc_objects_of_type(storage_contexts.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) return w_rcvr # ___________________________________________________________________________ @@ -1292,7 +1292,7 @@ # The block bytecodes are stored inline: so we skip past the # byteodes to invoke this primitive to find them (hence +2) initialip = s_frame.pc() + 2 - s_new_context = shadow.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) + s_new_context = storage_contexts.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) return s_new_context.w_self() @expose_primitive(VALUE, result_is_new_frame=True) diff --git a/spyvm/shadow.py b/spyvm/shadow.py deleted file mode 100644 --- a/spyvm/shadow.py +++ /dev/null @@ -1,1312 +0,0 @@ -import sys, weakref -from spyvm import model, constants, error, wrapper, version -from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg -from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, objectmodel, jit, longlong2float -from 
rpython.rlib.objectmodel import import_from_mixin -from rpython.rlib.debug import make_sure_not_resized -from rpython.rlib.rstruct.runpack import runpack -from rpython.rtyper.lltypesystem import rffi, lltype - -class AbstractShadow(object): - """A shadow is an optional extra bit of information that - can be attached at run-time to any Smalltalk object. - """ - _attrs_ = ['_w_self', 'space'] - _immutable_fields_ = ['space'] - provides_getname = False - repr_classname = "AbstractShadow" - - def __init__(self, space, w_self, size): - self.space = space - assert w_self is None or isinstance(w_self, model.W_PointersObject) - self._w_self = w_self - def w_self(self): - return self._w_self - def getname(self): - raise NotImplementedError("Abstract class") - def __repr__(self): - if self.provides_getname: - return "<%s %s>" % (self.repr_classname, self.getname()) - else: - return "<%s>" % self.repr_classname - - def fetch(self, n0): - raise NotImplementedError("Abstract class") - def store(self, n0, w_value): - raise NotImplementedError("Abstract class") - def size(self): - raise NotImplementedError("Abstract class") - - # This will invoke an appropriate copy_from_* method. - # Overwriting this allows optimized transitions between certain storage types. 
- def copy_into(self, other_shadow): - other_shadow.copy_from(self) - - def attach_shadow(self): pass - - def copy_field_from(self, n0, other_shadow): - self.store(n0, other_shadow.fetch(n0)) - - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - self.copy_field_from(i, other_shadow) - - def copy_from_AllNil(self, all_nil_storage): - self.copy_from(all_nil_storage) - def copy_from_SmallIntegerOrNil(self, small_int_storage): - self.copy_from(small_int_storage) - def copy_from_FloatOrNil(self, float_storage): - self.copy_from(float_storage) - -class AbstractStorageShadow(AbstractShadow): - _attrs_ = [] - repr_classname = "AbstractStorageShadow" - def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self, size) - def store(self, n0, w_val): - if self.can_contain(w_val): - return self.do_store(n0, w_val) - new_storage = self.generalized_strategy_for(w_val) - return self._w_self.store_with_new_storage(new_storage, n0, w_val) - def can_contain(self, w_val): - return self.static_can_contain(self.space, w_val) - @staticmethod - def static_can_contain(space, w_val): - raise NotImplementedError() - def do_store(self, n0, w_val): - raise NotImplementedError() - def generalized_strategy_for(self, w_val): - raise NotImplementedError() - - def copy_from_AllNil(self, all_nil_storage): - pass # Already initialized - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - w_val = other_shadow.fetch(i) - if not w_val.is_nil(self.space): # nil fields already initialized - self.store(i, w_val) - -class AllNilStorageShadow(AbstractStorageShadow): - repr_classname = "AllNilStorageShadow" - _attrs_ = ['_size'] - _immutable_fields_ = ['_size'] - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self._size = size - def fetch(self, n0): - if n0 >= self._size: - raise IndexError - return 
self.space.w_nil - def copy_into(self, other_shadow): - other_shadow.copy_from_AllNil(self) - def do_store(self, n0, w_value): - pass - def size(self): - return self._size - def generalized_strategy_for(self, w_val): - return find_storage_for_objects(self.space, [w_val]) - @staticmethod - def static_can_contain(space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) - -class AbstractValueOrNilStorageMixin(object): - # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.storage = [self.nil_value] * size - - def size(self): - return len(self.storage) - - def generalized_strategy_for(self, w_val): - return ListStorageShadow - - def fetch(self, n0): - val = self.storage[n0] - if self.is_nil_value(val): - return self.space.w_nil - else: - return self.wrap(self.space, val) - - def do_store(self, n0, w_val): - if w_val.is_nil(self.space): - self.storage[n0] = self.nil_value - else: - self.storage[n0] = self.unwrap(self.space, w_val) - -# This is to avoid code duplication - at objectmodel.specialize.arg(0) -def _value_or_nil_can_handle(cls, space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ - (isinstance(w_val, cls.wrapper_class) \ - and not cls.is_nil_value(cls.unwrap(space, w_val))) - -class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): - repr_classname = "SmallIntegerOrNilStorageShadow" - nil_value = constants.MAXINT - wrapper_class = model.W_SmallInteger - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == SmallIntegerOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_int(val) - 
@staticmethod - def unwrap(space, w_val): - return space.unwrap_int(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_SmallIntegerOrNil(self) - -class FloatOrNilStorageShadow(AbstractStorageShadow): - repr_classname = "FloatOrNilStorageShadow" - nil_value = sys.float_info.max - wrapper_class = model.W_Float - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == FloatOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_float(val) - @staticmethod - def unwrap(space, w_val): - return space.unwrap_float(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_FloatOrNil(self) - -def empty_storage(space, w_self, size, weak=False): - if weak: - return WeakListStorageShadow(space, w_self, size) - if space.no_specialized_storage.is_set(): - return ListStorageShadow(space, w_self, size) - return AllNilStorageShadow(space, w_self, size) - - at jit.unroll_safe -def find_storage_for_objects(space, vars, weak=False): - if weak: - return WeakListStorageShadow - if space.no_specialized_storage.is_set(): - return ListStorageShadow - specialized_strategies = 3 - all_nil_can_handle = True - small_int_can_handle = True - float_can_handle = True - for w_obj in vars: - if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): - all_nil_can_handle = False - specialized_strategies = specialized_strategies - 1 - if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): - small_int_can_handle = False - specialized_strategies = specialized_strategies - 1 - if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): - float_can_handle = False - specialized_strategies = specialized_strategies - 1 - - if specialized_strategies <= 0: - return 
ListStorageShadow - - if all_nil_can_handle: - return AllNilStorageShadow - if small_int_can_handle: - return SmallIntegerOrNilStorageShadow - if float_can_handle: - return FloatOrNilStorageShadow - - # If this happens, please look for a bug in the code above. - assert False, "No strategy could be found for list..." - -class ListStorageMixin(object): - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.initialize_storage(size) - def size(self): - return len(self.storage) - -class ListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - repr_classname = "ListStorageShadow" - import_from_mixin(ListStorageMixin) - - def initialize_storage(self, size): - self.storage = [self.space.w_nil] * size - def fetch(self, n0): - return self.storage[n0] - def store(self, n0, w_value): - self.storage[n0] = w_value - -class WeakListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - repr_classname = "WeakListStorageShadow" - import_from_mixin(ListStorageMixin) - - def initialize_storage(self, size): - self.storage = [weakref.ref(self.space.w_nil)] * size - def fetch(self, n0): - weakobj = self.storage[n0] - return weakobj() or self.space.w_nil - def store(self, n0, w_value): - assert w_value is not None - self.storage[n0] = weakref.ref(w_value) - -class AbstractCachingShadow(ListStorageShadow): - _immutable_fields_ = ['version?'] - _attrs_ = ['version'] - repr_classname = "AbstractCachingShadow" - import_from_mixin(version.VersionMixin) - version = None - - def __init__(self, space, w_self, size): - ListStorageShadow.__init__(self, space, w_self, size) - self.changed() - -# ____________________________________________________________ - -POINTERS = 0 -BYTES = 1 -WORDS = 2 -WEAK_POINTERS = 3 -COMPILED_METHOD = 4 -FLOAT = 5 -LARGE_POSITIVE_INTEGER = 6 - -class ClassShadowError(error.SmalltalkException): - exception_type = 
"ClassShadowError" - -class ClassShadow(AbstractCachingShadow): - """A shadow for Smalltalk objects that are classes - (i.e. used as the class of another Smalltalk object). - """ - - _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", - "_s_methoddict", "_s_superclass", "subclass_s"] - name = '??? (incomplete class info)' - _s_superclass = _s_methoddict = None - provides_getname = True - repr_classname = "ClassShadow" - - def __init__(self, space, w_self, size): - self.subclass_s = {} - AbstractCachingShadow.__init__(self, space, w_self, size) - - def store(self, n0, w_val): - AbstractCachingShadow.store(self, n0, w_val) - if n0 == constants.CLASS_SUPERCLASS_INDEX: - self.store_w_superclass(w_val) - elif n0 == constants.CLASS_METHODDICT_INDEX: - self.store_w_methoddict(w_val) - elif n0 == constants.CLASS_FORMAT_INDEX: - # read and painfully decode the format - assert isinstance(w_val, model.W_SmallInteger) - classformat = self.space.unwrap_int(w_val) - # The classformat in Squeak, as an integer value, is: - # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> - # <6 bits=instSize\\64><1 bit=0> - # In Slang the value is read directly as a boxed integer, so that - # the code gets a "pointer" whose bits are set as above, but - # shifted one bit to the left and with the lowest bit set to 1. - - # Compute the instance size (really the size, not the number of bytes) - instsize_lo = (classformat >> 1) & 0x3F - instsize_hi = (classformat >> (9 + 1)) & 0xC0 - self._instance_size = (instsize_lo | instsize_hi) - 1 # subtract hdr - # decode the instSpec - format = (classformat >> 7) & 15 - self.instance_varsized = format >= 2 - - # In case of raised exception below. 
- self.changed() - - if format < 4: - self.instance_kind = POINTERS - elif format == 4: - self.instance_kind = WEAK_POINTERS - elif format == 6: - if self.space.w_Float.is_same_object(self.w_self()): - self.instance_kind = FLOAT - else: - self.instance_kind = WORDS - if self.instsize() != 0: - raise ClassShadowError("can't have both words and a non-zero " - "base instance size") - elif 8 <= format <= 11: - if self.space.w_LargePositiveInteger.is_same_object(self.w_self()): - self.instance_kind = LARGE_POSITIVE_INTEGER - else: - self.instance_kind = BYTES - if self.instsize() != 0: - raise ClassShadowError("can't have both bytes and a non-zero " - "base instance size") - elif 12 <= format <= 15: - self.instance_kind = COMPILED_METHOD - else: - raise ClassShadowError("unknown format %d" % (format,)) - else: - if self._w_self.w_class == self.space.classtable["w_Metaclass"]: - # In case of Metaclasses, the "instance" class is stored in the last field. - if n0 == self.size() - 1 and isinstance(w_val, model.W_PointersObject): - cl_shadow = w_val.as_class_get_shadow(self.space) - self.name = "%s class" % cl_shadow.getname() - else: - return - elif n0 == constants.CLASS_NAME_INDEX: - # In case of regular classes, the name is stored here. - self.store_w_name(w_val) - else: - return - # Some of the special info has changed -> Switch version. 
- self.changed() - - def store_w_superclass(self, w_class): - superclass = self._s_superclass - if w_class is None or w_class.is_nil(self.space): - if superclass: superclass.detach_s_class(self) - self._s_superclass = None - else: - assert isinstance(w_class, model.W_PointersObject) - s_new_superclass = w_class.as_class_get_shadow(self.space) - if superclass is s_new_superclass: - return - if superclass: superclass.detach_s_class(self) - self._s_superclass = s_new_superclass - s_new_superclass.attach_s_class(self) - - def store_w_methoddict(self, w_methoddict): - methoddict = self._s_methoddict - if w_methoddict is None or w_methoddict.is_nil(self.space): - if methoddict: methoddict.s_class = None - self._s_methoddict = None - else: - assert isinstance(w_methoddict, model.W_PointersObject) - s_new_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) - if methoddict is s_new_methoddict: - return - if methoddict: methoddict.s_class = None - self.store_s_methoddict(s_new_methoddict) - - def store_s_methoddict(self, s_methoddict): - s_methoddict.s_class = self - s_methoddict.sync_method_cache() - self._s_methoddict = s_methoddict - - def attach_s_class(self, s_other): - self.subclass_s[s_other] = None - - def detach_s_class(self, s_other): - del self.subclass_s[s_other] - - def store_w_name(self, w_name): - if isinstance(w_name, model.W_BytesObject): - self.name = w_name.as_string() - else: - self.name = None - - @jit.unroll_safe - def flush_method_caches(self): - look_in_shadow = self - while look_in_shadow is not None: - look_in_shadow.s_methoddict().flush_method_cache() - look_in_shadow = look_in_shadow._s_superclass - - def new(self, extrasize=0): - w_cls = self.w_self() - if self.instance_kind == POINTERS: - size = self.instsize() + extrasize - w_new = model.W_PointersObject(self.space, w_cls, size) - elif self.instance_kind == WORDS: - w_new = model.W_WordsObject(self.space, w_cls, extrasize) - elif self.instance_kind == BYTES: - w_new = 
model.W_BytesObject(self.space, w_cls, extrasize) - elif self.instance_kind == COMPILED_METHOD: - w_new = model.W_CompiledMethod(self.space, extrasize) - elif self.instance_kind == FLOAT: - w_new = model.W_Float(0) # Squeak gives a random piece of memory - elif self.instance_kind == LARGE_POSITIVE_INTEGER: - if extrasize <= 4: - w_new = model.W_LargePositiveInteger1Word(0, extrasize) - else: - w_new = model.W_BytesObject(self.space, w_cls, extrasize) - elif self.instance_kind == WEAK_POINTERS: - size = self.instsize() + extrasize - w_new = model.W_PointersObject(self.space, w_cls, size, weak=True) - else: - raise NotImplementedError(self.instance_kind) - return w_new - - def w_methoddict(self): - return self._s_methoddict.w_self() - - def s_methoddict(self): - return self._s_methoddict - - def s_superclass(self): - return self._s_superclass - - def getname(self): - return self.name - - # _______________________________________________________________ - # Methods for querying the format word, taken from the blue book: - # - # included so that we can reproduce code from the reference impl - # more easily - - def ispointers(self): - " True if instances of this class have data stored as pointers " - XXX # what about weak pointers? - return self.format == POINTERS - - def iswords(self): - " True if instances of this class have data stored as numerical words " - XXX # what about weak pointers? 
- return self.format in (POINTERS, WORDS) - - def isbytes(self): - " True if instances of this class have data stored as numerical bytes " - return self.format == BYTES - - @constant_for_version - def isvariable(self): - " True if instances of this class have indexed inst variables " - return self.instance_varsized - - @constant_for_version - def instsize(self): - " Number of named instance variables for each instance of this class " - return self._instance_size - - # _______________________________________________________________ - # Other Methods - - @constant_for_version_arg - def lookup(self, w_selector): - look_in_shadow = self - while look_in_shadow is not None: - w_method = look_in_shadow.s_methoddict().find_selector(w_selector) - if w_method is not None: - return w_method - look_in_shadow = look_in_shadow._s_superclass - raise error.MethodNotFound() - - def changed(self): - self.superclass_changed(version.Version()) - - # this is done, because the class-hierarchy contains cycles - def superclass_changed(self, version): - if self.version is not version: - self.version = version - for s_class in self.subclass_s: - s_class.superclass_changed(version) - - # _______________________________________________________________ - # Methods used only in testing - - def inherits_from(self, s_superclass): - "NOT_RPYTHON" # this is only for testing. - classshadow = self - while classshadow is not None: - if classshadow is s_superclass: - return True - classshadow = classshadow.s_superclass() - else: - return False - - def initialize_methoddict(self): - "NOT_RPYTHON" # this is only for testing. 
- if self._s_methoddict is None: - w_methoddict = model.W_PointersObject(self.space, None, 2) - w_methoddict.store(self.space, constants.METHODDICT_VALUES_INDEX, model.W_PointersObject(self.space, None, 0)) - self.store_s_methoddict(w_methoddict.as_methoddict_get_shadow(self.space)) - self.s_methoddict().invalid = False - - def installmethod(self, w_selector, w_method): - "NOT_RPYTHON" # this is only for testing. - assert not isinstance(w_selector, str) - self.initialize_methoddict() - self.s_methoddict().methoddict[w_selector] = w_method - if isinstance(w_method, model.W_CompiledMethod): - w_method.compiledin_class = self.w_self() - -class MethodDictionaryShadow(ListStorageShadow): - - _immutable_fields_ = ['invalid?', 's_class'] - _attrs_ = ['methoddict', 'invalid', 's_class'] - repr_classname = "MethodDictionaryShadow" - - def __init__(self, space, w_self, size): - self.invalid = True - self.s_class = None - self.methoddict = {} - ListStorageShadow.__init__(self, space, w_self, size) - - def update(self): - self.sync_method_cache() - - def find_selector(self, w_selector): - if self.invalid: - return None # we may be invalid if Smalltalk code did not call flushCache - return self.methoddict.get(w_selector, None) - - # We do not call update() after changes to ourselves: - # Whenever a method is added, it's keyword is added to w_self, then the - # w_compiled_method is added to our observee. - # sync_method_cache at this point would not have the desired effect, because in - # the Smalltalk Implementation, the dictionary changes first. Afterwards - # its contents array is filled with the value belonging to the new key. 
- def store(self, n0, w_value): - ListStorageShadow.store(self, n0, w_value) - if n0 == constants.METHODDICT_VALUES_INDEX: - self.setup_notification() - if n0 >= constants.METHODDICT_NAMES_INDEX: - self.invalid = True - - def setup_notification(self): - self.w_values().as_observed_get_shadow(self.space).notify(self) - - def w_values(self): - w_values = self.fetch(constants.METHODDICT_VALUES_INDEX) - assert isinstance(w_values, model.W_PointersObject) - return w_values - - def flush_method_cache(self): - # Lazy synchronization: Only flush the cache, if we are already synchronized. - if self.invalid: - self.sync_method_cache() - - def sync_method_cache(self): - if self.size() == 0: - return - self.methoddict = {} - size = self.size() - constants.METHODDICT_NAMES_INDEX - w_values = self.w_values() - for i in range(size): - w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) - if not w_selector.is_nil(self.space): - if isinstance(w_selector, model.W_BytesObject): - selector = w_selector.as_string() - else: - selector = "? (non-byteobject selector)" - pass - # TODO: Check if there's more assumptions about this. - # Putting any key in the methodDict and running with - # perform is actually supported in Squeak - # raise ClassShadowError("bogus selector in method dict") - w_compiledmethod = w_values.fetch(self.space, i) - if not isinstance(w_compiledmethod, model.W_CompiledMethod): - raise ClassShadowError("The methoddict must contain " - "CompiledMethods only, for now. 
" - "If the value observed is nil, our " - "invalidating mechanism may be broken.") - self.methoddict[w_selector] = w_compiledmethod - w_compiledmethod.set_lookup_class_and_name(self.s_class.w_self(), selector) - if self.s_class: - self.s_class.changed() - self.invalid = False - -class AbstractRedirectingShadow(AbstractShadow): - _attrs_ = ['_w_self_size'] - repr_classname = "AbstractRedirectingShadow" - - def __init__(self, space, w_self, size): - if w_self is not None: - self._w_self_size = w_self.size() - else: - self._w_self_size = size - AbstractShadow.__init__(self, space, w_self, self._w_self_size) - - def size(self): - return self._w_self_size - -class ContextState(object): - def __init__(self, name): - self.name = name - def __str__(self): - return self.name - def __repr__(self): - return self.name -InactiveContext = ContextState("InactiveContext") -ActiveContext = ContextState("ActiveContext") -DirtyContext = ContextState("DirtyContext") - -class ContextPartShadow(AbstractRedirectingShadow): - - __metaclass__ = extendabletype - _attrs_ = ['_s_sender', - '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w', 'state'] - repr_classname = "ContextPartShadow" - - _virtualizable_ = [ - '_s_sender', - "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size", 'state' - ] - - # ______________________________________________________________________ - # Initialization - - def __init__(self, space, w_self, size=0): - self._s_sender = None - AbstractRedirectingShadow.__init__(self, space, w_self, size) - self.instances_w = {} - self.state = InactiveContext - - def copy_from(self, other_shadow): - # Some fields have to be initialized before the rest, to ensure correct initialization. - privileged_fields = self.fields_to_copy_first() - for n0 in privileged_fields: - self.copy_field_from(n0, other_shadow) - - # Now the temp size will be known. 
- self.init_stack_and_temps() - - for n0 in range(self.size()): - if n0 not in privileged_fields: - self.copy_field_from(n0, other_shadow) - - def fields_to_copy_first(self): - return [] - - # ______________________________________________________________________ - # Accessing object fields - - def fetch(self, n0): - if n0 == constants.CTXPART_SENDER_INDEX: - return self.w_sender() - if n0 == constants.CTXPART_PC_INDEX: - return self.wrap_pc() - if n0 == constants.CTXPART_STACKP_INDEX: - return self.wrap_stackpointer() - if self.stackstart() <= n0 < self.external_stackpointer(): - temp_i = self.stackdepth() - (n0-self.stackstart()) - 1 - assert temp_i >= 0 - return self.peek(temp_i) - if self.external_stackpointer() <= n0 < self.stackend(): - return self.space.w_nil - else: - # XXX later should store tail out of known context part as well - raise error.WrapperException("Index in context out of bounds") - - def store(self, n0, w_value): - if n0 == constants.CTXPART_SENDER_INDEX: - assert isinstance(w_value, model.W_PointersObject) - if w_value.is_nil(self.space): - self.store_s_sender(None) - else: - self.store_s_sender(w_value.as_context_get_shadow(self.space)) - return - if n0 == constants.CTXPART_PC_INDEX: - return self.store_unwrap_pc(w_value) - if n0 == constants.CTXPART_STACKP_INDEX: - return self.unwrap_store_stackpointer(w_value) - if self.stackstart() <= n0 < self.external_stackpointer(): # XXX can be simplified? - temp_i = self.stackdepth() - (n0-self.stackstart()) - 1 - assert temp_i >= 0 - return self.set_top(w_value, temp_i) - if self.external_stackpointer() <= n0 < self.stackend(): - return - else: - # XXX later should store tail out of known context part as well - raise error.WrapperException("Index in context out of bounds") - - # === Sender === - - def store_s_sender(self, s_sender): - if s_sender is not self._s_sender: - self._s_sender = s_sender - # If new sender is None, we are just being marked as returned. 
- if s_sender is not None and self.state is ActiveContext: - self.state = DirtyContext - - def w_sender(self): - sender = self.s_sender() - if sender is None: - return self.space.w_nil - return sender.w_self() - - def s_sender(self): - return self._s_sender - - # === Stack Pointer === - - def unwrap_store_stackpointer(self, w_sp1): - # the stackpointer in the W_PointersObject starts counting at the - # tempframe start - # Stackpointer from smalltalk world == stacksize in python world - self.store_stackpointer(self.space.unwrap_int(w_sp1)) - - def store_stackpointer(self, size): - depth = self.stackdepth() - if size < depth: - # TODO Warn back to user - assert size >= 0 - self.pop_n(depth - size) - else: - for i in range(depth, size): - self.push(self.space.w_nil) - - def stackdepth(self): - return rarithmetic.intmask(self._stack_ptr) - - def wrap_stackpointer(self): - return self.space.wrap_int(self.stackdepth()) - - # === Program Counter === - - def store_unwrap_pc(self, w_pc): - if w_pc.is_nil(self.space): - self.store_pc(-1) - else: - pc = self.space.unwrap_int(w_pc) - pc -= self.w_method().bytecodeoffset() - pc -= 1 - self.store_pc(pc) - - def wrap_pc(self): - pc = self.pc() - if pc == -1: - return self.space.w_nil - else: - pc += 1 - pc += self.w_method().bytecodeoffset() - return self.space.wrap_int(pc) - - def pc(self): - return self._pc - - def store_pc(self, newpc): - assert newpc >= -1 - self._pc = newpc - - # === Subclassed accessors === - - def s_home(self): - raise NotImplementedError() - - def stackstart(self): - raise NotImplementedError() - - def w_receiver(self): - raise NotImplementedError() - - def w_method(self): - raise NotImplementedError() - - def tempsize(self): - raise NotImplementedError() - - def is_closure_context(self): - raise NotImplementedError() - - def is_BlockClosure_ensure(self): - raise NotImplementedError() - - def home_is_self(self): - raise NotImplementedError() - - # === Other properties of Contexts === - - def 
mark_returned(self): - self.store_pc(-1) - self.store_s_sender(None) - - def is_returned(self): - return self.pc() == -1 and self.w_sender().is_nil(self.space) - - def external_stackpointer(self): - return self.stackdepth() + self.stackstart() - - def stackend(self): - # XXX this is incorrect when there is subclassing - return self._w_self_size - - def fetch_next_bytecode(self): - pc = jit.promote(self._pc) - assert pc >= 0 - self._pc += 1 - return self.fetch_bytecode(pc) - - def fetch_bytecode(self, pc): - bytecode = self.w_method().fetch_bytecode(pc) - return ord(bytecode) - - # ______________________________________________________________________ - # Temporary Variables - # - # Every context has it's own stack. BlockContexts share their temps with - # their home contexts. MethodContexts created from a BlockClosure get their - # temps copied from the closure upon activation. Changes are not propagated back; - # this is handled by the compiler by allocating an extra Array for temps. - - def gettemp(self, index): - raise NotImplementedError() - - def settemp(self, index, w_value): - raise NotImplementedError() - - # ______________________________________________________________________ - # Stack Manipulation - - @jit.unroll_safe - def init_stack_and_temps(self): - stacksize = self.stackend() - self.stackstart() - tempsize = self.tempsize() - temps_and_stack = [None] * (stacksize + tempsize) - self._temps_and_stack = temps_and_stack - make_sure_not_resized(temps_and_stack) - for i in range(tempsize): - temps_and_stack[i] = self.space.w_nil - self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - - def stack_get(self, index0): - return self._temps_and_stack[index0] - - def stack_put(self, index0, w_val): - self._temps_and_stack[index0] = w_val - - def stack(self): - """NOT_RPYTHON""" # purely for testing - return self._temps_and_stack[self.tempsize():self._stack_ptr] - - def pop(self): - #assert self._stack_ptr > self.tempsize() - ptr = 
jit.promote(self._stack_ptr) - 1 - ret = self.stack_get(ptr) # you get OverflowError if the stack is empty - self.stack_put(ptr, None) - self._stack_ptr = ptr - return ret - - def push(self, w_v): - #assert self._stack_ptr >= self.tempsize() - #assert self._stack_ptr < self.stackend() - self.stackstart() + self.tempsize() - ptr = jit.promote(self._stack_ptr) - self.stack_put(ptr, w_v) - self._stack_ptr = ptr + 1 - - @jit.unroll_safe - def push_all(self, lst): - for elt in lst: - self.push(elt) - - def top(self): - return self.peek(0) - - def set_top(self, value, position=0): - rpos = rarithmetic.r_uint(position) - ptr = self._stack_ptr + ~rpos - self.stack_put(ptr, value) - - def peek(self, idx): - rpos = rarithmetic.r_uint(idx) - ptr = jit.promote(self._stack_ptr) + ~rpos - return self.stack_get(ptr) - - @jit.unroll_safe - def pop_n(self, n): - #assert n == 0 or self._stack_ptr - n >= self.tempsize() - jit.promote(self._stack_ptr) - while n > 0: - n -= 1 - self._stack_ptr -= 1 - self.stack_put(self._stack_ptr, None) - - @jit.unroll_safe - def pop_and_return_n(self, n): - result = [self.peek(i) for i in range(n - 1, -1, -1)] - self.pop_n(n) - return result - - # ______________________________________________________________________ - # Primitive support - - def store_instances_array(self, w_class, match_w): - # used for primitives 77 & 78 - self.instances_w[w_class] = match_w - - @jit.elidable - def instances_array(self, w_class): - return self.instances_w.get(w_class, None) - - # ______________________________________________________________________ - # Printing - - def argument_strings(self): - return [ w_arg.as_repr_string() for w_arg in self.w_arguments() ] - - def __str__(self): - retval = self.short_str() - retval += "\n%s" % self.w_method().bytecode_string(markBytecode=self.pc() + 1) - retval += "\nArgs:----------------" - argcount = self.w_method().argsize - j = 0 - for w_obj in self._temps_and_stack[:self._stack_ptr]: - if j == argcount: - retval += 
"\nTemps:---------------" - if j == self.tempsize(): - retval += "\nStack:---------------" - retval += "\n %0.2i: %s" % (j, w_obj.as_repr_string()) - j += 1 - retval += "\n---------------------" - return retval - - def short_str(self): - arg_strings = self.argument_strings() - if len(arg_strings) > 0: - args = " , ".join(arg_strings) - args = " (%d arg(s): %s)" % (len(arg_strings), args) - else: - args = "" - return '%s [pc: %d] (rcvr: %s)%s' % ( - self.method_str(), - self.pc() + 1, - self.w_receiver().as_repr_string(), - args - ) - - def print_stack(self, method=True): - return self.print_padded_stack(method)[1] - - def print_padded_stack(self, method): - padding = ret_str = '' - if self.s_sender() is not None: - padding, ret_str = self.s_sender().print_padded_stack(method) - if method: - desc = self.method_str() - else: - desc = self.short_str() - return padding + ' ', '%s\n%s%s' % (ret_str, padding, desc) - -class BlockContextShadow(ContextPartShadow): - _attrs_ = ['_w_home', '_initialip', '_eargc'] - repr_classname = "BlockContextShadow" - - # === Initialization === - - def __init__(self, space, w_self=None, size=0, w_home=None, argcnt=0, initialip=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - creating_w_self = w_self is None - if creating_w_self: - s_home = w_home.as_methodcontext_get_shadow(space) - contextsize = s_home.size() - s_home.tempsize() - w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) - ContextPartShadow.__init__(self, space, w_self, size) - if creating_w_self: - w_self.store_shadow(self) - self.store_expected_argument_count(argcnt) - self.store_initialip(initialip) - if w_home: - self.store_w_home(w_home) - self.store_pc(initialip) - self.init_stack_and_temps() - - def fields_to_copy_first(self): - return [ constants.BLKCTX_HOME_INDEX ] - - # === Implemented accessors === - - def s_home(self): - return self._w_home.as_methodcontext_get_shadow(self.space) - - def stackstart(self): - return 
constants.BLKCTX_STACK_START - - def tempsize(self): - # A blockcontext doesn't have any temps - return 0 - - def w_receiver(self): - return self.s_home().w_receiver() - - def w_method(self): - retval = self.s_home().w_method() - assert isinstance(retval, model.W_CompiledMethod) - return retval - - def is_closure_context(self): - return True - - def is_BlockClosure_ensure(self): - return False - - def home_is_self(self): - return False - - # === Temporary variables === - - def gettemp(self, index): - return self.s_home().gettemp(index) - - def settemp(self, index, w_value): - self.s_home().settemp(index, w_value) - - # === Accessing object fields === - - def fetch(self, n0): - if n0 == constants.BLKCTX_HOME_INDEX: - return self._w_home - if n0 == constants.BLKCTX_INITIAL_IP_INDEX: - return self.wrap_initialip() - if n0 == constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX: - return self.wrap_eargc() - else: - return ContextPartShadow.fetch(self, n0) - - def store(self, n0, w_value): - if n0 == constants.BLKCTX_HOME_INDEX: - return self.store_w_home(w_value) - if n0 == constants.BLKCTX_INITIAL_IP_INDEX: - return self.unwrap_store_initialip(w_value) - if n0 == constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX: - return self.unwrap_store_eargc(w_value) - else: - return ContextPartShadow.store(self, n0, w_value) - - def store_w_home(self, w_home): - assert isinstance(w_home, model.W_PointersObject) - self._w_home = w_home - - def unwrap_store_initialip(self, w_value): - initialip = self.space.unwrap_int(w_value) - initialip -= 1 + self.w_method().literalsize - self.store_initialip(initialip) - - def store_initialip(self, initialip): - self._initialip = initialip - - def wrap_initialip(self): - initialip = self.initialip() - initialip += 1 + self.w_method().literalsize - return self.space.wrap_int(initialip) - - def reset_pc(self): - self.store_pc(self.initialip()) - - def initialip(self): - return self._initialip - - def unwrap_store_eargc(self, w_value): - 
self.store_expected_argument_count(self.space.unwrap_int(w_value)) - - def wrap_eargc(self): - return self.space.wrap_int(self.expected_argument_count()) - - def expected_argument_count(self): - return self._eargc - - def store_expected_argument_count(self, argc): - self._eargc = argc - - # === Stack Manipulation === - - def reset_stack(self): - self.pop_n(self.stackdepth()) - - # === Printing === - - def w_arguments(self): - return [] - - def method_str(self): - return '[] in %s' % self.w_method().get_identifier_string() - -class MethodContextShadow(ContextPartShadow): - _attrs_ = ['closure', '_w_receiver', '_w_method', '_is_BlockClosure_ensure'] - repr_classname = "MethodContextShadow" - - # === Initialization === - - @jit.unroll_safe - def __init__(self, space, w_self=None, size=0, w_method=None, w_receiver=None, - arguments=[], closure=None, pc=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - ContextPartShadow.__init__(self, space, w_self, size) - self.store_w_receiver(w_receiver) - self.store_pc(pc) - self.closure = closure - - if w_method: - self.store_w_method(w_method) - # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. 
- size = w_method.compute_frame_size() + self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() - self._w_self_size = size - self.init_stack_and_temps() - else: - self._w_method = None - self._is_BlockClosure_ensure = False - - argc = len(arguments) - for i0 in range(argc): - self.settemp(i0, arguments[i0]) - - if closure: - for i0 in range(closure.size()): - self.settemp(i0+argc, closure.at0(i0)) - - def fields_to_copy_first(self): - return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - - # === Accessing object fields === - - def fetch(self, n0): - if n0 == constants.MTHDCTX_METHOD: - return self.w_method() - if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - if self.closure: - return self.closure._w_self - else: - return self.space.w_nil - if n0 == constants.MTHDCTX_RECEIVER: - return self.w_receiver() - temp_i = n0-constants.MTHDCTX_TEMP_FRAME_START - if (0 <= temp_i < self.tempsize()): - return self.gettemp(temp_i) - else: - return ContextPartShadow.fetch(self, n0) - - def store(self, n0, w_value): - if n0 == constants.MTHDCTX_METHOD: - return self.store_w_method(w_value) - if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - if w_value.is_nil(self.space): - self.closure = None - else: - self.closure = wrapper.BlockClosureWrapper(self.space, w_value) - return - if n0 == constants.MTHDCTX_RECEIVER: - self.store_w_receiver(w_value) - return - temp_i = n0-constants.MTHDCTX_TEMP_FRAME_START - if (0 <= temp_i < self.tempsize()): - return self.settemp(temp_i, w_value) - else: - return ContextPartShadow.store(self, n0, w_value) - - def store_w_receiver(self, w_receiver): - self._w_receiver = w_receiver - - # === Implemented Accessors === - - def s_home(self): - if self.is_closure_context(): - # this is a context for a blockClosure - w_outerContext = self.closure.outerContext() - assert isinstance(w_outerContext, model.W_PointersObject) - s_outerContext = w_outerContext.as_context_get_shadow(self.space) - # XXX check whether we can actually return 
from that context - if s_outerContext.is_returned(): - raise error.BlockCannotReturnError() - return s_outerContext.s_home() - else: - return self - - def stackstart(self): - return constants.MTHDCTX_TEMP_FRAME_START - - def store_w_method(self, w_method): - assert isinstance(w_method, model.W_CompiledMethod) - self._w_method = w_method - if w_method: - # Primitive 198 is used in BlockClosure >> ensure: - self._is_BlockClosure_ensure = (w_method.primitive() == 198) - - def w_receiver(self): - return self._w_receiver - - def w_method(self): - retval = self._w_method - assert isinstance(retval, model.W_CompiledMethod) - return retval - - def tempsize(self): - if not self.is_closure_context(): - return self.w_method().tempsize() - else: - return self.closure.tempsize() - - def is_closure_context(self): - return self.closure is not None - - def is_BlockClosure_ensure(self): - return self._is_BlockClosure_ensure - - def home_is_self(self): - return not self.is_closure_context() - - # ______________________________________________________________________ - # Marriage of MethodContextShadows with PointerObjects only when required - - def w_self(self): - if self._w_self is not None: - return self._w_self - else: - s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) - size = self.size() - s_MethodContext.instsize() - space = self.space - w_self = s_MethodContext.new(size) - assert isinstance(w_self, model.W_PointersObject) - w_self.store_shadow(self) - self._w_self = w_self - self._w_self_size = w_self.size() - return w_self - - # === Temporary variables === - - def gettemp(self, index0): - return self.stack_get(index0) - - def settemp(self, index0, w_value): - self.stack_put(index0, w_value) - - # === Printing === - - def w_arguments(self): - argcount = self.w_method().argsize - return [ self.stack_get(i) for i in range(argcount) ] - - def method_str(self): - block = '[] in ' if self.is_closure_context() else '' - return '%s%s' % (block, 
self.w_method().get_identifier_string()) - -class CachedObjectShadow(AbstractCachingShadow): - repr_classname = "CachedObjectShadow" - - @elidable_for_version - def fetch(self, n0): - return AbstractCachingShadow.fetch(self, n0) - - def store(self, n0, w_value): - AbstractCachingShadow.store(self, n0, w_value) - self.changed() - -class ObserveeShadow(ListStorageShadow): - _attrs_ = ['dependent'] - repr_classname = "ObserveeShadow" - def __init__(self, space, w_self, size): - ListStorageShadow.__init__(self, space, w_self, size) - self.dependent = None - - def store(self, n0, w_value): - ListStorageShadow.store(self, n0, w_value) - if self.dependent: - self.dependent.update() - - def notify(self, dependent): - if self.dependent is not None and dependent is not self.dependent: - raise RuntimeError('Meant to be observed by only one value, so far') - self.dependent = dependent diff --git a/spyvm/storage.py b/spyvm/storage.py new file mode 100644 --- /dev/null +++ b/spyvm/storage.py @@ -0,0 +1,319 @@ + +import sys, weakref +from spyvm import model, version, constants +from spyvm.version import elidable_for_version +from rpython.rlib import objectmodel, jit +from rpython.rlib.objectmodel import import_from_mixin + +class AbstractShadow(object): + """A shadow is an optional extra bit of information that + can be attached at run-time to any Smalltalk object. 
+ """ + _attrs_ = ['_w_self', 'space'] + _immutable_fields_ = ['space'] + provides_getname = False + repr_classname = "AbstractShadow" + + def __init__(self, space, w_self, size): + self.space = space + assert w_self is None or isinstance(w_self, model.W_PointersObject) + self._w_self = w_self + def w_self(self): + return self._w_self + def getname(self): + raise NotImplementedError("Abstract class") + def __repr__(self): + if self.provides_getname: + return "<%s %s>" % (self.repr_classname, self.getname()) + else: + return "<%s>" % self.repr_classname + + def fetch(self, n0): + raise NotImplementedError("Abstract class") + def store(self, n0, w_value): + raise NotImplementedError("Abstract class") + def size(self): + raise NotImplementedError("Abstract class") + + # This will invoke an appropriate copy_from_* method. + # Overwriting this allows optimized transitions between certain storage types. + def copy_into(self, other_shadow): + other_shadow.copy_from(self) + + def attach_shadow(self): pass + + def copy_field_from(self, n0, other_shadow): + self.store(n0, other_shadow.fetch(n0)) + + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + self.copy_field_from(i, other_shadow) + + def copy_from_AllNil(self, all_nil_storage): + self.copy_from(all_nil_storage) + def copy_from_SmallIntegerOrNil(self, small_int_storage): + self.copy_from(small_int_storage) + def copy_from_FloatOrNil(self, float_storage): + self.copy_from(float_storage) + +# ========== Storage classes implementing storage strategies ========== + +class AbstractStorageShadow(AbstractShadow): + _attrs_ = [] + repr_classname = "AbstractStorageShadow" + + def store(self, n0, w_val): + if self.can_contain(w_val): + return self.do_store(n0, w_val) + new_storage = self.generalized_strategy_for(w_val) + return self._w_self.store_with_new_storage(new_storage, n0, w_val) + def can_contain(self, w_val): + return self.static_can_contain(self.space, w_val) 
+ @staticmethod + def static_can_contain(space, w_val): + raise NotImplementedError() + def do_store(self, n0, w_val): + raise NotImplementedError() + def generalized_strategy_for(self, w_val): + raise NotImplementedError() + + def copy_from_AllNil(self, all_nil_storage): + pass # Already initialized + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + w_val = other_shadow.fetch(i) + if not w_val.is_nil(self.space): # nil fields already initialized + self.store(i, w_val) + +class AllNilStorageShadow(AbstractStorageShadow): + repr_classname = "AllNilStorageShadow" + _attrs_ = ['_size'] + _immutable_fields_ = ['_size'] + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self._size = size + def fetch(self, n0): + if n0 >= self._size: + raise IndexError + return self.space.w_nil + def copy_into(self, other_shadow): + other_shadow.copy_from_AllNil(self) + def do_store(self, n0, w_value): + pass + def size(self): + return self._size + def generalized_strategy_for(self, w_val): + return find_storage_for_objects(self.space, [w_val]) + @staticmethod + def static_can_contain(space, w_val): + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) + +class AbstractValueOrNilStorageMixin(object): + # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self.storage = [self.nil_value] * size + + def size(self): + return len(self.storage) + + def generalized_strategy_for(self, w_val): + return ListStorageShadow + + def fetch(self, n0): + val = self.storage[n0] + if self.is_nil_value(val): + return self.space.w_nil + else: + return self.wrap(self.space, val) + + def do_store(self, n0, w_val): + if w_val.is_nil(self.space): + self.storage[n0] = self.nil_value + else: + 
self.storage[n0] = self.unwrap(self.space, w_val) + +# This is to avoid code duplication + at objectmodel.specialize.arg(0) +def _value_or_nil_can_handle(cls, space, w_val): + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ + (isinstance(w_val, cls.wrapper_class) \ + and not cls.is_nil_value(cls.unwrap(space, w_val))) + +class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "SmallIntegerOrNilStorageShadow" + nil_value = constants.MAXINT + wrapper_class = model.W_SmallInteger + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return val == SmallIntegerOrNilStorageShadow.nil_value + @staticmethod + def wrap(space, val): + return space.wrap_int(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_int(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_SmallIntegerOrNil(self) + +class FloatOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "FloatOrNilStorageShadow" + nil_value = sys.float_info.max + wrapper_class = model.W_Float + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return val == FloatOrNilStorageShadow.nil_value + @staticmethod + def wrap(space, val): + return space.wrap_float(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_float(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_FloatOrNil(self) + +def empty_storage(space, w_self, size, weak=False): + if weak: + return WeakListStorageShadow(space, w_self, size) + if space.no_specialized_storage.is_set(): + return ListStorageShadow(space, w_self, size) + return AllNilStorageShadow(space, w_self, size) + + at 
jit.unroll_safe +def find_storage_for_objects(space, vars, weak=False): + if weak: + return WeakListStorageShadow + if space.no_specialized_storage.is_set(): + return ListStorageShadow + specialized_strategies = 3 + all_nil_can_handle = True + small_int_can_handle = True + float_can_handle = True + for w_obj in vars: + if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): + all_nil_can_handle = False + specialized_strategies = specialized_strategies - 1 + if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): + small_int_can_handle = False + specialized_strategies = specialized_strategies - 1 + if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): + float_can_handle = False + specialized_strategies = specialized_strategies - 1 + + if specialized_strategies <= 0: + return ListStorageShadow + + if all_nil_can_handle: + return AllNilStorageShadow + if small_int_can_handle: + return SmallIntegerOrNilStorageShadow + if float_can_handle: + return FloatOrNilStorageShadow + + # If this happens, please look for a bug in the code above. + assert False, "No strategy could be found for list..." 
+ +class ListStorageMixin(object): + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self.initialize_storage(size) + def size(self): + return len(self.storage) + +class ListStorageShadow(AbstractStorageShadow): + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + repr_classname = "ListStorageShadow" + import_from_mixin(ListStorageMixin) + + def initialize_storage(self, size): + self.storage = [self.space.w_nil] * size + def fetch(self, n0): + return self.storage[n0] + def store(self, n0, w_value): + self.storage[n0] = w_value + +class WeakListStorageShadow(AbstractStorageShadow): + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + repr_classname = "WeakListStorageShadow" + import_from_mixin(ListStorageMixin) + + def initialize_storage(self, size): + self.storage = [weakref.ref(self.space.w_nil)] * size + def fetch(self, n0): + weakobj = self.storage[n0] + return weakobj() or self.space.w_nil + def store(self, n0, w_value): + assert w_value is not None + self.storage[n0] = weakref.ref(w_value) + +# ========== Other storage classes, non-strategies ========== + +class AbstractRedirectingShadow(AbstractShadow): + _attrs_ = ['_w_self_size'] + repr_classname = "AbstractRedirectingShadow" + + def __init__(self, space, w_self, size): + if w_self is not None: + self._w_self_size = w_self.size() + else: + self._w_self_size = size + AbstractShadow.__init__(self, space, w_self, self._w_self_size) + + def size(self): + return self._w_self_size + +class AbstractCachingShadow(ListStorageShadow): + _immutable_fields_ = ['version?'] + _attrs_ = ['version'] + repr_classname = "AbstractCachingShadow" + import_from_mixin(version.VersionMixin) + version = None + + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) + self.changed() + +class CachedObjectShadow(AbstractCachingShadow): + repr_classname = "CachedObjectShadow" + + @elidable_for_version + def fetch(self, n0): 
+ return AbstractCachingShadow.fetch(self, n0) + + def store(self, n0, w_value): + AbstractCachingShadow.store(self, n0, w_value) + self.changed() + +class ObserveeShadow(ListStorageShadow): + _attrs_ = ['dependent'] + repr_classname = "ObserveeShadow" + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) + self.dependent = None + + def store(self, n0, w_value): + ListStorageShadow.store(self, n0, w_value) + if self.dependent: + self.dependent.update() + + def notify(self, dependent): + if self.dependent is not None and dependent is not self.dependent: + raise RuntimeError('Meant to be observed by only one value, so far') + self.dependent = dependent diff --git a/spyvm/storage_classes.py b/spyvm/storage_classes.py new file mode 100644 --- /dev/null +++ b/spyvm/storage_classes.py @@ -0,0 +1,346 @@ + +from spyvm import model, constants, error, wrapper, version +from spyvm.storage import AbstractCachingShadow, ListStorageShadow +from spyvm.version import constant_for_version, constant_for_version_arg +from rpython.rlib import jit + +POINTERS = 0 +BYTES = 1 +WORDS = 2 +WEAK_POINTERS = 3 +COMPILED_METHOD = 4 +FLOAT = 5 +LARGE_POSITIVE_INTEGER = 6 + +class ClassShadowError(error.SmalltalkException): + exception_type = "ClassShadowError" + +class ClassShadow(AbstractCachingShadow): + """A shadow for Smalltalk objects that are classes + (i.e. used as the class of another Smalltalk object). + """ + + _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", + "_s_methoddict", "_s_superclass", "subclass_s"] + name = '??? 
(incomplete class info)' + _s_superclass = _s_methoddict = None + provides_getname = True + repr_classname = "ClassShadow" + + def __init__(self, space, w_self, size): + self.subclass_s = {} + AbstractCachingShadow.__init__(self, space, w_self, size) + + def store(self, n0, w_val): + AbstractCachingShadow.store(self, n0, w_val) + if n0 == constants.CLASS_SUPERCLASS_INDEX: + self.store_w_superclass(w_val) + elif n0 == constants.CLASS_METHODDICT_INDEX: + self.store_w_methoddict(w_val) + elif n0 == constants.CLASS_FORMAT_INDEX: + # read and painfully decode the format + assert isinstance(w_val, model.W_SmallInteger) + classformat = self.space.unwrap_int(w_val) + # The classformat in Squeak, as an integer value, is: + # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> + # <6 bits=instSize\\64><1 bit=0> + # In Slang the value is read directly as a boxed integer, so that + # the code gets a "pointer" whose bits are set as above, but + # shifted one bit to the left and with the lowest bit set to 1. + + # Compute the instance size (really the size, not the number of bytes) + instsize_lo = (classformat >> 1) & 0x3F + instsize_hi = (classformat >> (9 + 1)) & 0xC0 + self._instance_size = (instsize_lo | instsize_hi) - 1 # subtract hdr + # decode the instSpec + format = (classformat >> 7) & 15 + self.instance_varsized = format >= 2 + + # In case of raised exception below. 
+ self.changed() + + if format < 4: + self.instance_kind = POINTERS + elif format == 4: + self.instance_kind = WEAK_POINTERS + elif format == 6: + if self.space.w_Float.is_same_object(self.w_self()): + self.instance_kind = FLOAT + else: + self.instance_kind = WORDS + if self.instsize() != 0: + raise ClassShadowError("can't have both words and a non-zero " + "base instance size") + elif 8 <= format <= 11: + if self.space.w_LargePositiveInteger.is_same_object(self.w_self()): + self.instance_kind = LARGE_POSITIVE_INTEGER + else: + self.instance_kind = BYTES + if self.instsize() != 0: + raise ClassShadowError("can't have both bytes and a non-zero " + "base instance size") + elif 12 <= format <= 15: + self.instance_kind = COMPILED_METHOD + else: + raise ClassShadowError("unknown format %d" % (format,)) + else: + if self._w_self.w_class == self.space.classtable["w_Metaclass"]: + # In case of Metaclasses, the "instance" class is stored in the last field. + if n0 == self.size() - 1 and isinstance(w_val, model.W_PointersObject): + cl_shadow = w_val.as_class_get_shadow(self.space) + self.name = "%s class" % cl_shadow.getname() + else: + return + elif n0 == constants.CLASS_NAME_INDEX: + # In case of regular classes, the name is stored here. + self.store_w_name(w_val) + else: + return + # Some of the special info has changed -> Switch version. 
+ self.changed() + + def store_w_superclass(self, w_class): + superclass = self._s_superclass + if w_class is None or w_class.is_nil(self.space): + if superclass: superclass.detach_s_class(self) + self._s_superclass = None + else: + assert isinstance(w_class, model.W_PointersObject) + s_new_superclass = w_class.as_class_get_shadow(self.space) + if superclass is s_new_superclass: + return + if superclass: superclass.detach_s_class(self) + self._s_superclass = s_new_superclass + s_new_superclass.attach_s_class(self) + + def store_w_methoddict(self, w_methoddict): + methoddict = self._s_methoddict + if w_methoddict is None or w_methoddict.is_nil(self.space): + if methoddict: methoddict.s_class = None + self._s_methoddict = None + else: + assert isinstance(w_methoddict, model.W_PointersObject) + s_new_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) + if methoddict is s_new_methoddict: + return + if methoddict: methoddict.s_class = None + self.store_s_methoddict(s_new_methoddict) + + def store_s_methoddict(self, s_methoddict): + s_methoddict.s_class = self + s_methoddict.sync_method_cache() + self._s_methoddict = s_methoddict + + def attach_s_class(self, s_other): + self.subclass_s[s_other] = None + + def detach_s_class(self, s_other): + del self.subclass_s[s_other] + + def store_w_name(self, w_name): + if isinstance(w_name, model.W_BytesObject): + self.name = w_name.as_string() + else: + self.name = None + + @jit.unroll_safe + def flush_method_caches(self): + look_in_shadow = self + while look_in_shadow is not None: + look_in_shadow.s_methoddict().flush_method_cache() + look_in_shadow = look_in_shadow._s_superclass + + def new(self, extrasize=0): + w_cls = self.w_self() + if self.instance_kind == POINTERS: + size = self.instsize() + extrasize + w_new = model.W_PointersObject(self.space, w_cls, size) From noreply at buildbot.pypy.org Tue Aug 5 18:06:39 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 5 Aug 2014 18:06:39 +0200 (CEST) 
Subject: [pypy-commit] lang-smalltalk storage: Merged. Message-ID: <20140805160639.305AF1D37DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r1009:ec60f7d18ecb Date: 2014-08-03 13:31 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ec60f7d18ecb/ Log: Merged. diff too long, truncating to 2000 out of 3376 lines diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -1,6 +1,6 @@ import os -from spyvm.shadow import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext +from spyvm.storage_contexts import MethodContextShadow, ActiveContext, InactiveContext, DirtyContext from spyvm import model, constants, wrapper, objspace, interpreter_bytecodes, error from rpython.rlib import jit, rstackovf, unroll diff --git a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -1,5 +1,6 @@ -from spyvm.shadow import ContextPartShadow, ClassShadow +from spyvm.storage_contexts import ContextPartShadow +from spyvm.storage_classes import ClassShadow from spyvm import model, primitives, wrapper, error from spyvm.tool.bitmanipulation import splitter from rpython.rlib import objectmodel, unroll, jit diff --git a/spyvm/interpreter_debugging.py b/spyvm/interpreter_debugging.py --- a/spyvm/interpreter_debugging.py +++ b/spyvm/interpreter_debugging.py @@ -1,6 +1,6 @@ import pdb -from spyvm.shadow import ContextPartShadow +from spyvm.storage_contexts import ContextPartShadow from spyvm import model, constants, primitives # This module patches up the interpreter and adds breakpoints at certain execution points. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -544,9 +544,9 @@ return "? 
(no class)" def invariant(self): - from spyvm import shadow + from spyvm import storage_classes return (W_AbstractObjectWithIdentityHash.invariant(self) and - isinstance(self.w_class.shadow, shadow.ClassShadow)) + isinstance(self.w_class.shadow, storage_classes.ClassShadow)) def _become(self, w_other): assert isinstance(w_other, W_AbstractObjectWithClassReference) @@ -578,7 +578,7 @@ self.initialize_storage(space, size, weak) def initialize_storage(self, space, size, weak=False): - from spyvm.shadow import empty_storage + from spyvm.storage import empty_storage storage = empty_storage(space, self, size, weak) self.store_shadow(storage) self.log_storage("Initialized") @@ -590,18 +590,18 @@ g_obj.fillin(space) pointers = g_self.get_pointers() # TODO -- Also handle weak objects loaded from images. - from spyvm.shadow import find_storage_for_objects + from spyvm.storage import find_storage_for_objects storage = find_storage_for_objects(space, pointers, g_self.isweak())(space, self, len(pointers)) self.store_shadow(storage) self.store_all(space, pointers) self.log_storage("Filledin", log_classname=False) def is_weak(self): - from shadow import WeakListStorageShadow + from storage import WeakListStorageShadow return isinstance(self.shadow, WeakListStorageShadow) def is_class(self, space): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow if isinstance(self.shadow, ClassShadow): return True return W_AbstractObjectWithClassReference.is_class(self, space) @@ -701,23 +701,23 @@ return shadow def get_shadow(self, space): - from spyvm.shadow import AbstractShadow + from spyvm.storage import AbstractShadow return self.as_special_get_shadow(space, AbstractShadow) def as_class_get_shadow(self, space): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow return jit.promote(self.as_special_get_shadow(space, ClassShadow)) def as_blockcontext_get_shadow(self, space): - from spyvm.shadow import BlockContextShadow + 
from spyvm.storage_contexts import BlockContextShadow return self.as_special_get_shadow(space, BlockContextShadow) def as_methodcontext_get_shadow(self, space): - from spyvm.shadow import MethodContextShadow + from spyvm.storage_contexts import MethodContextShadow return self.as_special_get_shadow(space, MethodContextShadow) def as_context_get_shadow(self, space): - from spyvm.shadow import ContextPartShadow + from spyvm.storage_contexts import ContextPartShadow if not isinstance(self.shadow, ContextPartShadow): if self.getclass(space).is_same_object(space.w_BlockContext): return self.as_blockcontext_get_shadow(space) @@ -727,15 +727,15 @@ return self.as_special_get_shadow(space, ContextPartShadow) def as_methoddict_get_shadow(self, space): - from spyvm.shadow import MethodDictionaryShadow + from spyvm.storage_classes import MethodDictionaryShadow return self.as_special_get_shadow(space, MethodDictionaryShadow) def as_cached_object_get_shadow(self, space): - from spyvm.shadow import CachedObjectShadow + from spyvm.storage import CachedObjectShadow return self.as_special_get_shadow(space, CachedObjectShadow) def as_observed_get_shadow(self, space): - from spyvm.shadow import ObserveeShadow + from spyvm.storage import ObserveeShadow return self.as_special_get_shadow(space, ObserveeShadow) def has_shadow(self): @@ -1267,7 +1267,7 @@ return True def create_frame(self, space, receiver, arguments=[]): - from spyvm.shadow import MethodContextShadow + from spyvm.storage_contexts import MethodContextShadow assert len(arguments) == self.argsize return MethodContextShadow(space, w_method=self, w_receiver=receiver, arguments=arguments) @@ -1313,7 +1313,7 @@ # methods in order to avoid side effects that prevent translation. 
w_class = self.safe_compiled_in() if isinstance(w_class, W_PointersObject): - from spyvm.shadow import ClassShadow + from spyvm.storage_classes import ClassShadow s_class = w_class.shadow if isinstance(s_class, ClassShadow): return "%s >> #%s" % (s_class.getname(), self.lookup_selector) diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, model_display, shadow, wrapper, version, display +from spyvm import constants, model, model_display, wrapper, version, display from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -1,6 +1,6 @@ from spyvm import model_display, model from spyvm.error import PrimitiveFailedError -from spyvm.shadow import AbstractCachingShadow +from spyvm.storage import AbstractCachingShadow from spyvm.plugins.plugin import Plugin from rpython.rlib import jit, objectmodel diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -2,7 +2,7 @@ import inspect import math import operator -from spyvm import model, model_display, shadow, error, constants, display +from spyvm import model, model_display, storage_contexts, error, constants, display from spyvm.error import PrimitiveFailedError, PrimitiveNotYetWrittenError from spyvm import wrapper @@ -947,7 +947,7 @@ def func(interp, s_frame, w_rcvr): # This takes a long time (at least in interpreted mode), and is not really necessary. # We are monitoring changes to MethodDictionaries, so there is no need for the image to tell us. 
- #walk_gc_objects_of_type(shadow.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) + #walk_gc_objects_of_type(storage_contexts.MethodDictionaryShadow, lambda s_dict: s_dict.flush_method_cache()) return w_rcvr # ___________________________________________________________________________ @@ -1292,7 +1292,7 @@ # The block bytecodes are stored inline: so we skip past the # byteodes to invoke this primitive to find them (hence +2) initialip = s_frame.pc() + 2 - s_new_context = shadow.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) + s_new_context = storage_contexts.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) return s_new_context.w_self() @expose_primitive(VALUE, result_is_new_frame=True) diff --git a/spyvm/shadow.py b/spyvm/shadow.py deleted file mode 100644 --- a/spyvm/shadow.py +++ /dev/null @@ -1,1312 +0,0 @@ -import sys, weakref -from spyvm import model, constants, error, wrapper, version -from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg -from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, objectmodel, jit, longlong2float -from rpython.rlib.objectmodel import import_from_mixin -from rpython.rlib.debug import make_sure_not_resized -from rpython.rlib.rstruct.runpack import runpack -from rpython.rtyper.lltypesystem import rffi, lltype - -class AbstractShadow(object): - """A shadow is an optional extra bit of information that - can be attached at run-time to any Smalltalk object. 
- """ - _attrs_ = ['_w_self', 'space'] - _immutable_fields_ = ['space'] - provides_getname = False - repr_classname = "AbstractShadow" - - def __init__(self, space, w_self, size): - self.space = space - assert w_self is None or isinstance(w_self, model.W_PointersObject) - self._w_self = w_self - def w_self(self): - return self._w_self - def getname(self): - raise NotImplementedError("Abstract class") - def __repr__(self): - if self.provides_getname: - return "<%s %s>" % (self.repr_classname, self.getname()) - else: - return "<%s>" % self.repr_classname - - def fetch(self, n0): - raise NotImplementedError("Abstract class") - def store(self, n0, w_value): - raise NotImplementedError("Abstract class") - def size(self): - raise NotImplementedError("Abstract class") - - # This will invoke an appropriate copy_from_* method. - # Overwriting this allows optimized transitions between certain storage types. - def copy_into(self, other_shadow): - other_shadow.copy_from(self) - - def attach_shadow(self): pass - - def copy_field_from(self, n0, other_shadow): - self.store(n0, other_shadow.fetch(n0)) - - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - self.copy_field_from(i, other_shadow) - - def copy_from_AllNil(self, all_nil_storage): - self.copy_from(all_nil_storage) - def copy_from_SmallIntegerOrNil(self, small_int_storage): - self.copy_from(small_int_storage) - def copy_from_FloatOrNil(self, float_storage): - self.copy_from(float_storage) - -class AbstractStorageShadow(AbstractShadow): - _attrs_ = [] - repr_classname = "AbstractStorageShadow" - def __init__(self, space, w_self, size): - AbstractShadow.__init__(self, space, w_self, size) - def store(self, n0, w_val): - if self.can_contain(w_val): - return self.do_store(n0, w_val) - new_storage = self.generalized_strategy_for(w_val) - return self._w_self.store_with_new_storage(new_storage, n0, w_val) - def can_contain(self, w_val): - return 
self.static_can_contain(self.space, w_val) - @staticmethod - def static_can_contain(space, w_val): - raise NotImplementedError() - def do_store(self, n0, w_val): - raise NotImplementedError() - def generalized_strategy_for(self, w_val): - raise NotImplementedError() - - def copy_from_AllNil(self, all_nil_storage): - pass # Already initialized - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - w_val = other_shadow.fetch(i) - if not w_val.is_nil(self.space): # nil fields already initialized - self.store(i, w_val) - -class AllNilStorageShadow(AbstractStorageShadow): - repr_classname = "AllNilStorageShadow" - _attrs_ = ['_size'] - _immutable_fields_ = ['_size'] - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self._size = size - def fetch(self, n0): - if n0 >= self._size: - raise IndexError - return self.space.w_nil - def copy_into(self, other_shadow): - other_shadow.copy_from_AllNil(self) - def do_store(self, n0, w_value): - pass - def size(self): - return self._size - def generalized_strategy_for(self, w_val): - return find_storage_for_objects(self.space, [w_val]) - @staticmethod - def static_can_contain(space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) - -class AbstractValueOrNilStorageMixin(object): - # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.storage = [self.nil_value] * size - - def size(self): - return len(self.storage) - - def generalized_strategy_for(self, w_val): - return ListStorageShadow - - def fetch(self, n0): - val = self.storage[n0] - if self.is_nil_value(val): - return self.space.w_nil - else: - return self.wrap(self.space, val) - - def do_store(self, n0, w_val): - if w_val.is_nil(self.space): - 
self.storage[n0] = self.nil_value - else: - self.storage[n0] = self.unwrap(self.space, w_val) - -# This is to avoid code duplication - at objectmodel.specialize.arg(0) -def _value_or_nil_can_handle(cls, space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ - (isinstance(w_val, cls.wrapper_class) \ - and not cls.is_nil_value(cls.unwrap(space, w_val))) - -class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): - repr_classname = "SmallIntegerOrNilStorageShadow" - nil_value = constants.MAXINT - wrapper_class = model.W_SmallInteger - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == SmallIntegerOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_int(val) - @staticmethod - def unwrap(space, w_val): - return space.unwrap_int(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_SmallIntegerOrNil(self) - -class FloatOrNilStorageShadow(AbstractStorageShadow): - repr_classname = "FloatOrNilStorageShadow" - nil_value = sys.float_info.max - wrapper_class = model.W_Float - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == FloatOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_float(val) - @staticmethod - def unwrap(space, w_val): - return space.unwrap_float(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_FloatOrNil(self) - -def empty_storage(space, w_self, size, weak=False): - if weak: - return WeakListStorageShadow(space, w_self, size) - if space.no_specialized_storage.is_set(): - return ListStorageShadow(space, w_self, size) - return 
AllNilStorageShadow(space, w_self, size) - - at jit.unroll_safe -def find_storage_for_objects(space, vars, weak=False): - if weak: - return WeakListStorageShadow - if space.no_specialized_storage.is_set(): - return ListStorageShadow - specialized_strategies = 3 - all_nil_can_handle = True - small_int_can_handle = True - float_can_handle = True - for w_obj in vars: - if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): - all_nil_can_handle = False - specialized_strategies = specialized_strategies - 1 - if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): - small_int_can_handle = False - specialized_strategies = specialized_strategies - 1 - if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): - float_can_handle = False - specialized_strategies = specialized_strategies - 1 - - if specialized_strategies <= 0: - return ListStorageShadow - - if all_nil_can_handle: - return AllNilStorageShadow - if small_int_can_handle: - return SmallIntegerOrNilStorageShadow - if float_can_handle: - return FloatOrNilStorageShadow - - # If this happens, please look for a bug in the code above. - assert False, "No strategy could be found for list..." 
- -class ListStorageMixin(object): - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.initialize_storage(size) - def size(self): - return len(self.storage) - -class ListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - repr_classname = "ListStorageShadow" - import_from_mixin(ListStorageMixin) - - def initialize_storage(self, size): - self.storage = [self.space.w_nil] * size - def fetch(self, n0): - return self.storage[n0] - def store(self, n0, w_value): - self.storage[n0] = w_value - -class WeakListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - repr_classname = "WeakListStorageShadow" - import_from_mixin(ListStorageMixin) - - def initialize_storage(self, size): - self.storage = [weakref.ref(self.space.w_nil)] * size - def fetch(self, n0): - weakobj = self.storage[n0] - return weakobj() or self.space.w_nil - def store(self, n0, w_value): - assert w_value is not None - self.storage[n0] = weakref.ref(w_value) - -class AbstractCachingShadow(ListStorageShadow): - _immutable_fields_ = ['version?'] - _attrs_ = ['version'] - repr_classname = "AbstractCachingShadow" - import_from_mixin(version.VersionMixin) - version = None - - def __init__(self, space, w_self, size): - ListStorageShadow.__init__(self, space, w_self, size) - self.changed() - -# ____________________________________________________________ - -POINTERS = 0 -BYTES = 1 -WORDS = 2 -WEAK_POINTERS = 3 -COMPILED_METHOD = 4 -FLOAT = 5 -LARGE_POSITIVE_INTEGER = 6 - -class ClassShadowError(error.SmalltalkException): - exception_type = "ClassShadowError" - -class ClassShadow(AbstractCachingShadow): - """A shadow for Smalltalk objects that are classes - (i.e. used as the class of another Smalltalk object). - """ - - _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", - "_s_methoddict", "_s_superclass", "subclass_s"] - name = '??? 
(incomplete class info)' - _s_superclass = _s_methoddict = None - provides_getname = True - repr_classname = "ClassShadow" - - def __init__(self, space, w_self, size): - self.subclass_s = {} - AbstractCachingShadow.__init__(self, space, w_self, size) - - def store(self, n0, w_val): - AbstractCachingShadow.store(self, n0, w_val) - if n0 == constants.CLASS_SUPERCLASS_INDEX: - self.store_w_superclass(w_val) - elif n0 == constants.CLASS_METHODDICT_INDEX: - self.store_w_methoddict(w_val) - elif n0 == constants.CLASS_FORMAT_INDEX: - # read and painfully decode the format - assert isinstance(w_val, model.W_SmallInteger) - classformat = self.space.unwrap_int(w_val) - # The classformat in Squeak, as an integer value, is: - # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> - # <6 bits=instSize\\64><1 bit=0> - # In Slang the value is read directly as a boxed integer, so that - # the code gets a "pointer" whose bits are set as above, but - # shifted one bit to the left and with the lowest bit set to 1. - - # Compute the instance size (really the size, not the number of bytes) - instsize_lo = (classformat >> 1) & 0x3F - instsize_hi = (classformat >> (9 + 1)) & 0xC0 - self._instance_size = (instsize_lo | instsize_hi) - 1 # subtract hdr - # decode the instSpec - format = (classformat >> 7) & 15 - self.instance_varsized = format >= 2 - - # In case of raised exception below. 
- self.changed() - - if format < 4: - self.instance_kind = POINTERS - elif format == 4: - self.instance_kind = WEAK_POINTERS - elif format == 6: - if self.space.w_Float.is_same_object(self.w_self()): - self.instance_kind = FLOAT - else: - self.instance_kind = WORDS - if self.instsize() != 0: - raise ClassShadowError("can't have both words and a non-zero " - "base instance size") - elif 8 <= format <= 11: - if self.space.w_LargePositiveInteger.is_same_object(self.w_self()): - self.instance_kind = LARGE_POSITIVE_INTEGER - else: - self.instance_kind = BYTES - if self.instsize() != 0: - raise ClassShadowError("can't have both bytes and a non-zero " - "base instance size") - elif 12 <= format <= 15: - self.instance_kind = COMPILED_METHOD - else: - raise ClassShadowError("unknown format %d" % (format,)) - else: - if self._w_self.w_class == self.space.classtable["w_Metaclass"]: - # In case of Metaclasses, the "instance" class is stored in the last field. - if n0 == self.size() - 1 and isinstance(w_val, model.W_PointersObject): - cl_shadow = w_val.as_class_get_shadow(self.space) - self.name = "%s class" % cl_shadow.getname() - else: - return - elif n0 == constants.CLASS_NAME_INDEX: - # In case of regular classes, the name is stored here. - self.store_w_name(w_val) - else: - return - # Some of the special info has changed -> Switch version. 
- self.changed() - - def store_w_superclass(self, w_class): - superclass = self._s_superclass - if w_class is None or w_class.is_nil(self.space): - if superclass: superclass.detach_s_class(self) - self._s_superclass = None - else: - assert isinstance(w_class, model.W_PointersObject) - s_new_superclass = w_class.as_class_get_shadow(self.space) - if superclass is s_new_superclass: - return - if superclass: superclass.detach_s_class(self) - self._s_superclass = s_new_superclass - s_new_superclass.attach_s_class(self) - - def store_w_methoddict(self, w_methoddict): - methoddict = self._s_methoddict - if w_methoddict is None or w_methoddict.is_nil(self.space): - if methoddict: methoddict.s_class = None - self._s_methoddict = None - else: - assert isinstance(w_methoddict, model.W_PointersObject) - s_new_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) - if methoddict is s_new_methoddict: - return - if methoddict: methoddict.s_class = None - self.store_s_methoddict(s_new_methoddict) - - def store_s_methoddict(self, s_methoddict): - s_methoddict.s_class = self - s_methoddict.sync_method_cache() - self._s_methoddict = s_methoddict - - def attach_s_class(self, s_other): - self.subclass_s[s_other] = None - - def detach_s_class(self, s_other): - del self.subclass_s[s_other] - - def store_w_name(self, w_name): - if isinstance(w_name, model.W_BytesObject): - self.name = w_name.as_string() - else: - self.name = None - - @jit.unroll_safe - def flush_method_caches(self): - look_in_shadow = self - while look_in_shadow is not None: - look_in_shadow.s_methoddict().flush_method_cache() - look_in_shadow = look_in_shadow._s_superclass - - def new(self, extrasize=0): - w_cls = self.w_self() - if self.instance_kind == POINTERS: - size = self.instsize() + extrasize - w_new = model.W_PointersObject(self.space, w_cls, size) - elif self.instance_kind == WORDS: - w_new = model.W_WordsObject(self.space, w_cls, extrasize) - elif self.instance_kind == BYTES: - w_new = 
model.W_BytesObject(self.space, w_cls, extrasize) - elif self.instance_kind == COMPILED_METHOD: - w_new = model.W_CompiledMethod(self.space, extrasize) - elif self.instance_kind == FLOAT: - w_new = model.W_Float(0) # Squeak gives a random piece of memory - elif self.instance_kind == LARGE_POSITIVE_INTEGER: - if extrasize <= 4: - w_new = model.W_LargePositiveInteger1Word(0, extrasize) - else: - w_new = model.W_BytesObject(self.space, w_cls, extrasize) - elif self.instance_kind == WEAK_POINTERS: - size = self.instsize() + extrasize - w_new = model.W_PointersObject(self.space, w_cls, size, weak=True) - else: - raise NotImplementedError(self.instance_kind) - return w_new - - def w_methoddict(self): - return self._s_methoddict.w_self() - - def s_methoddict(self): - return self._s_methoddict - - def s_superclass(self): - return self._s_superclass - - def getname(self): - return self.name - - # _______________________________________________________________ - # Methods for querying the format word, taken from the blue book: - # - # included so that we can reproduce code from the reference impl - # more easily - - def ispointers(self): - " True if instances of this class have data stored as pointers " - XXX # what about weak pointers? - return self.format == POINTERS - - def iswords(self): - " True if instances of this class have data stored as numerical words " - XXX # what about weak pointers? 
- return self.format in (POINTERS, WORDS) - - def isbytes(self): - " True if instances of this class have data stored as numerical bytes " - return self.format == BYTES - - @constant_for_version - def isvariable(self): - " True if instances of this class have indexed inst variables " - return self.instance_varsized - - @constant_for_version - def instsize(self): - " Number of named instance variables for each instance of this class " - return self._instance_size - - # _______________________________________________________________ - # Other Methods - - @constant_for_version_arg - def lookup(self, w_selector): - look_in_shadow = self - while look_in_shadow is not None: - w_method = look_in_shadow.s_methoddict().find_selector(w_selector) - if w_method is not None: - return w_method - look_in_shadow = look_in_shadow._s_superclass - raise error.MethodNotFound() - - def changed(self): - self.superclass_changed(version.Version()) - - # this is done, because the class-hierarchy contains cycles - def superclass_changed(self, version): - if self.version is not version: - self.version = version - for s_class in self.subclass_s: - s_class.superclass_changed(version) - - # _______________________________________________________________ - # Methods used only in testing - - def inherits_from(self, s_superclass): - "NOT_RPYTHON" # this is only for testing. - classshadow = self - while classshadow is not None: - if classshadow is s_superclass: - return True - classshadow = classshadow.s_superclass() - else: - return False - - def initialize_methoddict(self): - "NOT_RPYTHON" # this is only for testing. 
- if self._s_methoddict is None: - w_methoddict = model.W_PointersObject(self.space, None, 2) - w_methoddict.store(self.space, constants.METHODDICT_VALUES_INDEX, model.W_PointersObject(self.space, None, 0)) - self.store_s_methoddict(w_methoddict.as_methoddict_get_shadow(self.space)) - self.s_methoddict().invalid = False - - def installmethod(self, w_selector, w_method): - "NOT_RPYTHON" # this is only for testing. - assert not isinstance(w_selector, str) - self.initialize_methoddict() - self.s_methoddict().methoddict[w_selector] = w_method - if isinstance(w_method, model.W_CompiledMethod): - w_method.compiledin_class = self.w_self() - -class MethodDictionaryShadow(ListStorageShadow): - - _immutable_fields_ = ['invalid?', 's_class'] - _attrs_ = ['methoddict', 'invalid', 's_class'] - repr_classname = "MethodDictionaryShadow" - - def __init__(self, space, w_self, size): - self.invalid = True - self.s_class = None - self.methoddict = {} - ListStorageShadow.__init__(self, space, w_self, size) - - def update(self): - self.sync_method_cache() - - def find_selector(self, w_selector): - if self.invalid: - return None # we may be invalid if Smalltalk code did not call flushCache - return self.methoddict.get(w_selector, None) - - # We do not call update() after changes to ourselves: - # Whenever a method is added, it's keyword is added to w_self, then the - # w_compiled_method is added to our observee. - # sync_method_cache at this point would not have the desired effect, because in - # the Smalltalk Implementation, the dictionary changes first. Afterwards - # its contents array is filled with the value belonging to the new key. 
- def store(self, n0, w_value): - ListStorageShadow.store(self, n0, w_value) - if n0 == constants.METHODDICT_VALUES_INDEX: - self.setup_notification() - if n0 >= constants.METHODDICT_NAMES_INDEX: - self.invalid = True - - def setup_notification(self): - self.w_values().as_observed_get_shadow(self.space).notify(self) - - def w_values(self): - w_values = self.fetch(constants.METHODDICT_VALUES_INDEX) - assert isinstance(w_values, model.W_PointersObject) - return w_values - - def flush_method_cache(self): - # Lazy synchronization: Only flush the cache, if we are already synchronized. - if self.invalid: - self.sync_method_cache() - - def sync_method_cache(self): - if self.size() == 0: - return - self.methoddict = {} - size = self.size() - constants.METHODDICT_NAMES_INDEX - w_values = self.w_values() - for i in range(size): - w_selector = self.w_self().fetch(self.space, constants.METHODDICT_NAMES_INDEX+i) - if not w_selector.is_nil(self.space): - if isinstance(w_selector, model.W_BytesObject): - selector = w_selector.as_string() - else: - selector = "? (non-byteobject selector)" - pass - # TODO: Check if there's more assumptions about this. - # Putting any key in the methodDict and running with - # perform is actually supported in Squeak - # raise ClassShadowError("bogus selector in method dict") - w_compiledmethod = w_values.fetch(self.space, i) - if not isinstance(w_compiledmethod, model.W_CompiledMethod): - raise ClassShadowError("The methoddict must contain " - "CompiledMethods only, for now. 
" - "If the value observed is nil, our " - "invalidating mechanism may be broken.") - self.methoddict[w_selector] = w_compiledmethod - w_compiledmethod.set_lookup_class_and_name(self.s_class.w_self(), selector) - if self.s_class: - self.s_class.changed() - self.invalid = False - -class AbstractRedirectingShadow(AbstractShadow): - _attrs_ = ['_w_self_size'] - repr_classname = "AbstractRedirectingShadow" - - def __init__(self, space, w_self, size): - if w_self is not None: - self._w_self_size = w_self.size() - else: - self._w_self_size = size - AbstractShadow.__init__(self, space, w_self, self._w_self_size) - - def size(self): - return self._w_self_size - -class ContextState(object): - def __init__(self, name): - self.name = name - def __str__(self): - return self.name - def __repr__(self): - return self.name -InactiveContext = ContextState("InactiveContext") -ActiveContext = ContextState("ActiveContext") -DirtyContext = ContextState("DirtyContext") - -class ContextPartShadow(AbstractRedirectingShadow): - - __metaclass__ = extendabletype - _attrs_ = ['_s_sender', - '_pc', '_temps_and_stack', - '_stack_ptr', 'instances_w', 'state'] - repr_classname = "ContextPartShadow" - - _virtualizable_ = [ - '_s_sender', - "_pc", "_temps_and_stack[*]", "_stack_ptr", - "_w_self", "_w_self_size", 'state' - ] - - # ______________________________________________________________________ - # Initialization - - def __init__(self, space, w_self, size=0): - self._s_sender = None - AbstractRedirectingShadow.__init__(self, space, w_self, size) - self.instances_w = {} - self.state = InactiveContext - - def copy_from(self, other_shadow): - # Some fields have to be initialized before the rest, to ensure correct initialization. - privileged_fields = self.fields_to_copy_first() - for n0 in privileged_fields: - self.copy_field_from(n0, other_shadow) - - # Now the temp size will be known. 
- self.init_stack_and_temps() - - for n0 in range(self.size()): - if n0 not in privileged_fields: - self.copy_field_from(n0, other_shadow) - - def fields_to_copy_first(self): - return [] - - # ______________________________________________________________________ - # Accessing object fields - - def fetch(self, n0): - if n0 == constants.CTXPART_SENDER_INDEX: - return self.w_sender() - if n0 == constants.CTXPART_PC_INDEX: - return self.wrap_pc() - if n0 == constants.CTXPART_STACKP_INDEX: - return self.wrap_stackpointer() - if self.stackstart() <= n0 < self.external_stackpointer(): - temp_i = self.stackdepth() - (n0-self.stackstart()) - 1 - assert temp_i >= 0 - return self.peek(temp_i) - if self.external_stackpointer() <= n0 < self.stackend(): - return self.space.w_nil - else: - # XXX later should store tail out of known context part as well - raise error.WrapperException("Index in context out of bounds") - - def store(self, n0, w_value): - if n0 == constants.CTXPART_SENDER_INDEX: - assert isinstance(w_value, model.W_PointersObject) - if w_value.is_nil(self.space): - self.store_s_sender(None) - else: - self.store_s_sender(w_value.as_context_get_shadow(self.space)) - return - if n0 == constants.CTXPART_PC_INDEX: - return self.store_unwrap_pc(w_value) - if n0 == constants.CTXPART_STACKP_INDEX: - return self.unwrap_store_stackpointer(w_value) - if self.stackstart() <= n0 < self.external_stackpointer(): # XXX can be simplified? - temp_i = self.stackdepth() - (n0-self.stackstart()) - 1 - assert temp_i >= 0 - return self.set_top(w_value, temp_i) - if self.external_stackpointer() <= n0 < self.stackend(): - return - else: - # XXX later should store tail out of known context part as well - raise error.WrapperException("Index in context out of bounds") - - # === Sender === - - def store_s_sender(self, s_sender): - if s_sender is not self._s_sender: - self._s_sender = s_sender - # If new sender is None, we are just being marked as returned. 
- if s_sender is not None and self.state is ActiveContext: - self.state = DirtyContext - - def w_sender(self): - sender = self.s_sender() - if sender is None: - return self.space.w_nil - return sender.w_self() - - def s_sender(self): - return self._s_sender - - # === Stack Pointer === - - def unwrap_store_stackpointer(self, w_sp1): - # the stackpointer in the W_PointersObject starts counting at the - # tempframe start - # Stackpointer from smalltalk world == stacksize in python world - self.store_stackpointer(self.space.unwrap_int(w_sp1)) - - def store_stackpointer(self, size): - depth = self.stackdepth() - if size < depth: - # TODO Warn back to user - assert size >= 0 - self.pop_n(depth - size) - else: - for i in range(depth, size): - self.push(self.space.w_nil) - - def stackdepth(self): - return rarithmetic.intmask(self._stack_ptr) - - def wrap_stackpointer(self): - return self.space.wrap_int(self.stackdepth()) - - # === Program Counter === - - def store_unwrap_pc(self, w_pc): - if w_pc.is_nil(self.space): - self.store_pc(-1) - else: - pc = self.space.unwrap_int(w_pc) - pc -= self.w_method().bytecodeoffset() - pc -= 1 - self.store_pc(pc) - - def wrap_pc(self): - pc = self.pc() - if pc == -1: - return self.space.w_nil - else: - pc += 1 - pc += self.w_method().bytecodeoffset() - return self.space.wrap_int(pc) - - def pc(self): - return self._pc - - def store_pc(self, newpc): - assert newpc >= -1 - self._pc = newpc - - # === Subclassed accessors === - - def s_home(self): - raise NotImplementedError() - - def stackstart(self): - raise NotImplementedError() - - def w_receiver(self): - raise NotImplementedError() - - def w_method(self): - raise NotImplementedError() - - def tempsize(self): - raise NotImplementedError() - - def is_closure_context(self): - raise NotImplementedError() - - def is_BlockClosure_ensure(self): - raise NotImplementedError() - - def home_is_self(self): - raise NotImplementedError() - - # === Other properties of Contexts === - - def 
mark_returned(self): - self.store_pc(-1) - self.store_s_sender(None) - - def is_returned(self): - return self.pc() == -1 and self.w_sender().is_nil(self.space) - - def external_stackpointer(self): - return self.stackdepth() + self.stackstart() - - def stackend(self): - # XXX this is incorrect when there is subclassing - return self._w_self_size - - def fetch_next_bytecode(self): - pc = jit.promote(self._pc) - assert pc >= 0 - self._pc += 1 - return self.fetch_bytecode(pc) - - def fetch_bytecode(self, pc): - bytecode = self.w_method().fetch_bytecode(pc) - return ord(bytecode) - - # ______________________________________________________________________ - # Temporary Variables - # - # Every context has it's own stack. BlockContexts share their temps with - # their home contexts. MethodContexts created from a BlockClosure get their - # temps copied from the closure upon activation. Changes are not propagated back; - # this is handled by the compiler by allocating an extra Array for temps. - - def gettemp(self, index): - raise NotImplementedError() - - def settemp(self, index, w_value): - raise NotImplementedError() - - # ______________________________________________________________________ - # Stack Manipulation - - @jit.unroll_safe - def init_stack_and_temps(self): - stacksize = self.stackend() - self.stackstart() - tempsize = self.tempsize() - temps_and_stack = [None] * (stacksize + tempsize) - self._temps_and_stack = temps_and_stack - make_sure_not_resized(temps_and_stack) - for i in range(tempsize): - temps_and_stack[i] = self.space.w_nil - self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - - def stack_get(self, index0): - return self._temps_and_stack[index0] - - def stack_put(self, index0, w_val): - self._temps_and_stack[index0] = w_val - - def stack(self): - """NOT_RPYTHON""" # purely for testing - return self._temps_and_stack[self.tempsize():self._stack_ptr] - - def pop(self): - #assert self._stack_ptr > self.tempsize() - ptr = 
jit.promote(self._stack_ptr) - 1 - ret = self.stack_get(ptr) # you get OverflowError if the stack is empty - self.stack_put(ptr, None) - self._stack_ptr = ptr - return ret - - def push(self, w_v): - #assert self._stack_ptr >= self.tempsize() - #assert self._stack_ptr < self.stackend() - self.stackstart() + self.tempsize() - ptr = jit.promote(self._stack_ptr) - self.stack_put(ptr, w_v) - self._stack_ptr = ptr + 1 - - @jit.unroll_safe - def push_all(self, lst): - for elt in lst: - self.push(elt) - - def top(self): - return self.peek(0) - - def set_top(self, value, position=0): - rpos = rarithmetic.r_uint(position) - ptr = self._stack_ptr + ~rpos - self.stack_put(ptr, value) - - def peek(self, idx): - rpos = rarithmetic.r_uint(idx) - ptr = jit.promote(self._stack_ptr) + ~rpos - return self.stack_get(ptr) - - @jit.unroll_safe - def pop_n(self, n): - #assert n == 0 or self._stack_ptr - n >= self.tempsize() - jit.promote(self._stack_ptr) - while n > 0: - n -= 1 - self._stack_ptr -= 1 - self.stack_put(self._stack_ptr, None) - - @jit.unroll_safe - def pop_and_return_n(self, n): - result = [self.peek(i) for i in range(n - 1, -1, -1)] - self.pop_n(n) - return result - - # ______________________________________________________________________ - # Primitive support - - def store_instances_array(self, w_class, match_w): - # used for primitives 77 & 78 - self.instances_w[w_class] = match_w - - @jit.elidable - def instances_array(self, w_class): - return self.instances_w.get(w_class, None) - - # ______________________________________________________________________ - # Printing - - def argument_strings(self): - return [ w_arg.as_repr_string() for w_arg in self.w_arguments() ] - - def __str__(self): - retval = self.short_str() - retval += "\n%s" % self.w_method().bytecode_string(markBytecode=self.pc() + 1) - retval += "\nArgs:----------------" - argcount = self.w_method().argsize - j = 0 - for w_obj in self._temps_and_stack[:self._stack_ptr]: - if j == argcount: - retval += 
"\nTemps:---------------" - if j == self.tempsize(): - retval += "\nStack:---------------" - retval += "\n %0.2i: %s" % (j, w_obj.as_repr_string()) - j += 1 - retval += "\n---------------------" - return retval - - def short_str(self): - arg_strings = self.argument_strings() - if len(arg_strings) > 0: - args = " , ".join(arg_strings) - args = " (%d arg(s): %s)" % (len(arg_strings), args) - else: - args = "" - return '%s [pc: %d] (rcvr: %s)%s' % ( - self.method_str(), - self.pc() + 1, - self.w_receiver().as_repr_string(), - args - ) - - def print_stack(self, method=True): - return self.print_padded_stack(method)[1] - - def print_padded_stack(self, method): - padding = ret_str = '' - if self.s_sender() is not None: - padding, ret_str = self.s_sender().print_padded_stack(method) - if method: - desc = self.method_str() - else: - desc = self.short_str() - return padding + ' ', '%s\n%s%s' % (ret_str, padding, desc) - -class BlockContextShadow(ContextPartShadow): - _attrs_ = ['_w_home', '_initialip', '_eargc'] - repr_classname = "BlockContextShadow" - - # === Initialization === - - def __init__(self, space, w_self=None, size=0, w_home=None, argcnt=0, initialip=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - creating_w_self = w_self is None - if creating_w_self: - s_home = w_home.as_methodcontext_get_shadow(space) - contextsize = s_home.size() - s_home.tempsize() - w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) - ContextPartShadow.__init__(self, space, w_self, size) - if creating_w_self: - w_self.store_shadow(self) - self.store_expected_argument_count(argcnt) - self.store_initialip(initialip) - if w_home: - self.store_w_home(w_home) - self.store_pc(initialip) - self.init_stack_and_temps() - - def fields_to_copy_first(self): - return [ constants.BLKCTX_HOME_INDEX ] - - # === Implemented accessors === - - def s_home(self): - return self._w_home.as_methodcontext_get_shadow(self.space) - - def stackstart(self): - return 
constants.BLKCTX_STACK_START - - def tempsize(self): - # A blockcontext doesn't have any temps - return 0 - - def w_receiver(self): - return self.s_home().w_receiver() - - def w_method(self): - retval = self.s_home().w_method() - assert isinstance(retval, model.W_CompiledMethod) - return retval - - def is_closure_context(self): - return True - - def is_BlockClosure_ensure(self): - return False - - def home_is_self(self): - return False - - # === Temporary variables === - - def gettemp(self, index): - return self.s_home().gettemp(index) - - def settemp(self, index, w_value): - self.s_home().settemp(index, w_value) - - # === Accessing object fields === - - def fetch(self, n0): - if n0 == constants.BLKCTX_HOME_INDEX: - return self._w_home - if n0 == constants.BLKCTX_INITIAL_IP_INDEX: - return self.wrap_initialip() - if n0 == constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX: - return self.wrap_eargc() - else: - return ContextPartShadow.fetch(self, n0) - - def store(self, n0, w_value): - if n0 == constants.BLKCTX_HOME_INDEX: - return self.store_w_home(w_value) - if n0 == constants.BLKCTX_INITIAL_IP_INDEX: - return self.unwrap_store_initialip(w_value) - if n0 == constants.BLKCTX_BLOCK_ARGUMENT_COUNT_INDEX: - return self.unwrap_store_eargc(w_value) - else: - return ContextPartShadow.store(self, n0, w_value) - - def store_w_home(self, w_home): - assert isinstance(w_home, model.W_PointersObject) - self._w_home = w_home - - def unwrap_store_initialip(self, w_value): - initialip = self.space.unwrap_int(w_value) - initialip -= 1 + self.w_method().literalsize - self.store_initialip(initialip) - - def store_initialip(self, initialip): - self._initialip = initialip - - def wrap_initialip(self): - initialip = self.initialip() - initialip += 1 + self.w_method().literalsize - return self.space.wrap_int(initialip) - - def reset_pc(self): - self.store_pc(self.initialip()) - - def initialip(self): - return self._initialip - - def unwrap_store_eargc(self, w_value): - 
self.store_expected_argument_count(self.space.unwrap_int(w_value)) - - def wrap_eargc(self): - return self.space.wrap_int(self.expected_argument_count()) - - def expected_argument_count(self): - return self._eargc - - def store_expected_argument_count(self, argc): - self._eargc = argc - - # === Stack Manipulation === - - def reset_stack(self): - self.pop_n(self.stackdepth()) - - # === Printing === - - def w_arguments(self): - return [] - - def method_str(self): - return '[] in %s' % self.w_method().get_identifier_string() - -class MethodContextShadow(ContextPartShadow): - _attrs_ = ['closure', '_w_receiver', '_w_method', '_is_BlockClosure_ensure'] - repr_classname = "MethodContextShadow" - - # === Initialization === - - @jit.unroll_safe - def __init__(self, space, w_self=None, size=0, w_method=None, w_receiver=None, - arguments=[], closure=None, pc=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - ContextPartShadow.__init__(self, space, w_self, size) - self.store_w_receiver(w_receiver) - self.store_pc(pc) - self.closure = closure - - if w_method: - self.store_w_method(w_method) - # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. 
- size = w_method.compute_frame_size() + self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() - self._w_self_size = size - self.init_stack_and_temps() - else: - self._w_method = None - self._is_BlockClosure_ensure = False - - argc = len(arguments) - for i0 in range(argc): - self.settemp(i0, arguments[i0]) - - if closure: - for i0 in range(closure.size()): - self.settemp(i0+argc, closure.at0(i0)) - - def fields_to_copy_first(self): - return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - - # === Accessing object fields === - - def fetch(self, n0): - if n0 == constants.MTHDCTX_METHOD: - return self.w_method() - if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - if self.closure: - return self.closure._w_self - else: - return self.space.w_nil - if n0 == constants.MTHDCTX_RECEIVER: - return self.w_receiver() - temp_i = n0-constants.MTHDCTX_TEMP_FRAME_START - if (0 <= temp_i < self.tempsize()): - return self.gettemp(temp_i) - else: - return ContextPartShadow.fetch(self, n0) - - def store(self, n0, w_value): - if n0 == constants.MTHDCTX_METHOD: - return self.store_w_method(w_value) - if n0 == constants.MTHDCTX_CLOSURE_OR_NIL: - if w_value.is_nil(self.space): - self.closure = None - else: - self.closure = wrapper.BlockClosureWrapper(self.space, w_value) - return - if n0 == constants.MTHDCTX_RECEIVER: - self.store_w_receiver(w_value) - return - temp_i = n0-constants.MTHDCTX_TEMP_FRAME_START - if (0 <= temp_i < self.tempsize()): - return self.settemp(temp_i, w_value) - else: - return ContextPartShadow.store(self, n0, w_value) - - def store_w_receiver(self, w_receiver): - self._w_receiver = w_receiver - - # === Implemented Accessors === - - def s_home(self): - if self.is_closure_context(): - # this is a context for a blockClosure - w_outerContext = self.closure.outerContext() - assert isinstance(w_outerContext, model.W_PointersObject) - s_outerContext = w_outerContext.as_context_get_shadow(self.space) - # XXX check whether we can actually return 
from that context - if s_outerContext.is_returned(): - raise error.BlockCannotReturnError() - return s_outerContext.s_home() - else: - return self - - def stackstart(self): - return constants.MTHDCTX_TEMP_FRAME_START - - def store_w_method(self, w_method): - assert isinstance(w_method, model.W_CompiledMethod) - self._w_method = w_method - if w_method: - # Primitive 198 is used in BlockClosure >> ensure: - self._is_BlockClosure_ensure = (w_method.primitive() == 198) - - def w_receiver(self): - return self._w_receiver - - def w_method(self): - retval = self._w_method - assert isinstance(retval, model.W_CompiledMethod) - return retval - - def tempsize(self): - if not self.is_closure_context(): - return self.w_method().tempsize() - else: - return self.closure.tempsize() - - def is_closure_context(self): - return self.closure is not None - - def is_BlockClosure_ensure(self): - return self._is_BlockClosure_ensure - - def home_is_self(self): - return not self.is_closure_context() - - # ______________________________________________________________________ - # Marriage of MethodContextShadows with PointerObjects only when required - - def w_self(self): - if self._w_self is not None: - return self._w_self - else: - s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) - size = self.size() - s_MethodContext.instsize() - space = self.space - w_self = s_MethodContext.new(size) - assert isinstance(w_self, model.W_PointersObject) - w_self.store_shadow(self) - self._w_self = w_self - self._w_self_size = w_self.size() - return w_self - - # === Temporary variables === - - def gettemp(self, index0): - return self.stack_get(index0) - - def settemp(self, index0, w_value): - self.stack_put(index0, w_value) - - # === Printing === - - def w_arguments(self): - argcount = self.w_method().argsize - return [ self.stack_get(i) for i in range(argcount) ] - - def method_str(self): - block = '[] in ' if self.is_closure_context() else '' - return '%s%s' % (block, 
self.w_method().get_identifier_string()) - -class CachedObjectShadow(AbstractCachingShadow): - repr_classname = "CachedObjectShadow" - - @elidable_for_version - def fetch(self, n0): - return AbstractCachingShadow.fetch(self, n0) - - def store(self, n0, w_value): - AbstractCachingShadow.store(self, n0, w_value) - self.changed() - -class ObserveeShadow(ListStorageShadow): - _attrs_ = ['dependent'] - repr_classname = "ObserveeShadow" - def __init__(self, space, w_self, size): - ListStorageShadow.__init__(self, space, w_self, size) - self.dependent = None - - def store(self, n0, w_value): - ListStorageShadow.store(self, n0, w_value) - if self.dependent: - self.dependent.update() - - def notify(self, dependent): - if self.dependent is not None and dependent is not self.dependent: - raise RuntimeError('Meant to be observed by only one value, so far') - self.dependent = dependent diff --git a/spyvm/storage.py b/spyvm/storage.py new file mode 100644 --- /dev/null +++ b/spyvm/storage.py @@ -0,0 +1,319 @@ + +import sys, weakref +from spyvm import model, version, constants +from spyvm.version import elidable_for_version +from rpython.rlib import objectmodel, jit +from rpython.rlib.objectmodel import import_from_mixin + +class AbstractShadow(object): + """A shadow is an optional extra bit of information that + can be attached at run-time to any Smalltalk object. 
+ """ + _attrs_ = ['_w_self', 'space'] + _immutable_fields_ = ['space'] + provides_getname = False + repr_classname = "AbstractShadow" + + def __init__(self, space, w_self, size): + self.space = space + assert w_self is None or isinstance(w_self, model.W_PointersObject) + self._w_self = w_self + def w_self(self): + return self._w_self + def getname(self): + raise NotImplementedError("Abstract class") + def __repr__(self): + if self.provides_getname: + return "<%s %s>" % (self.repr_classname, self.getname()) + else: + return "<%s>" % self.repr_classname + + def fetch(self, n0): + raise NotImplementedError("Abstract class") + def store(self, n0, w_value): + raise NotImplementedError("Abstract class") + def size(self): + raise NotImplementedError("Abstract class") + + # This will invoke an appropriate copy_from_* method. + # Overwriting this allows optimized transitions between certain storage types. + def copy_into(self, other_shadow): + other_shadow.copy_from(self) + + def attach_shadow(self): pass + + def copy_field_from(self, n0, other_shadow): + self.store(n0, other_shadow.fetch(n0)) + + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + self.copy_field_from(i, other_shadow) + + def copy_from_AllNil(self, all_nil_storage): + self.copy_from(all_nil_storage) + def copy_from_SmallIntegerOrNil(self, small_int_storage): + self.copy_from(small_int_storage) + def copy_from_FloatOrNil(self, float_storage): + self.copy_from(float_storage) + +# ========== Storage classes implementing storage strategies ========== + +class AbstractStorageShadow(AbstractShadow): + _attrs_ = [] + repr_classname = "AbstractStorageShadow" + + def store(self, n0, w_val): + if self.can_contain(w_val): + return self.do_store(n0, w_val) + new_storage = self.generalized_strategy_for(w_val) + return self._w_self.store_with_new_storage(new_storage, n0, w_val) + def can_contain(self, w_val): + return self.static_can_contain(self.space, w_val) 
+ @staticmethod + def static_can_contain(space, w_val): + raise NotImplementedError() + def do_store(self, n0, w_val): + raise NotImplementedError() + def generalized_strategy_for(self, w_val): + raise NotImplementedError() + + def copy_from_AllNil(self, all_nil_storage): + pass # Already initialized + def copy_from(self, other_shadow): + assert self.size() == other_shadow.size() + for i in range(self.size()): + w_val = other_shadow.fetch(i) + if not w_val.is_nil(self.space): # nil fields already initialized + self.store(i, w_val) + +class AllNilStorageShadow(AbstractStorageShadow): + repr_classname = "AllNilStorageShadow" + _attrs_ = ['_size'] + _immutable_fields_ = ['_size'] + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self._size = size + def fetch(self, n0): + if n0 >= self._size: + raise IndexError + return self.space.w_nil + def copy_into(self, other_shadow): + other_shadow.copy_from_AllNil(self) + def do_store(self, n0, w_value): + pass + def size(self): + return self._size + def generalized_strategy_for(self, w_val): + return find_storage_for_objects(self.space, [w_val]) + @staticmethod + def static_can_contain(space, w_val): + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) + +class AbstractValueOrNilStorageMixin(object): + # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self.storage = [self.nil_value] * size + + def size(self): + return len(self.storage) + + def generalized_strategy_for(self, w_val): + return ListStorageShadow + + def fetch(self, n0): + val = self.storage[n0] + if self.is_nil_value(val): + return self.space.w_nil + else: + return self.wrap(self.space, val) + + def do_store(self, n0, w_val): + if w_val.is_nil(self.space): + self.storage[n0] = self.nil_value + else: + 
self.storage[n0] = self.unwrap(self.space, w_val) + +# This is to avoid code duplication + at objectmodel.specialize.arg(0) +def _value_or_nil_can_handle(cls, space, w_val): + return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ + (isinstance(w_val, cls.wrapper_class) \ + and not cls.is_nil_value(cls.unwrap(space, w_val))) + +class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "SmallIntegerOrNilStorageShadow" + nil_value = constants.MAXINT + wrapper_class = model.W_SmallInteger + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return val == SmallIntegerOrNilStorageShadow.nil_value + @staticmethod + def wrap(space, val): + return space.wrap_int(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_int(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_SmallIntegerOrNil(self) + +class FloatOrNilStorageShadow(AbstractStorageShadow): + repr_classname = "FloatOrNilStorageShadow" + nil_value = sys.float_info.max + wrapper_class = model.W_Float + import_from_mixin(AbstractValueOrNilStorageMixin) + + @staticmethod + def static_can_contain(space, w_val): + return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) + @staticmethod + def is_nil_value(val): + return val == FloatOrNilStorageShadow.nil_value + @staticmethod + def wrap(space, val): + return space.wrap_float(val) + @staticmethod + def unwrap(space, w_val): + return space.unwrap_float(w_val) + def copy_into(self, other_shadow): + other_shadow.copy_from_FloatOrNil(self) + +def empty_storage(space, w_self, size, weak=False): + if weak: + return WeakListStorageShadow(space, w_self, size) + if space.no_specialized_storage.is_set(): + return ListStorageShadow(space, w_self, size) + return AllNilStorageShadow(space, w_self, size) + + at 
jit.unroll_safe +def find_storage_for_objects(space, vars, weak=False): + if weak: + return WeakListStorageShadow + if space.no_specialized_storage.is_set(): + return ListStorageShadow + specialized_strategies = 3 + all_nil_can_handle = True + small_int_can_handle = True + float_can_handle = True + for w_obj in vars: + if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): + all_nil_can_handle = False + specialized_strategies = specialized_strategies - 1 + if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): + small_int_can_handle = False + specialized_strategies = specialized_strategies - 1 + if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): + float_can_handle = False + specialized_strategies = specialized_strategies - 1 + + if specialized_strategies <= 0: + return ListStorageShadow + + if all_nil_can_handle: + return AllNilStorageShadow + if small_int_can_handle: + return SmallIntegerOrNilStorageShadow + if float_can_handle: + return FloatOrNilStorageShadow + + # If this happens, please look for a bug in the code above. + assert False, "No strategy could be found for list..." 
+ +class ListStorageMixin(object): + def __init__(self, space, w_self, size): + AbstractStorageShadow.__init__(self, space, w_self, size) + self.initialize_storage(size) + def size(self): + return len(self.storage) + +class ListStorageShadow(AbstractStorageShadow): + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + repr_classname = "ListStorageShadow" + import_from_mixin(ListStorageMixin) + + def initialize_storage(self, size): + self.storage = [self.space.w_nil] * size + def fetch(self, n0): + return self.storage[n0] + def store(self, n0, w_value): + self.storage[n0] = w_value + +class WeakListStorageShadow(AbstractStorageShadow): + _attrs_ = ['storage'] + _immutable_fields_ = ['storage'] + repr_classname = "WeakListStorageShadow" + import_from_mixin(ListStorageMixin) + + def initialize_storage(self, size): + self.storage = [weakref.ref(self.space.w_nil)] * size + def fetch(self, n0): + weakobj = self.storage[n0] + return weakobj() or self.space.w_nil + def store(self, n0, w_value): + assert w_value is not None + self.storage[n0] = weakref.ref(w_value) + +# ========== Other storage classes, non-strategies ========== + +class AbstractRedirectingShadow(AbstractShadow): + _attrs_ = ['_w_self_size'] + repr_classname = "AbstractRedirectingShadow" + + def __init__(self, space, w_self, size): + if w_self is not None: + self._w_self_size = w_self.size() + else: + self._w_self_size = size + AbstractShadow.__init__(self, space, w_self, self._w_self_size) + + def size(self): + return self._w_self_size + +class AbstractCachingShadow(ListStorageShadow): + _immutable_fields_ = ['version?'] + _attrs_ = ['version'] + repr_classname = "AbstractCachingShadow" + import_from_mixin(version.VersionMixin) + version = None + + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) + self.changed() + +class CachedObjectShadow(AbstractCachingShadow): + repr_classname = "CachedObjectShadow" + + @elidable_for_version + def fetch(self, n0): 
+ return AbstractCachingShadow.fetch(self, n0) + + def store(self, n0, w_value): + AbstractCachingShadow.store(self, n0, w_value) + self.changed() + +class ObserveeShadow(ListStorageShadow): + _attrs_ = ['dependent'] + repr_classname = "ObserveeShadow" + def __init__(self, space, w_self, size): + ListStorageShadow.__init__(self, space, w_self, size) + self.dependent = None + + def store(self, n0, w_value): + ListStorageShadow.store(self, n0, w_value) + if self.dependent: + self.dependent.update() + + def notify(self, dependent): + if self.dependent is not None and dependent is not self.dependent: + raise RuntimeError('Meant to be observed by only one value, so far') + self.dependent = dependent diff --git a/spyvm/storage_classes.py b/spyvm/storage_classes.py new file mode 100644 --- /dev/null +++ b/spyvm/storage_classes.py @@ -0,0 +1,346 @@ + +from spyvm import model, constants, error, wrapper, version +from spyvm.storage import AbstractCachingShadow, ListStorageShadow +from spyvm.version import constant_for_version, constant_for_version_arg +from rpython.rlib import jit + +POINTERS = 0 +BYTES = 1 +WORDS = 2 +WEAK_POINTERS = 3 +COMPILED_METHOD = 4 +FLOAT = 5 +LARGE_POSITIVE_INTEGER = 6 + +class ClassShadowError(error.SmalltalkException): + exception_type = "ClassShadowError" + +class ClassShadow(AbstractCachingShadow): + """A shadow for Smalltalk objects that are classes + (i.e. used as the class of another Smalltalk object). + """ + + _attrs_ = ["name", "_instance_size", "instance_varsized", "instance_kind", + "_s_methoddict", "_s_superclass", "subclass_s"] + name = '??? 
(incomplete class info)' + _s_superclass = _s_methoddict = None + provides_getname = True + repr_classname = "ClassShadow" + + def __init__(self, space, w_self, size): + self.subclass_s = {} + AbstractCachingShadow.__init__(self, space, w_self, size) + + def store(self, n0, w_val): + AbstractCachingShadow.store(self, n0, w_val) + if n0 == constants.CLASS_SUPERCLASS_INDEX: + self.store_w_superclass(w_val) + elif n0 == constants.CLASS_METHODDICT_INDEX: + self.store_w_methoddict(w_val) + elif n0 == constants.CLASS_FORMAT_INDEX: + # read and painfully decode the format + assert isinstance(w_val, model.W_SmallInteger) + classformat = self.space.unwrap_int(w_val) + # The classformat in Squeak, as an integer value, is: + # <2 bits=instSize//64><5 bits=cClass><4 bits=instSpec> + # <6 bits=instSize\\64><1 bit=0> + # In Slang the value is read directly as a boxed integer, so that + # the code gets a "pointer" whose bits are set as above, but + # shifted one bit to the left and with the lowest bit set to 1. + + # Compute the instance size (really the size, not the number of bytes) + instsize_lo = (classformat >> 1) & 0x3F + instsize_hi = (classformat >> (9 + 1)) & 0xC0 + self._instance_size = (instsize_lo | instsize_hi) - 1 # subtract hdr + # decode the instSpec + format = (classformat >> 7) & 15 + self.instance_varsized = format >= 2 + + # In case of raised exception below. 
+ self.changed() + + if format < 4: + self.instance_kind = POINTERS + elif format == 4: + self.instance_kind = WEAK_POINTERS + elif format == 6: + if self.space.w_Float.is_same_object(self.w_self()): + self.instance_kind = FLOAT + else: + self.instance_kind = WORDS + if self.instsize() != 0: + raise ClassShadowError("can't have both words and a non-zero " + "base instance size") + elif 8 <= format <= 11: + if self.space.w_LargePositiveInteger.is_same_object(self.w_self()): + self.instance_kind = LARGE_POSITIVE_INTEGER + else: + self.instance_kind = BYTES + if self.instsize() != 0: + raise ClassShadowError("can't have both bytes and a non-zero " + "base instance size") + elif 12 <= format <= 15: + self.instance_kind = COMPILED_METHOD + else: + raise ClassShadowError("unknown format %d" % (format,)) + else: + if self._w_self.w_class == self.space.classtable["w_Metaclass"]: + # In case of Metaclasses, the "instance" class is stored in the last field. + if n0 == self.size() - 1 and isinstance(w_val, model.W_PointersObject): + cl_shadow = w_val.as_class_get_shadow(self.space) + self.name = "%s class" % cl_shadow.getname() + else: + return + elif n0 == constants.CLASS_NAME_INDEX: + # In case of regular classes, the name is stored here. + self.store_w_name(w_val) + else: + return + # Some of the special info has changed -> Switch version. 
+ self.changed() + + def store_w_superclass(self, w_class): + superclass = self._s_superclass + if w_class is None or w_class.is_nil(self.space): + if superclass: superclass.detach_s_class(self) + self._s_superclass = None + else: + assert isinstance(w_class, model.W_PointersObject) + s_new_superclass = w_class.as_class_get_shadow(self.space) + if superclass is s_new_superclass: + return + if superclass: superclass.detach_s_class(self) + self._s_superclass = s_new_superclass + s_new_superclass.attach_s_class(self) + + def store_w_methoddict(self, w_methoddict): + methoddict = self._s_methoddict + if w_methoddict is None or w_methoddict.is_nil(self.space): + if methoddict: methoddict.s_class = None + self._s_methoddict = None + else: + assert isinstance(w_methoddict, model.W_PointersObject) + s_new_methoddict = w_methoddict.as_methoddict_get_shadow(self.space) + if methoddict is s_new_methoddict: + return + if methoddict: methoddict.s_class = None + self.store_s_methoddict(s_new_methoddict) + + def store_s_methoddict(self, s_methoddict): + s_methoddict.s_class = self + s_methoddict.sync_method_cache() + self._s_methoddict = s_methoddict + + def attach_s_class(self, s_other): + self.subclass_s[s_other] = None + + def detach_s_class(self, s_other): + del self.subclass_s[s_other] + + def store_w_name(self, w_name): + if isinstance(w_name, model.W_BytesObject): + self.name = w_name.as_string() + else: + self.name = None + + @jit.unroll_safe + def flush_method_caches(self): + look_in_shadow = self + while look_in_shadow is not None: + look_in_shadow.s_methoddict().flush_method_cache() + look_in_shadow = look_in_shadow._s_superclass + + def new(self, extrasize=0): + w_cls = self.w_self() + if self.instance_kind == POINTERS: + size = self.instsize() + extrasize + w_new = model.W_PointersObject(self.space, w_cls, size) From noreply at buildbot.pypy.org Tue Aug 5 18:06:40 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Tue, 5 Aug 2014 18:06:40 +0200 (CEST) 
Subject: [pypy-commit] lang-smalltalk storage: Refactored creation of context objects. Message-ID: <20140805160640.64D0E1D37DC@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage Changeset: r1010:d007ca0a7137 Date: 2014-08-05 17:54 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d007ca0a7137/ Log: Refactored creation of context objects. Created build() methods, clean up default constructor. This ensures that certain fields are only initialized once. diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -278,7 +278,7 @@ assert len(w_arguments) <= 7 w_method.setbytes([chr(131), chr(len(w_arguments) << 5 + 0), chr(124)]) #returnTopFromMethodBytecode w_method.set_lookup_class_and_name(w_receiver.getclass(self.space), "Interpreter.perform") - s_frame = MethodContextShadow(self.space, w_method=w_method, w_receiver=w_receiver) + s_frame = MethodContextShadow.build(self.space, w_method, w_receiver) s_frame.push(w_receiver) s_frame.push_all(list(w_arguments)) return s_frame diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -1269,7 +1269,7 @@ def create_frame(self, space, receiver, arguments=[]): from spyvm.storage_contexts import MethodContextShadow assert len(arguments) == self.argsize - return MethodContextShadow(space, w_method=self, w_receiver=receiver, arguments=arguments) + return MethodContextShadow.build(space, self, receiver, arguments) # === Printing === diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -1287,12 +1287,12 @@ # context of the receiver is used for the new BlockContext. 
# Note that in our impl, MethodContext.w_home == self w_context = assert_pointers(w_context) - w_method_context = w_context.as_context_get_shadow(interp.space).s_home().w_self() + s_method_context = w_context.as_context_get_shadow(interp.space).s_home() # The block bytecodes are stored inline: so we skip past the - # byteodes to invoke this primitive to find them (hence +2) + # bytecodes to invoke this primitive to get to them. initialip = s_frame.pc() + 2 - s_new_context = storage_contexts.BlockContextShadow(interp.space, None, 0, w_method_context, argcnt, initialip) + s_new_context = storage_contexts.BlockContextShadow.build(interp.space, s_method_context, argcnt, initialip) return s_new_context.w_self() @expose_primitive(VALUE, result_is_new_frame=True) diff --git a/spyvm/storage_contexts.py b/spyvm/storage_contexts.py --- a/spyvm/storage_contexts.py +++ b/spyvm/storage_contexts.py @@ -2,8 +2,11 @@ from spyvm import model, constants, error, wrapper from spyvm.storage import AbstractRedirectingShadow from rpython.tool.pairtype import extendabletype -from rpython.rlib import rarithmetic, jit -from rpython.rlib.debug import make_sure_not_resized +from rpython.rlib import rarithmetic, jit, objectmodel + + at objectmodel.specialize.call_location() +def fresh_virtualizable(x): + return jit.hint(x, access_directly=True, fresh_virtualizable=True) class ContextState(object): def __init__(self, name): @@ -33,11 +36,12 @@ # ______________________________________________________________________ # Initialization - def __init__(self, space, w_self, size=0): + def __init__(self, space, w_self, size): self._s_sender = None AbstractRedirectingShadow.__init__(self, space, w_self, size) self.instances_w = {} self.state = InactiveContext + self.store_pc(0) def copy_from(self, other_shadow): # Some fields have to be initialized before the rest, to ensure correct initialization. 
@@ -237,15 +241,15 @@ @jit.unroll_safe def init_stack_and_temps(self): + self = fresh_virtualizable(self) stacksize = self.stackend() - self.stackstart() tempsize = self.tempsize() temps_and_stack = [None] * (stacksize + tempsize) self._temps_and_stack = temps_and_stack - make_sure_not_resized(temps_and_stack) for i in range(tempsize): temps_and_stack[i] = self.space.w_nil self._stack_ptr = rarithmetic.r_uint(tempsize) # we point after the last element - + def stack_get(self, index0): return self._temps_and_stack[index0] @@ -369,24 +373,29 @@ repr_classname = "BlockContextShadow" # === Initialization === - - def __init__(self, space, w_self=None, size=0, w_home=None, argcnt=0, initialip=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - creating_w_self = w_self is None - if creating_w_self: - s_home = w_home.as_methodcontext_get_shadow(space) - contextsize = s_home.size() - s_home.tempsize() - w_self = model.W_PointersObject(space, space.w_BlockContext, contextsize) + + @staticmethod + def build(space, s_home, argcnt, pc): + size = s_home.size() - s_home.tempsize() + w_self = model.W_PointersObject(space, space.w_BlockContext, size) + + ctx = BlockContextShadow(space, w_self, size) + ctx.store_expected_argument_count(argcnt) + ctx.store_w_home(s_home.w_self()) + ctx.store_initialip(pc) + ctx.store_pc(pc) + + w_self.store_shadow(ctx) + ctx.init_stack_and_temps() + return ctx + + def __init__(self, space, w_self, size): + self = fresh_virtualizable(self) ContextPartShadow.__init__(self, space, w_self, size) - if creating_w_self: - w_self.store_shadow(self) - self.store_expected_argument_count(argcnt) - self.store_initialip(initialip) - if w_home: - self.store_w_home(w_home) - self.store_pc(initialip) - self.init_stack_and_temps() - + self._w_home = None + self._initialip = 0 + self._eargc = 0 + def fields_to_copy_first(self): return [ constants.BLKCTX_HOME_INDEX ] @@ -502,37 +511,43 @@ repr_classname = "MethodContextShadow" # === 
Initialization === - + + @staticmethod + def build(space, w_method, w_receiver, arguments=[], closure=None): + s_MethodContext = space.w_MethodContext.as_class_get_shadow(space) + size = w_method.compute_frame_size() + s_MethodContext.instsize() + + ctx = MethodContextShadow(space, None, size) + ctx.store_w_receiver(w_receiver) + ctx.store_w_method(w_method) + ctx.closure = closure + ctx.init_stack_and_temps() + ctx.initialize_temps(arguments) + return ctx + + def __init__(self, space, w_self, size): + self = fresh_virtualizable(self) + ContextPartShadow.__init__(self, space, w_self, size) + self.closure = None + self._w_method = None + self._w_receiver = None + self._is_BlockClosure_ensure = False + + def fields_to_copy_first(self): + return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] + @jit.unroll_safe - def __init__(self, space, w_self=None, size=0, w_method=None, w_receiver=None, - arguments=[], closure=None, pc=0): - self = jit.hint(self, access_directly=True, fresh_virtualizable=True) - ContextPartShadow.__init__(self, space, w_self, size) - self.store_w_receiver(w_receiver) - self.store_pc(pc) - self.closure = closure - - if w_method: - self.store_w_method(w_method) - # The summand is needed, because we calculate i.a. our stackdepth relative of the size of w_self. 
- size = w_method.compute_frame_size() + self.space.w_MethodContext.as_class_get_shadow(self.space).instsize() - self._w_self_size = size - self.init_stack_and_temps() - else: - self._w_method = None - self._is_BlockClosure_ensure = False - + def initialize_temps(self, arguments): argc = len(arguments) for i0 in range(argc): self.settemp(i0, arguments[i0]) - + closure = self.closure if closure: + pc = closure.startpc() - self.w_method().bytecodeoffset() - 1 + self.store_pc(pc) for i0 in range(closure.size()): self.settemp(i0+argc, closure.at0(i0)) - - def fields_to_copy_first(self): - return [ constants.MTHDCTX_METHOD, constants.MTHDCTX_CLOSURE_OR_NIL ] - + # === Accessing object fields === def fetch(self, n0): @@ -593,9 +608,8 @@ def store_w_method(self, w_method): assert isinstance(w_method, model.W_CompiledMethod) self._w_method = w_method - if w_method: - # Primitive 198 is used in BlockClosure >> ensure: - self._is_BlockClosure_ensure = (w_method.primitive() == 198) + # Primitive 198 is a marker used in BlockClosure >> ensure: + self._is_BlockClosure_ensure = (w_method.primitive() == 198) def w_receiver(self): return self._w_receiver @@ -627,14 +641,10 @@ if self._w_self is not None: return self._w_self else: - s_MethodContext = self.space.w_MethodContext.as_class_get_shadow(self.space) - size = self.size() - s_MethodContext.instsize() space = self.space - w_self = s_MethodContext.new(size) - assert isinstance(w_self, model.W_PointersObject) + w_self = model.W_PointersObject(space, space.w_MethodContext, self.size()) w_self.store_shadow(self) self._w_self = w_self - self._w_self_size = w_self.size() return w_self # === Temporary variables === diff --git a/spyvm/test/jit.py b/spyvm/test/jit.py --- a/spyvm/test/jit.py +++ b/spyvm/test/jit.py @@ -49,7 +49,7 @@ w_method.literals = literals w_method.setbytes(bytes) w_receiver = stack[0] - s_frame = storage_contexts.MethodContextShadow(space, w_method=w_method, w_receiver=w_receiver) + s_frame = 
storage_contexts.MethodContextShadow.build(space, w_method, w_receiver) w_frame = s_frame.w_self() def interp_execute_frame(): return interp.interpret_toplevel(w_frame) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -31,7 +31,7 @@ def as_blockcontext_get_shadow(self, space): if not isinstance(self.shadow, storage_contexts.BlockContextShadow): - self.shadow = storage_contexts.BlockContextShadow(space, self) + self.shadow = storage_contexts.BlockContextShadow(space, self, self.size()) return self.shadow IMAGENAME = "anImage.image" diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -248,9 +248,7 @@ s_outerContext = w_outerContext.as_context_get_shadow(self.space) w_method = s_outerContext.w_method() w_receiver = s_outerContext.w_receiver() - pc = self.startpc() - w_method.bytecodeoffset() - 1 - return storage_contexts.MethodContextShadow(self.space, w_method=w_method, w_receiver=w_receiver, - arguments=arguments, closure=self, pc=pc) + return storage_contexts.MethodContextShadow.build(self.space, w_method, w_receiver, arguments, self) def tempsize(self): # We ignore the number of temps a block has, because the first From noreply at buildbot.pypy.org Tue Aug 5 20:25:05 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 5 Aug 2014 20:25:05 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix raw-unicode-escape codec Message-ID: <20140805182505.1360F1C05B7@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72700:a70bb8e0da3c Date: 2014-08-05 13:24 -0500 http://bitbucket.org/pypy/pypy/changeset/a70bb8e0da3c/ Log: Fix raw-unicode-escape codec diff --git a/pypy/interpreter/test/test_utf8_codecs.py b/pypy/interpreter/test/test_utf8_codecs.py --- a/pypy/interpreter/test/test_utf8_codecs.py +++ b/pypy/interpreter/test/test_utf8_codecs.py @@ -740,6 +740,12 @@ Utf8Str.from_unicode(u' 
12, \u1234 '), 7, None) assert encoder(Utf8Str.from_unicode(u'u\u1234'), 2, 'replace') == 'u?' + def test_decode_raw_unicode_escape(self): + decoder = self.getdecoder('raw-unicode-escape') + s = '\xffab\x80\n' + u = Utf8Str.from_unicode(unicode(s, 'raw-unicode-escape')) + assert decoder(s, len(s), 'strict')[0] == u + class TestTranslation(object): def test_utf8(self): diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -288,7 +288,7 @@ # Non-escape characters are interpreted as Unicode ordinals if ch != '\\': - result.append(ch) + result.append(ord(ch)) pos += 1 continue @@ -310,7 +310,7 @@ pos >= size or (s[pos] != 'u' and s[pos] != 'U')): result.append('\\') - result.append(s[pos]) + result.append(ord(s[pos])) pos += 1 continue From noreply at buildbot.pypy.org Tue Aug 5 20:25:03 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 5 Aug 2014 20:25:03 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: merge default Message-ID: <20140805182503.B80C91C05B7@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72699:033dc5f49479 Date: 2014-08-05 11:54 -0500 http://bitbucket.org/pypy/pypy/changeset/033dc5f49479/ Log: merge default diff too long, truncating to 2000 out of 11128 lines diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. 
The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -98,6 +98,17 @@ self.assertTrue(key in self.g) self.assertTrue(self.g.has_key(key)) + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). 
There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ 
b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
@@ -443,6 +442,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -430,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include 
-# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py --- a/lib_pypy/gdbm.py +++ b/lib_pypy/gdbm.py @@ -50,6 +50,8 @@ pass def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") if not isinstance(key, str): raise TypeError("gdbm mappings have string indices only") return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} @@ -71,8 +73,8 @@ def _raise_from_errno(self): if ffi.errno: - raise error(os.strerror(ffi.errno)) - raise error(lib.gdbm_strerror(lib.gdbm_errno)) + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): if self.size < 0: @@ -141,7 +143,7 @@ def _check_closed(self): if not self.ll_dbm: - raise error("GDBM object has already been closed") + raise error(0, "GDBM object has already been closed") __del__ = close @@ -159,7 +161,7 @@ elif flags[0] == 'n': iflags = lib.GDBM_NEWDB else: - raise error("First flag must be one of 'r', 'w', 'c' or 'n'") + 
raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") for flag in flags[1:]: if flag == 'f': iflags |= lib.GDBM_FAST @@ -168,7 +170,7 @@ elif flag == 'u': iflags |= lib.GDBM_NOLOCK else: - raise error("Flag '%s' not supported" % flag) + raise error(0, "Flag '%s' not supported" % flag) return gdbm(filename, iflags, mode) open_flags = "rwcnfsu" diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -74,9 +74,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - `pypy/config/pypyoption.py`_ for details. - When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: @@ -328,7 +325,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. 
For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. 
Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -171,16 +171,21 @@ You might be interested in our `benchmarking site`_ and our `jit documentation`_. -Note that the JIT has a very high warm-up cost, meaning that the -programs are slow at the beginning. If you want to compare the timings -with CPython, even relatively simple programs need to run *at least* one -second, preferrably at least a few seconds. Large, complicated programs -need even more time to warm-up the JIT. +`Your tests are not a benchmark`_: tests tend to be slow under PyPy +because they run exactly once; if they are good tests, they exercise +various corner cases in your code. This is a bad case for JIT +compilers. Note also that our JIT has a very high warm-up cost, meaning +that any program is slow at the beginning. If you want to compare the +timings with CPython, even relatively simple programs need to run *at +least* one second, preferrably at least a few seconds. 
Large, +complicated programs need even more time to warm-up the JIT. .. _`benchmarking site`: http://speed.pypy.org .. _`jit documentation`: jit/index.html +.. _`your tests are not a benchmark`: http://alexgaynor.net/2013/jul/15/your-tests-are-not-benchmark/ + --------------------------------------------------------------- Couldn't the JIT dump and reload already-compiled machine code? --------------------------------------------------------------- @@ -465,9 +470,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -50,6 +50,8 @@ libz-dev libbz2-dev libncurses-dev libexpat1-dev \ libssl-dev libgc-dev python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. + On a Fedora-16 box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ @@ -57,6 +59,8 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``xz-devel``. 
+ On SLES11: $ sudo zypper install gcc make python-devel pkg-config \ @@ -74,6 +78,7 @@ * ``pkg-config`` (to help us locate libffi files) * ``libz-dev`` (for the optional ``zlib`` module) * ``libbz2-dev`` (for the optional ``bz2`` module) + * ``liblzma`` (for the optional ``lzma`` module, PyPy3 only) * ``libsqlite3-dev`` (for the optional ``sqlite3`` module via cffi) * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) * ``libexpat1-dev`` (for the optional ``pyexpat`` module) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -102,7 +102,7 @@ .. _Python: http://docs.python.org/index.html .. _`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ -.. _`development bug/feature tracker`: https://bugs.pypy.org +.. _`development bug/feature tracker`: https://bitbucket.org/pypy/pypy/issues .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -34,7 +34,7 @@ aborted due to some reason. The hook will be invoked with the siagnture: - ``hook(jitdriver_name, greenkey, reason)`` + ``hook(jitdriver_name, greenkey, reason, oplist)`` Reason is a string, the meaning of other arguments is the same as attributes on JitLoopInfo object diff --git a/pypy/doc/objspace-proxies.rst b/pypy/doc/objspace-proxies.rst --- a/pypy/doc/objspace-proxies.rst +++ b/pypy/doc/objspace-proxies.rst @@ -26,6 +26,16 @@ Transparent Proxies ================================ +.. warning:: + + This is a feature that was tried experimentally long ago, and we + found no really good use cases. The basic functionality is still + there, but we don't recommend using it. 
Some of the examples below + might not work any more (e.g. you can't tproxy a list object any + more). The rest can be done by hacking in standard Python. If + anyone is interested in working on tproxy again, he is welcome, but + we don't regard this as an interesting extension. + PyPy's Transparent Proxies allow routing of operations on objects to a callable. Application level code can customize objects without interfering with the type system - ``type(proxied_list) is list`` holds true diff --git a/pypy/doc/release-pypy3-2.3.1.rst b/pypy/doc/release-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-pypy3-2.3.1.rst @@ -0,0 +1,69 @@ +===================== +PyPy3 2.3.1 - Fulcrum +===================== + +We're pleased to announce the first stable release of PyPy3. PyPy3 +targets Python 3 (3.2.5) compatibility. + +We would like to thank all of the people who donated_ to the `py3k proposal`_ +for supporting the work that went into this. + +You can download the PyPy3 2.3.1 release here: + + http://pypy.org/download.html#pypy3-2-3-1 + +Highlights +========== + +* The first stable release of PyPy3: support for Python 3! + +* The stdlib has been updated to Python 3.2.5 + +* Additional support for the u'unicode' syntax (`PEP 414`_) from Python 3.3 + +* Updates from the default branch, such as incremental GC and various JIT + improvements + +* Resolved some notable JIT performance regressions from PyPy2: + + - Re-enabled the previously disabled collection (list/dict/set) strategies + + - Resolved performance of iteration over range objects + + - Resolved handling of Python 3's exception __context__ unnecessarily forcing + frame object overhead + +.. _`PEP 414`: http://legacy.python.org/dev/peps/pep-0414/ + +What is PyPy? +============== + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7.6 or 3.2.5. It's fast due to its integrated tracing JIT compiler. 
+ +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +How to use PyPy? +================= + +We suggest using PyPy from a `virtualenv`_. Once you have a virtualenv +installed, you can follow instructions from `pypy documentation`_ on how +to proceed. This document also covers other `installation schemes`_. + +.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html +.. _`py3k proposal`: http://pypy.org/py3donate.html +.. _`pypy documentation`: http://doc.pypy.org/en/latest/getting-started.html#installing-using-virtualenv +.. _`virtualenv`: http://www.virtualenv.org/en/latest/ +.. _`installation schemes`: http://doc.pypy.org/en/latest/getting-started.html#installing-pypy + + +Cheers, +the PyPy team diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -28,7 +28,8 @@ Introduction ============ -``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats_ +``pypy-stm`` is a variant of the regular PyPy interpreter. (This +version supports Python 2.7; see below for `Python 3`_.) With caveats_ listed below, it should be in theory within 20%-50% slower than a regular PyPy, comparing the JIT version in both cases (but see below!). It is called @@ -92,9 +93,9 @@ We're busy fixing them as we find them; feel free to `report bugs`_. * It runs with an overhead as low as 20% on examples like "richards". - There are also other examples with higher overheads --up to 10x for - "translate.py"-- which we are still trying to understand. One suspect - is our partial GC implementation, see below. 
+ There are also other examples with higher overheads --currently up to + 2x for "translate.py"-- which we are still trying to understand. + One suspect is our partial GC implementation, see below. * Currently limited to 1.5 GB of RAM (this is just a parameter in `core.h`__). Memory overflows are not correctly handled; they cause @@ -111,9 +112,8 @@ * The GC is new; although clearly inspired by PyPy's regular GC, it misses a number of optimizations for now. Programs allocating large - numbers of small objects that don't immediately die, as well as - programs that modify large lists or dicts, suffer from these missing - optimizations. + numbers of small objects that don't immediately die (surely a common + situation) suffer from these missing optimizations. * The GC has no support for destructors: the ``__del__`` method is never called (including on file objects, which won't be closed for you). @@ -138,6 +138,25 @@ +Python 3 +======== + +In this document I describe "pypy-stm", which is based on PyPy's Python +2.7 interpreter. Supporting Python 3 should take about half an +afternoon of work. Obviously, what I *don't* mean is that by tomorrow +you can have a finished and polished "pypy3-stm" product. General py3k +work is still missing; and general stm work is also still missing. But +they are rather independent from each other, as usual in PyPy. The +required afternoon of work will certainly be done one of these days now +that the internal interfaces seem to stabilize. + +The same is true for other languages implemented in the RPython +framework, although the amount of work to put there might vary, because +the STM framework within RPython is currently targeting the PyPy +interpreter and other ones might have slightly different needs. + + + User Guide ========== @@ -490,8 +509,6 @@ The last two lines are special; they are an internal marker read by ``transactional_memory.print_abort_info()``. -These statistics are not printed out for the main thread, for now. 
- Reference to implementation details ----------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -10,3 +10,43 @@ Added support for ``__getitem__``, ``__setitem__``, ``__getslice__``, ``__setslice__``, and ``__len__`` to RPython + +.. branch: stringbuilder2-perf +Give the StringBuilder a more flexible internal structure, with a +chained list of strings instead of just one string. This make it +more efficient when building large strings, e.g. with cStringIO(). + +Also, use systematically jit.conditional_call() instead of regular +branches. This lets the JIT make more linear code, at the cost of +forcing a bit more data (to be passed as arguments to +conditional_calls). I would expect the net result to be a slight +slow-down on some simple benchmarks and a speed-up on bigger +programs. + +.. branch: ec-threadlocal +Change the executioncontext's lookup to be done by reading a thread- +local variable (which is implemented in C using '__thread' if +possible, and pthread_getspecific() otherwise). On Linux x86 and +x86-64, the JIT backend has a special optimization that lets it emit +directly a single MOV from a %gs- or %fs-based address. It seems +actually to give a good boost in performance. + +.. branch: fast-gil +A faster way to handle the GIL, particularly in JIT code. The GIL is +now a composite of two concepts: a global number (it's just set from +1 to 0 and back around CALL_RELEASE_GIL), and a real mutex. If there +are threads waiting to acquire the GIL, one of them is actively +checking the global number every 0.1 ms to 1 ms. Overall, JIT loops +full of external function calls now run a bit faster (if no thread was +started yet), or a *lot* faster (if threads were started already). + +.. branch: jit-get-errno +Optimize the errno handling in the JIT, notably around external +function calls. Linux-only. + +.. 
branch: disable_pythonapi +Remove non-functioning ctypes.pyhonapi and ctypes.PyDLL, document this +incompatibility with cpython. Recast sys.dllhandle to an int. + +.. branch: scalar-operations +Fix performance regression on ufunc(, ) in numpy. diff --git a/pypy/doc/whatsnew-pypy3-2.3.1.rst b/pypy/doc/whatsnew-pypy3-2.3.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-pypy3-2.3.1.rst @@ -0,0 +1,6 @@ +========================= +What's new in PyPy3 2.3.1 +========================= + +.. this is a revision shortly after pypy3-release-2.3.x +.. startrev: 0137d8e6657d diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -132,19 +132,23 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download http://www.gzip.org/zlib/zlib-1.2.3.tar.gz and extract it in -the base directory. Then compile:: +the base directory. Then compile as a static library:: cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.dll \zlib.dll + copy zlib1.lib + copy zlib.h zconf.h The bz2 compression library ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get the same version of bz2 used by python and compile as a static library:: svn export http://svn.python.org/projects/external/bzip2-1.0.6 cd bzip2-1.0.6 nmake -f makefile.msc - copy bzip.dll \bzip.dll + copy libbz2.lib + copy bzlib.h + The sqlite3 database library ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -166,7 +170,8 @@ is actually enough for pypy). Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH. +your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and +both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -15,14 +15,14 @@ * Because of the above, we are very serious about Test Driven Development. 
It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. (It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture ============ @@ -32,7 +32,7 @@ * `RPython`_ is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. @@ -40,7 +40,7 @@ .. _`RPython`: coding-guide.html#RPython -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. @@ -73,7 +73,7 @@ .. 
_`we have a tracing JIT`: jit/index.html -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -30,8 +30,6 @@ if w_dict is not None: # for tests w_entry_point = space.getitem(w_dict, space.wrap('entry_point')) w_run_toplevel = space.getitem(w_dict, space.wrap('run_toplevel')) - w_call_finish_gateway = space.wrap(gateway.interp2app(call_finish)) - w_call_startup_gateway = space.wrap(gateway.interp2app(call_startup)) withjit = space.config.objspace.usemodules.pypyjit def entry_point(argv): @@ -53,7 +51,7 @@ argv = argv[:1] + argv[3:] try: try: - space.call_function(w_run_toplevel, w_call_startup_gateway) + space.startup() w_executable = space.wrap(argv[0]) w_argv = space.newlist([space.wrap(s) for s in argv[1:]]) w_exitcode = space.call_function(w_entry_point, w_executable, w_argv) @@ -69,7 +67,7 @@ return 1 finally: try: - space.call_function(w_run_toplevel, w_call_finish_gateway) + space.finish() except OperationError, e: debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) @@ -184,11 +182,6 @@ 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} -def call_finish(space): - space.finish() - -def call_startup(space): - space.startup() # _____ Define and setup target ___ diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -396,6 +396,7 @@ def startup(self): # To be called before using the space + self.threadlocals.enter_thread(self) # Initialize already imported 
builtin modules from pypy.interpreter.module import Module @@ -640,30 +641,36 @@ """NOT_RPYTHON: Abstract method that should put some minimal content into the w_builtins.""" - @jit.loop_invariant def getexecutioncontext(self): "Return what we consider to be the active execution context." # Important: the annotator must not see a prebuilt ExecutionContext: # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. - if self.config.translating and not we_are_translated(): - assert self.threadlocals.getvalue() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec + if not we_are_translated(): + if self.config.translating: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec + return ec + else: + ec = self.threadlocals.get_ec() + if ec is None: + self.threadlocals.enter_thread(self) + ec = self.threadlocals.get_ec() return ec - # normal case follows. The 'thread' module installs a real - # thread-local object in self.threadlocals, so this builds - # and caches a new ec in each thread. - ec = self.threadlocals.getvalue() - if ec is None: - ec = self.createexecutioncontext() - self.threadlocals.setvalue(ec) - return ec + else: + # translated case follows. self.threadlocals is either from + # 'pypy.interpreter.miscutils' or 'pypy.module.thread.threadlocals'. + # the result is assumed to be non-null: enter_thread() was called + # by space.startup(). 
+ ec = self.threadlocals.get_ec() + assert ec is not None + return ec def _freeze_(self): return True @@ -964,6 +971,13 @@ """ return self.unpackiterable(w_iterable, expected_length) + def listview_no_unpack(self, w_iterable): + """ Same as listview() if cheap. If 'w_iterable' is something like + a generator, for example, then return None instead. + May return None anyway. + """ + return None + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. @@ -1488,9 +1502,7 @@ return buf.as_str() def str_or_None_w(self, w_obj): - if self.is_w(w_obj, self.w_None): - return None - return self.str_w(w_obj) + return None if self.is_none(w_obj) else self.str_w(w_obj) def str_w(self, w_obj): return w_obj.str_w(self) diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -496,6 +496,13 @@ """ +class UserDelCallback(object): + def __init__(self, w_obj, callback, descrname): + self.w_obj = w_obj + self.callback = callback + self.descrname = descrname + self.next = None + class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. 
This is done as an action instead of immediately when the @@ -506,12 +513,18 @@ def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None self.finalizers_lock_count = 0 self.enabled_at_app_level = True def register_callback(self, w_obj, callback, descrname): - self.dying_objects.append((w_obj, callback, descrname)) + cb = UserDelCallback(w_obj, callback, descrname) + if self.dying_objects_last is None: + self.dying_objects = cb + else: + self.dying_objects_last.next = cb + self.dying_objects_last = cb self.fire() def perform(self, executioncontext, frame): @@ -525,13 +538,33 @@ # avoid too deep recursions of the kind of __del__ being called # while in the middle of another __del__ call. pending = self.dying_objects - self.dying_objects = [] + self.dying_objects = None + self.dying_objects_last = None space = self.space - for i in range(len(pending)): - w_obj, callback, descrname = pending[i] - pending[i] = (None, None, None) + while pending is not None: try: - callback(w_obj) + pending.callback(pending.w_obj) except OperationError, e: - e.write_unraisable(space, descrname, w_obj) + e.write_unraisable(space, pending.descrname, pending.w_obj) e.clear(space) # break up reference cycles + pending = pending.next + # + # Note: 'dying_objects' used to be just a regular list instead + # of a chained list. This was the cause of "leaks" if we have a + # program that constantly creates new objects with finalizers. + # Here is why: say 'dying_objects' is a long list, and there + # are n instances in it. Then we spend some time in this + # function, possibly triggering more GCs, but keeping the list + # of length n alive. Then the list is suddenly freed at the + # end, and we return to the user program. At this point the + # GC limit is still very high, because just before, there was + # a list of length n alive. 
Assume that the program continues + # to allocate a lot of instances with finalizers. The high GC + # limit means that it could allocate a lot of instances before + # reaching it --- possibly more than n. So the whole procedure + # repeats with higher and higher values of n. + # + # This does not occur in the current implementation because + # there is no list of length n: if n is large, then the GC + # will run several times while walking the list, but it will + # see lower and lower memory usage, with no lower bound of n. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -616,7 +616,8 @@ def descr_classmethod_get(self, space, w_obj, w_klass=None): if space.is_none(w_klass): w_klass = space.type(w_obj) - return space.wrap(Method(space, self.w_function, w_klass, space.w_None)) + return space.wrap(Method(space, self.w_function, w_klass, + space.type(w_klass))) def descr_classmethod__new__(space, w_subtype, w_function): instance = space.allocate_instance(ClassMethod, w_subtype) diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -895,7 +895,7 @@ "use unwrap_spec(...=WrappedDefault(default))" % ( self._code.identifier, name, defaultval)) defs_w.append(None) - else: + elif name != '__args__' and name != 'args_w': defs_w.append(space.wrap(defaultval)) if self._code._unwrap_spec: UNDEFINED = object() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -61,6 +61,13 @@ return self.send_ex(w_arg) def send_ex(self, w_arg, operr=None): + pycode = self.pycode + if jit.we_are_jitted() and should_not_inline(pycode): + generatorentry_driver.jit_merge_point(gen=self, w_arg=w_arg, + operr=operr, pycode=pycode) + return self._send_ex(w_arg, operr) + + def _send_ex(self, w_arg, operr): space = self.space 
if self.running: raise OperationError(space.w_ValueError, @@ -72,8 +79,7 @@ if operr is None: operr = OperationError(space.w_StopIteration, space.w_None) raise operr - # XXX it's not clear that last_instr should be promoted at all - # but as long as it is necessary for call_assembler, let's do it early + last_instr = jit.promote(frame.last_instr) if last_instr == -1: if w_arg and not space.is_w(w_arg, space.w_None): @@ -214,3 +220,38 @@ "interrupting generator of ") break block = block.previous + + + +def get_printable_location_genentry(bytecode): + return '%s ' % (bytecode.get_repr(),) +generatorentry_driver = jit.JitDriver(greens=['pycode'], + reds=['gen', 'w_arg', 'operr'], + get_printable_location = + get_printable_location_genentry, + name='generatorentry') + +from pypy.tool.stdlib_opcode import HAVE_ARGUMENT, opmap +YIELD_VALUE = opmap['YIELD_VALUE'] + + at jit.elidable_promote() +def should_not_inline(pycode): + # Should not inline generators with more than one "yield", + # as an approximative fix (see issue #1782). There are cases + # where it slows things down; for example calls to a simple + # generator that just produces a few simple values with a few + # consecutive "yield" statements. It fixes the near-infinite + # slow-down in issue #1782, though... 
+ count_yields = 0 + code = pycode.co_code + n = len(code) + i = 0 + while i < n: + c = code[i] + op = ord(c) + if op == YIELD_VALUE: + count_yields += 1 + i += 1 + if op >= HAVE_ARGUMENT: + i += 2 + return count_yields >= 2 diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -11,11 +11,14 @@ """ _value = None - def getvalue(self): + def get_ec(self): return self._value - def setvalue(self, value): - self._value = value + def enter_thread(self, space): + self._value = space.createexecutioncontext() + + def try_enter_thread(self, space): + return False def signals_enabled(self): return True diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -511,10 +511,10 @@ for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] w_value = self.locals_stack_w[i] - w_name = self.space.wrap(name) if w_value is not None: - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) else: + w_name = self.space.wrap(name) try: self.space.delitem(self.w_locals, w_name) except OperationError as e: @@ -534,8 +534,7 @@ except ValueError: pass else: - w_name = self.space.wrap(name) - self.space.setitem(self.w_locals, w_name, w_value) + self.space.setitem_str(self.w_locals, name, w_value) @jit.unroll_safe @@ -548,13 +547,9 @@ new_fastlocals_w = [None] * numlocals for i in range(min(len(varnames), numlocals)): - w_name = self.space.wrap(varnames[i]) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise - else: + name = varnames[i] + w_value = self.space.finditem_str(self.w_locals, name) + if w_value is not None: new_fastlocals_w[i] = w_value self.setfastscope(new_fastlocals_w) @@ -563,13 +558,8 @@ for i in range(len(freevarnames)): name = freevarnames[i] cell = self.cells[i] - w_name = self.space.wrap(name) - try: - w_value = self.space.getitem(self.w_locals, w_name) - except OperationError, e: - if not e.match(self.space, self.space.w_KeyError): - raise - else: + w_value = self.space.finditem_str(self.w_locals, name) + if w_value is not None: cell.set(w_value) @jit.unroll_safe diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -200,7 +200,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: 
next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -727,6 +727,22 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_unwrap_spec_default_applevel_bug2(self): + space = self.space + def g(space, w_x, w_y=None, __args__=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + # + def g(space, w_x, w_y=None, args_w=None): + return w_x + w_g = space.wrap(gateway.interp2app_temp(g)) + w_42 = space.call_function(w_g, space.wrap(42)) + assert space.int_w(w_42) == 42 + py.test.raises(gateway.OperationError, space.call_function, w_g) + def test_interp2app_doc(self): space = self.space def f(space, w_x): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -17,7 +17,7 @@ yield 1 assert g.gi_running g = f() - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' assert g.gi_frame is not None assert not g.gi_running @@ -26,7 +26,7 @@ raises(StopIteration, g.next) assert not g.gi_running assert g.gi_frame is None - assert g.gi_code is f.func_code + assert g.gi_code is f.__code__ assert g.__name__ == 'f' def test_generator3(self): @@ -278,4 +278,21 @@ def f(): yield 1 raise StopIteration - assert tuple(f()) == (1,) \ No newline at end of file + assert 
tuple(f()) == (1,) + + +def test_should_not_inline(space): + from pypy.interpreter.generator import should_not_inline + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + return g.__code__ + ''') + assert should_not_inline(w_co) == False + w_co = space.appexec([], '''(): + def g(x): + yield x + 5 + yield x + 6 + return g.__code__ + ''') + assert should_not_inline(w_co) == True diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -388,6 +388,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. If @@ -17,12 +17,11 @@ if len(obj) == 0: return _caller_locals() elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" + raise TypeError("vars() takes at most 1 argument.") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,8 +37,7 @@ attributes of its class's base classes. 
""" if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = _caller_locals().keys() # 2 stackframes away if not isinstance(local_names, list): @@ -48,92 +46,61 @@ return local_names import types - obj = args[0] - - dir_meth = None if isinstance(obj, types.InstanceType): - try: - dir_meth = getattr(obj, "__dir__") - except AttributeError: - pass From noreply at buildbot.pypy.org Wed Aug 6 02:34:11 2014 From: noreply at buildbot.pypy.org (kiilerix) Date: Wed, 6 Aug 2014 02:34:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Make pycode.dump work for py3 opcodes on py2 (issue 1827) Message-ID: <20140806003411.CB1F51C320C@cobra.cs.uni-duesseldorf.de> Author: Mads Kiilerich Branch: py3.3 Changeset: r72701:803c1daaac74 Date: 2014-07-27 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/803c1daaac74/ Log: Make pycode.dump work for py3 opcodes on py2 (issue 1827) Python 3 dis.py and opcode.py are "backported" as more or less trivial copies. This code duplication seems like the least intrusive solution. diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. 
""" -import dis, imp, struct, types, new, sys +import imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.signature import Signature @@ -13,6 +13,7 @@ from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) +from pypy.tool import dis3 from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash, we_are_translated @@ -245,33 +246,6 @@ if isinstance(w_co, PyCode): w_co.remove_docstrings(space) - def _to_code(self): - """For debugging only.""" - consts = [None] * len(self.co_consts_w) - num = 0 - for w in self.co_consts_w: - if isinstance(w, PyCode): - consts[num] = w._to_code() - else: - consts[num] = self.space.unwrap(w) - num += 1 - assert self.co_kwonlyargcount == 0, 'kwonlyargcount is py3k only, cannot turn this code object into a Python2 one' - return new.code(self.co_argcount, - #self.co_kwonlyargcount, # this does not exists in python2 - self.co_nlocals, - self.co_stacksize, - self.co_flags, - self.co_code, - tuple(consts), - tuple(self.co_names), - tuple(self.co_varnames), - self.co_filename, - self.co_name, - self.co_firstlineno, - self.co_lnotab, - tuple(self.co_freevars), - tuple(self.co_cellvars)) - def exec_host_bytecode(self, w_globals, w_locals): if sys.version_info < (2, 7): raise Exception("PyPy no longer supports Python 2.6 or lower") @@ -280,11 +254,11 @@ return frame.run() def dump(self): - """A dis.dis() dump of the code object.""" - print 'WARNING: dumping a py3k bytecode using python2 opmap, the result might be inaccurate or wrong' - print - co = self._to_code() - dis.dis(co) + """NOT_RPYTHON: A dis.dis() dump of the code object.""" + if not hasattr(self, 'co_consts'): + self.co_consts = [w if isinstance(w, PyCode) else self.space.unwrap(w) + for w in self.co_consts_w] + dis3.dis(self) def 
fget_co_consts(self, space): return space.newtuple(self.co_consts_w) diff --git a/pypy/interpreter/test/test_pycode.py b/pypy/interpreter/test/test_pycode.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_pycode.py @@ -0,0 +1,19 @@ +import sys, StringIO + +def test_dump(space): + """test that pycode.dump kind of works with py3 opcodes""" + compiler = space.createcompiler() + code = compiler.compile('lambda *, y=7: None', 'filename', 'exec', 0) + output = None + stdout = sys.stdout + try: + sys.stdout = StringIO.StringIO() + code.dump() + output = sys.stdout.getvalue() + sys.stdout.close() + finally: + sys.stdout = stdout + print '>>>\n' + output + '\n<<<' + assert ' 1 (7)' in output + assert ' 3 (None)' in output + assert ' 16 RETURN_VALUE ' in output diff --git a/lib-python/3/dis.py b/pypy/tool/dis3.py copy from lib-python/3/dis.py copy to pypy/tool/dis3.py --- a/lib-python/3/dis.py +++ b/pypy/tool/dis3.py @@ -1,30 +1,18 @@ -"""Disassembler of Python byte code into mnemonics.""" +"""Disassembler of Python byte code into mnemonics. +Python 3 dis.py partly backported to Python 2""" import sys import types -from opcode import * -from opcode import __all__ as _opcodes_all +from opcode3 import * +from opcode3 import __all__ as _opcodes_all -__all__ = ["code_info", "dis", "disassemble", "distb", "disco", - "findlinestarts", "findlabels", "show_code"] + _opcodes_all +__all__ = ["dis", "disassemble", "distb", "disco", + "findlinestarts", "findlabels"] + _opcodes_all del _opcodes_all _have_code = (types.MethodType, types.FunctionType, types.CodeType, type) -def _try_compile(source, name): - """Attempts to compile the given source, first as an expression and - then as a statement if the first approach fails. 
- - Utility function to accept strings in functions that otherwise - expect code objects - """ - try: - c = compile(source, name, 'eval') - except SyntaxError: - c = compile(source, name, 'exec') - return c - def dis(x=None): """Disassemble classes, methods, functions, or code. @@ -34,29 +22,31 @@ if x is None: distb() return - if hasattr(x, '__func__'): # Method - x = x.__func__ - if hasattr(x, '__code__'): # Function - x = x.__code__ - if hasattr(x, '__dict__'): # Class or module - items = sorted(x.__dict__.items()) + if isinstance(x, types.InstanceType): + x = x.__class__ + if hasattr(x, 'im_func'): + x = x.im_func + if hasattr(x, 'func_code'): + x = x.func_code + if hasattr(x, 'co_code'): # PyCode needs co_code before __dict__ + disassemble(x) + elif hasattr(x, '__dict__'): + items = x.__dict__.items() + items.sort() for name, x1 in items: if isinstance(x1, _have_code): - print("Disassembly of %s:" % name) + print "Disassembly of %s:" % name try: dis(x1) - except TypeError as msg: - print("Sorry:", msg) - print() - elif hasattr(x, 'co_code'): # Code object - disassemble(x) - elif isinstance(x, (bytes, bytearray)): # Raw bytecode - _disassemble_bytes(x) - elif isinstance(x, str): # Source code - _disassemble_str(x) + except TypeError, msg: + print "Sorry:", msg + print + elif isinstance(x, str): + disassemble_string(x) else: - raise TypeError("don't know how to disassemble %s objects" % - type(x).__name__) + raise TypeError, \ + "don't know how to disassemble %s objects" % \ + type(x).__name__ def distb(tb=None): """Disassemble a traceback (default: last traceback).""" @@ -64,86 +54,10 @@ try: tb = sys.last_traceback except AttributeError: - raise RuntimeError("no last traceback to disassemble") + raise RuntimeError, "no last traceback to disassemble" while tb.tb_next: tb = tb.tb_next disassemble(tb.tb_frame.f_code, tb.tb_lasti) -# The inspect module interrogates this dictionary to build its -# list of CO_* constants. 
It is also used by pretty_flags to -# turn the co_flags field into a human readable list. -COMPILER_FLAG_NAMES = { - 1: "OPTIMIZED", - 2: "NEWLOCALS", - 4: "VARARGS", - 8: "VARKEYWORDS", - 16: "NESTED", - 32: "GENERATOR", - 64: "NOFREE", -} - -def pretty_flags(flags): - """Return pretty representation of code flags.""" - names = [] - for i in range(32): - flag = 1<") - if hasattr(x, 'co_code'): # Code object - return _format_code_info(x) - else: - raise TypeError("don't know how to disassemble %s objects" % - type(x).__name__) - -def _format_code_info(co): - lines = [] - lines.append("Name: %s" % co.co_name) - lines.append("Filename: %s" % co.co_filename) - lines.append("Argument count: %s" % co.co_argcount) - lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount) - lines.append("Number of locals: %s" % co.co_nlocals) - lines.append("Stack size: %s" % co.co_stacksize) - lines.append("Flags: %s" % pretty_flags(co.co_flags)) - if co.co_consts: - lines.append("Constants:") - for i_c in enumerate(co.co_consts): - lines.append("%4d: %r" % i_c) - if co.co_names: - lines.append("Names:") - for i_n in enumerate(co.co_names): - lines.append("%4d: %s" % i_n) - if co.co_varnames: - lines.append("Variable names:") - for i_n in enumerate(co.co_varnames): - lines.append("%4d: %s" % i_n) - if co.co_freevars: - lines.append("Free variables:") - for i_n in enumerate(co.co_freevars): - lines.append("%4d: %s" % i_n) - if co.co_cellvars: - lines.append("Cell variables:") - for i_n in enumerate(co.co_cellvars): - lines.append("%4d: %s" % i_n) - return "\n".join(lines) - -def show_code(co): - """Print details of methods, functions, or code to stdout.""" - print(code_info(co)) - def disassemble(co, lasti=-1): """Disassemble a code object.""" code = co.co_code @@ -154,92 +68,90 @@ extended_arg = 0 free = None while i < n: - op = code[i] + c = code[i] + op = ord(c) if i in linestarts: if i > 0: - print() - print("%3d" % linestarts[i], end=' ') + print + print "%3d" % linestarts[i], 
else: - print(' ', end=' ') + print ' ', - if i == lasti: print('-->', end=' ') - else: print(' ', end=' ') - if i in labels: print('>>', end=' ') - else: print(' ', end=' ') - print(repr(i).rjust(4), end=' ') - print(opname[op].ljust(20), end=' ') + if i == lasti: print '-->', + else: print ' ', + if i in labels: print '>>', + else: print ' ', + print repr(i).rjust(4), + print opname[op].ljust(20), i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + extended_arg + oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg extended_arg = 0 i = i+2 if op == EXTENDED_ARG: - extended_arg = oparg*65536 - print(repr(oparg).rjust(5), end=' ') + extended_arg = oparg*65536L + print repr(oparg).rjust(5), if op in hasconst: - print('(' + repr(co.co_consts[oparg]) + ')', end=' ') + print '(' + repr(co.co_consts[oparg]) + ')', elif op in hasname: - print('(' + co.co_names[oparg] + ')', end=' ') + print '(' + co.co_names[oparg] + ')', elif op in hasjrel: - print('(to ' + repr(i + oparg) + ')', end=' ') + print '(to ' + repr(i + oparg) + ')', elif op in haslocal: - print('(' + co.co_varnames[oparg] + ')', end=' ') + print '(' + co.co_varnames[oparg] + ')', elif op in hascompare: - print('(' + cmp_op[oparg] + ')', end=' ') + print '(' + cmp_op[oparg] + ')', elif op in hasfree: if free is None: free = co.co_cellvars + co.co_freevars - print('(' + free[oparg] + ')', end=' ') + print '(' + free[oparg] + ')', elif op in hasnargs: - print('(%d positional, %d keyword pair)' - % (code[i-2], code[i-1]), end=' ') - print() + print '(%d positional, %d keyword pair)' % \ + (ord(code[i-2]), ord(code[i-1])), + print -def _disassemble_bytes(code, lasti=-1, varnames=None, names=None, +def disassemble_string(code, lasti=-1, varnames=None, names=None, constants=None): labels = findlabels(code) n = len(code) i = 0 while i < n: - op = code[i] - if i == lasti: print('-->', end=' ') - else: print(' ', end=' ') - if i in labels: print('>>', end=' ') - else: print(' ', end=' ') - 
print(repr(i).rjust(4), end=' ') - print(opname[op].ljust(15), end=' ') + c = code[i] + op = ord(c) + if i == lasti: print '-->', + else: print ' ', + if i in labels: print '>>', + else: print ' ', + print repr(i).rjust(4), + print opname[op].ljust(15), i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 - print(repr(oparg).rjust(5), end=' ') + print repr(oparg).rjust(5), if op in hasconst: if constants: - print('(' + repr(constants[oparg]) + ')', end=' ') + print '(' + repr(constants[oparg]) + ')', else: - print('(%d)'%oparg, end=' ') + print '(%d)'%oparg, elif op in hasname: if names is not None: - print('(' + names[oparg] + ')', end=' ') + print '(' + names[oparg] + ')', else: - print('(%d)'%oparg, end=' ') + print '(%d)'%oparg, elif op in hasjrel: - print('(to ' + repr(i + oparg) + ')', end=' ') + print '(to ' + repr(i + oparg) + ')', elif op in haslocal: if varnames: - print('(' + varnames[oparg] + ')', end=' ') + print '(' + varnames[oparg] + ')', else: - print('(%d)' % oparg, end=' ') + print '(%d)' % oparg, elif op in hascompare: - print('(' + cmp_op[oparg] + ')', end=' ') + print '(' + cmp_op[oparg] + ')', elif op in hasnargs: - print('(%d positional, %d keyword pair)' - % (code[i-2], code[i-1]), end=' ') - print() - -def _disassemble_str(source): - """Compile the source string, then disassemble the code object.""" - disassemble(_try_compile(source, '')) + print '(%d positional, %d keyword pair)' % \ + (ord(code[i-2]), ord(code[i-1])), + print disco = disassemble # XXX For backwards compatibility @@ -253,10 +165,11 @@ n = len(code) i = 0 while i < n: - op = code[i] + c = code[i] + op = ord(c) i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 label = -1 if op in hasjrel: @@ -274,8 +187,8 @@ Generate pairs (offset, lineno) as described in Python/compile.c. 
""" - byte_increments = list(code.co_lnotab[0::2]) - line_increments = list(code.co_lnotab[1::2]) + byte_increments = [ord(c) for c in code.co_lnotab[0::2]] + line_increments = [ord(c) for c in code.co_lnotab[1::2]] lastlineno = None lineno = code.co_firstlineno diff --git a/lib-python/3/opcode.py b/pypy/tool/opcode3.py copy from lib-python/3/opcode.py copy to pypy/tool/opcode3.py --- a/lib-python/3/opcode.py +++ b/pypy/tool/opcode3.py @@ -1,7 +1,7 @@ - """ opcode module - potentially shared between dis and other modules which operate on bytecodes (e.g. peephole optimizers). +"Backported" from Python 3 to Python 2 land - an excact copy of lib-python/3/opcode.py """ __all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", From noreply at buildbot.pypy.org Wed Aug 6 02:34:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 6 Aug 2014 02:34:13 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in kiilerix/pypy/py3.3 (pull request #257) Message-ID: <20140806003413.8A73A1C320C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72702:8ba1b63dfa1b Date: 2014-08-05 17:33 -0700 http://bitbucket.org/pypy/pypy/changeset/8ba1b63dfa1b/ Log: Merged in kiilerix/pypy/py3.3 (pull request #257) py3k: implement PyCode.dump() properly (issue 1827) diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. 
""" -import dis, imp, struct, types, new, sys +import imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.signature import Signature @@ -13,6 +13,7 @@ from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) +from pypy.tool import dis3 from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash, we_are_translated @@ -245,33 +246,6 @@ if isinstance(w_co, PyCode): w_co.remove_docstrings(space) - def _to_code(self): - """For debugging only.""" - consts = [None] * len(self.co_consts_w) - num = 0 - for w in self.co_consts_w: - if isinstance(w, PyCode): - consts[num] = w._to_code() - else: - consts[num] = self.space.unwrap(w) - num += 1 - assert self.co_kwonlyargcount == 0, 'kwonlyargcount is py3k only, cannot turn this code object into a Python2 one' - return new.code(self.co_argcount, - #self.co_kwonlyargcount, # this does not exists in python2 - self.co_nlocals, - self.co_stacksize, - self.co_flags, - self.co_code, - tuple(consts), - tuple(self.co_names), - tuple(self.co_varnames), - self.co_filename, - self.co_name, - self.co_firstlineno, - self.co_lnotab, - tuple(self.co_freevars), - tuple(self.co_cellvars)) - def exec_host_bytecode(self, w_globals, w_locals): if sys.version_info < (2, 7): raise Exception("PyPy no longer supports Python 2.6 or lower") @@ -280,11 +254,11 @@ return frame.run() def dump(self): - """A dis.dis() dump of the code object.""" - print 'WARNING: dumping a py3k bytecode using python2 opmap, the result might be inaccurate or wrong' - print - co = self._to_code() - dis.dis(co) + """NOT_RPYTHON: A dis.dis() dump of the code object.""" + if not hasattr(self, 'co_consts'): + self.co_consts = [w if isinstance(w, PyCode) else self.space.unwrap(w) + for w in self.co_consts_w] + dis3.dis(self) def 
fget_co_consts(self, space): return space.newtuple(self.co_consts_w) diff --git a/pypy/interpreter/test/test_pycode.py b/pypy/interpreter/test/test_pycode.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/test/test_pycode.py @@ -0,0 +1,19 @@ +import sys, StringIO + +def test_dump(space): + """test that pycode.dump kind of works with py3 opcodes""" + compiler = space.createcompiler() + code = compiler.compile('lambda *, y=7: None', 'filename', 'exec', 0) + output = None + stdout = sys.stdout + try: + sys.stdout = StringIO.StringIO() + code.dump() + output = sys.stdout.getvalue() + sys.stdout.close() + finally: + sys.stdout = stdout + print '>>>\n' + output + '\n<<<' + assert ' 1 (7)' in output + assert ' 3 (None)' in output + assert ' 16 RETURN_VALUE ' in output diff --git a/lib-python/3/dis.py b/pypy/tool/dis3.py copy from lib-python/3/dis.py copy to pypy/tool/dis3.py --- a/lib-python/3/dis.py +++ b/pypy/tool/dis3.py @@ -1,30 +1,18 @@ -"""Disassembler of Python byte code into mnemonics.""" +"""Disassembler of Python byte code into mnemonics. +Python 3 dis.py partly backported to Python 2""" import sys import types -from opcode import * -from opcode import __all__ as _opcodes_all +from opcode3 import * +from opcode3 import __all__ as _opcodes_all -__all__ = ["code_info", "dis", "disassemble", "distb", "disco", - "findlinestarts", "findlabels", "show_code"] + _opcodes_all +__all__ = ["dis", "disassemble", "distb", "disco", + "findlinestarts", "findlabels"] + _opcodes_all del _opcodes_all _have_code = (types.MethodType, types.FunctionType, types.CodeType, type) -def _try_compile(source, name): - """Attempts to compile the given source, first as an expression and - then as a statement if the first approach fails. 
- - Utility function to accept strings in functions that otherwise - expect code objects - """ - try: - c = compile(source, name, 'eval') - except SyntaxError: - c = compile(source, name, 'exec') - return c - def dis(x=None): """Disassemble classes, methods, functions, or code. @@ -34,29 +22,31 @@ if x is None: distb() return - if hasattr(x, '__func__'): # Method - x = x.__func__ - if hasattr(x, '__code__'): # Function - x = x.__code__ - if hasattr(x, '__dict__'): # Class or module - items = sorted(x.__dict__.items()) + if isinstance(x, types.InstanceType): + x = x.__class__ + if hasattr(x, 'im_func'): + x = x.im_func + if hasattr(x, 'func_code'): + x = x.func_code + if hasattr(x, 'co_code'): # PyCode needs co_code before __dict__ + disassemble(x) + elif hasattr(x, '__dict__'): + items = x.__dict__.items() + items.sort() for name, x1 in items: if isinstance(x1, _have_code): - print("Disassembly of %s:" % name) + print "Disassembly of %s:" % name try: dis(x1) - except TypeError as msg: - print("Sorry:", msg) - print() - elif hasattr(x, 'co_code'): # Code object - disassemble(x) - elif isinstance(x, (bytes, bytearray)): # Raw bytecode - _disassemble_bytes(x) - elif isinstance(x, str): # Source code - _disassemble_str(x) + except TypeError, msg: + print "Sorry:", msg + print + elif isinstance(x, str): + disassemble_string(x) else: - raise TypeError("don't know how to disassemble %s objects" % - type(x).__name__) + raise TypeError, \ + "don't know how to disassemble %s objects" % \ + type(x).__name__ def distb(tb=None): """Disassemble a traceback (default: last traceback).""" @@ -64,86 +54,10 @@ try: tb = sys.last_traceback except AttributeError: - raise RuntimeError("no last traceback to disassemble") + raise RuntimeError, "no last traceback to disassemble" while tb.tb_next: tb = tb.tb_next disassemble(tb.tb_frame.f_code, tb.tb_lasti) -# The inspect module interrogates this dictionary to build its -# list of CO_* constants. 
It is also used by pretty_flags to -# turn the co_flags field into a human readable list. -COMPILER_FLAG_NAMES = { - 1: "OPTIMIZED", - 2: "NEWLOCALS", - 4: "VARARGS", - 8: "VARKEYWORDS", - 16: "NESTED", - 32: "GENERATOR", - 64: "NOFREE", -} - -def pretty_flags(flags): - """Return pretty representation of code flags.""" - names = [] - for i in range(32): - flag = 1<") - if hasattr(x, 'co_code'): # Code object - return _format_code_info(x) - else: - raise TypeError("don't know how to disassemble %s objects" % - type(x).__name__) - -def _format_code_info(co): - lines = [] - lines.append("Name: %s" % co.co_name) - lines.append("Filename: %s" % co.co_filename) - lines.append("Argument count: %s" % co.co_argcount) - lines.append("Kw-only arguments: %s" % co.co_kwonlyargcount) - lines.append("Number of locals: %s" % co.co_nlocals) - lines.append("Stack size: %s" % co.co_stacksize) - lines.append("Flags: %s" % pretty_flags(co.co_flags)) - if co.co_consts: - lines.append("Constants:") - for i_c in enumerate(co.co_consts): - lines.append("%4d: %r" % i_c) - if co.co_names: - lines.append("Names:") - for i_n in enumerate(co.co_names): - lines.append("%4d: %s" % i_n) - if co.co_varnames: - lines.append("Variable names:") - for i_n in enumerate(co.co_varnames): - lines.append("%4d: %s" % i_n) - if co.co_freevars: - lines.append("Free variables:") - for i_n in enumerate(co.co_freevars): - lines.append("%4d: %s" % i_n) - if co.co_cellvars: - lines.append("Cell variables:") - for i_n in enumerate(co.co_cellvars): - lines.append("%4d: %s" % i_n) - return "\n".join(lines) - -def show_code(co): - """Print details of methods, functions, or code to stdout.""" - print(code_info(co)) - def disassemble(co, lasti=-1): """Disassemble a code object.""" code = co.co_code @@ -154,92 +68,90 @@ extended_arg = 0 free = None while i < n: - op = code[i] + c = code[i] + op = ord(c) if i in linestarts: if i > 0: - print() - print("%3d" % linestarts[i], end=' ') + print + print "%3d" % linestarts[i], 
else: - print(' ', end=' ') + print ' ', - if i == lasti: print('-->', end=' ') - else: print(' ', end=' ') - if i in labels: print('>>', end=' ') - else: print(' ', end=' ') - print(repr(i).rjust(4), end=' ') - print(opname[op].ljust(20), end=' ') + if i == lasti: print '-->', + else: print ' ', + if i in labels: print '>>', + else: print ' ', + print repr(i).rjust(4), + print opname[op].ljust(20), i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + extended_arg + oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg extended_arg = 0 i = i+2 if op == EXTENDED_ARG: - extended_arg = oparg*65536 - print(repr(oparg).rjust(5), end=' ') + extended_arg = oparg*65536L + print repr(oparg).rjust(5), if op in hasconst: - print('(' + repr(co.co_consts[oparg]) + ')', end=' ') + print '(' + repr(co.co_consts[oparg]) + ')', elif op in hasname: - print('(' + co.co_names[oparg] + ')', end=' ') + print '(' + co.co_names[oparg] + ')', elif op in hasjrel: - print('(to ' + repr(i + oparg) + ')', end=' ') + print '(to ' + repr(i + oparg) + ')', elif op in haslocal: - print('(' + co.co_varnames[oparg] + ')', end=' ') + print '(' + co.co_varnames[oparg] + ')', elif op in hascompare: - print('(' + cmp_op[oparg] + ')', end=' ') + print '(' + cmp_op[oparg] + ')', elif op in hasfree: if free is None: free = co.co_cellvars + co.co_freevars - print('(' + free[oparg] + ')', end=' ') + print '(' + free[oparg] + ')', elif op in hasnargs: - print('(%d positional, %d keyword pair)' - % (code[i-2], code[i-1]), end=' ') - print() + print '(%d positional, %d keyword pair)' % \ + (ord(code[i-2]), ord(code[i-1])), + print -def _disassemble_bytes(code, lasti=-1, varnames=None, names=None, +def disassemble_string(code, lasti=-1, varnames=None, names=None, constants=None): labels = findlabels(code) n = len(code) i = 0 while i < n: - op = code[i] - if i == lasti: print('-->', end=' ') - else: print(' ', end=' ') - if i in labels: print('>>', end=' ') - else: print(' ', end=' ') - 
print(repr(i).rjust(4), end=' ') - print(opname[op].ljust(15), end=' ') + c = code[i] + op = ord(c) + if i == lasti: print '-->', + else: print ' ', + if i in labels: print '>>', + else: print ' ', + print repr(i).rjust(4), + print opname[op].ljust(15), i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 - print(repr(oparg).rjust(5), end=' ') + print repr(oparg).rjust(5), if op in hasconst: if constants: - print('(' + repr(constants[oparg]) + ')', end=' ') + print '(' + repr(constants[oparg]) + ')', else: - print('(%d)'%oparg, end=' ') + print '(%d)'%oparg, elif op in hasname: if names is not None: - print('(' + names[oparg] + ')', end=' ') + print '(' + names[oparg] + ')', else: - print('(%d)'%oparg, end=' ') + print '(%d)'%oparg, elif op in hasjrel: - print('(to ' + repr(i + oparg) + ')', end=' ') + print '(to ' + repr(i + oparg) + ')', elif op in haslocal: if varnames: - print('(' + varnames[oparg] + ')', end=' ') + print '(' + varnames[oparg] + ')', else: - print('(%d)' % oparg, end=' ') + print '(%d)' % oparg, elif op in hascompare: - print('(' + cmp_op[oparg] + ')', end=' ') + print '(' + cmp_op[oparg] + ')', elif op in hasnargs: - print('(%d positional, %d keyword pair)' - % (code[i-2], code[i-1]), end=' ') - print() - -def _disassemble_str(source): - """Compile the source string, then disassemble the code object.""" - disassemble(_try_compile(source, '')) + print '(%d positional, %d keyword pair)' % \ + (ord(code[i-2]), ord(code[i-1])), + print disco = disassemble # XXX For backwards compatibility @@ -253,10 +165,11 @@ n = len(code) i = 0 while i < n: - op = code[i] + c = code[i] + op = ord(c) i = i+1 if op >= HAVE_ARGUMENT: - oparg = code[i] + code[i+1]*256 + oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 label = -1 if op in hasjrel: @@ -274,8 +187,8 @@ Generate pairs (offset, lineno) as described in Python/compile.c. 
""" - byte_increments = list(code.co_lnotab[0::2]) - line_increments = list(code.co_lnotab[1::2]) + byte_increments = [ord(c) for c in code.co_lnotab[0::2]] + line_increments = [ord(c) for c in code.co_lnotab[1::2]] lastlineno = None lineno = code.co_firstlineno diff --git a/lib-python/3/opcode.py b/pypy/tool/opcode3.py copy from lib-python/3/opcode.py copy to pypy/tool/opcode3.py --- a/lib-python/3/opcode.py +++ b/pypy/tool/opcode3.py @@ -1,7 +1,7 @@ - """ opcode module - potentially shared between dis and other modules which operate on bytecodes (e.g. peephole optimizers). +"Backported" from Python 3 to Python 2 land - an excact copy of lib-python/3/opcode.py """ __all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs", From noreply at buildbot.pypy.org Wed Aug 6 10:00:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Aug 2014 10:00:21 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140806080021.C48691C0588@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r517:1030f28b3578 Date: 2014-08-06 10:00 +0200 http://bitbucket.org/pypy/pypy.org/changeset/1030f28b3578/ Log: update the values diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $2959 of $80000 (3.7%) + $12959 of $80000 (16.2%)
From noreply at buildbot.pypy.org Wed Aug 6 11:41:11 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:11 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Moved system.py and version.py to spyvm/util. Message-ID: <20140806094111.4ACDC1C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1017:c0050f5678f2 Date: 2014-08-05 20:06 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c0050f5678f2/ Log: Moved system.py and version.py to spyvm/util. Cleaned up some imports. diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -10,13 +10,10 @@ W_BytesObject W_WordsObject W_CompiledMethod - -W_BlockContext and W_MethodContext classes have been replaced by functions -that create W_PointersObjects of correct size with attached shadows. """ -import sys, weakref -from spyvm import constants, error, version, storage_logger -from spyvm.version import elidable_for_version, constant_for_version, constant_for_version_arg +import sys +from spyvm import constants, error, storage_logger +from spyvm.util.version import constant_for_version, constant_for_version_arg, VersionMixin from rpython.rlib import rrandom, objectmodel, jit, signature from rpython.rlib.rarithmetic import intmask, r_uint, r_int @@ -1032,7 +1029,7 @@ lookup_selector = "" lookup_class = None - import_from_mixin(version.VersionMixin) + import_from_mixin(VersionMixin) def __init__(self, space, bytecount=0, header=0): self.bytes = ["\x00"] * bytecount diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,7 +1,7 @@ import os -from spyvm import constants, model, model_display, wrapper, version, display -from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError +from spyvm import constants, model, wrapper, display +from spyvm.error import UnwrappingError, WrappingError from rpython.rlib import jit, rpath from 
rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin from rpython.rlib.rarithmetic import intmask, r_uint, int_between diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -1,7 +1,7 @@ import sys, weakref -from spyvm import model, version, constants -from spyvm.version import elidable_for_version +from spyvm import model, constants +from spyvm.util.version import elidable_for_version, VersionMixin from rpython.rlib import objectmodel, jit from rpython.rlib.objectmodel import import_from_mixin @@ -283,7 +283,7 @@ _immutable_fields_ = ['version?'] _attrs_ = ['version'] repr_classname = "AbstractCachingShadow" - import_from_mixin(version.VersionMixin) + import_from_mixin(VersionMixin) version = None def __init__(self, space, w_self, size): diff --git a/spyvm/storage_classes.py b/spyvm/storage_classes.py --- a/spyvm/storage_classes.py +++ b/spyvm/storage_classes.py @@ -1,7 +1,7 @@ -from spyvm import model, constants, error, wrapper, version +from spyvm import model, constants, error, wrapper from spyvm.storage import AbstractCachingShadow, ListStorageShadow -from spyvm.version import constant_for_version, constant_for_version_arg +from spyvm.util.version import constant_for_version, constant_for_version_arg, Version from rpython.rlib import jit POINTERS = 0 @@ -231,7 +231,7 @@ raise error.MethodNotFound() def changed(self): - self.superclass_changed(version.Version()) + self.superclass_changed(Version()) # this is done, because the class-hierarchy contains cycles def superclass_changed(self, version): diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -1,5 +1,5 @@ import py, sys -from spyvm import model, storage_classes, objspace, version, constants, squeakimage, interpreter, interpreter_bytecodes +from spyvm import model, storage_classes, objspace, util, constants, squeakimage, interpreter, interpreter_bytecodes from rpython.rlib.objectmodel import 
instantiate # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. @@ -237,7 +237,7 @@ name='?', format=storage_classes.POINTERS, varsized=False): s = instantiate(storage_classes.ClassShadow) s.space = self - s.version = version.Version() + s.version = util.version.Version() s._w_self = w_class s.subclass_s = {} s._s_superclass = None diff --git a/spyvm/system.py b/spyvm/util/system.py rename from spyvm/system.py rename to spyvm/util/system.py diff --git a/spyvm/version.py b/spyvm/util/version.py rename from spyvm/version.py rename to spyvm/util/version.py From noreply at buildbot.pypy.org Wed Aug 6 11:41:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Removed obsolete conftest.py Message-ID: <20140806094104.A5CC41C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1011:b3cb41345ac7 Date: 2014-08-05 19:32 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/b3cb41345ac7/ Log: Removed obsolete conftest.py diff --git a/spyvm/conftest.py b/spyvm/conftest.py deleted file mode 100644 --- a/spyvm/conftest.py +++ /dev/null @@ -1,20 +0,0 @@ -import py - -option = None - -def pytest_configure(config): - global option - option = config.option - -def pytest_addoption(parser): - group = parser.getgroup("smalltalk options") - group.addoption('--bc-trace', - action="store_true", - dest="bc_trace", - default=False, - help="print bytecodes and stack during execution") - group.addoption('--prim-trace', - action="store_true", - dest="prim_trace", - default=False, - help="print called primitives during execution") diff --git a/spyvm/test/test_interpreter.py b/spyvm/test/test_interpreter.py --- a/spyvm/test/test_interpreter.py +++ b/spyvm/test/test_interpreter.py @@ -1,7 +1,6 @@ import py, operator, sys from spyvm import model, interpreter, primitives, storage_classes, storage_contexts, wrapper, 
constants, error from .util import create_space_interp, copy_to_module, cleanup_module, import_bytecodes -from spyvm.conftest import option import_bytecodes(__name__) @@ -840,14 +839,6 @@ [space.w_CompiledMethod, primitives.OBJECT_AT_PUT, 2, "objectAt:put:"]], test) -def test_runwithtrace(): - # We run random tests with the bc_trace option turned on explicitely - bc_trace = option.bc_trace - option.bc_trace = True - test_storeAndPopReceiverVariableBytecode() - test_bc_objectAtAndAtPut() - option.bc_trace = bc_trace - # Closure Bytecodes def test_bc_pushNewArrayBytecode(bytecode=pushNewArrayBytecode): w_frame, s_frame = new_frame(bytecode + chr(0x83)) From noreply at buildbot.pypy.org Wed Aug 6 11:41:12 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:12 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Extracted Stream() into util/stream.py since its reusable. Message-ID: <20140806094112.4C1951C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1018:eabb41bf240d Date: 2014-08-05 20:18 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/eabb41bf240d/ Log: Extracted Stream() into util/stream.py since its reusable. Also, can be turned into in/out stream when implementing image writer. 
diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,120 +1,11 @@ import os, sys, time -from spyvm import constants, model +from spyvm import constants, model, util +from spyvm.util import stream from spyvm.util.bitmanipulation import splitter -from rpython.rlib import objectmodel, streamio +from rpython.rlib import objectmodel -# ____________________________________________________________ -# -# Stream class for reading raw input data - -def chrs2int(b): - assert len(b) == 4 - first = ord(b[0]) # big endian - if first & 0x80 != 0: - first = first - 0x100 - return (first << 24 | ord(b[1]) << 16 | ord(b[2]) << 8 | ord(b[3])) - -def swapped_chrs2int(b): - assert len(b) == 4 - first = ord(b[3]) # little endian - if first & 0x80 != 0: - first = first - 0x100 - return (first << 24 | ord(b[2]) << 16 | ord(b[1]) << 8 | ord(b[0])) - -def chrs2long(b): - assert len(b) == 8 - first = ord(b[0]) # big endian - if first & 0x80 != 0: - first = first - 0x100 - return ( first << 56 | ord(b[1]) << 48 | ord(b[2]) << 40 | ord(b[3]) << 32 - | ord(b[4]) << 24 | ord(b[5]) << 16 | ord(b[6]) << 8 | ord(b[7]) ) - -def swapped_chrs2long(b): - assert len(b) == 8 - first = ord(b[7]) # little endian - if first & 0x80 != 0: - first = first - 0x100 - return ( first << 56 | ord(b[6]) << 48 | ord(b[5]) << 40 | ord(b[4]) << 32 - | ord(b[3]) << 24 | ord(b[2]) << 16 | ord(b[1]) << 8 | ord(b[0]) ) - -class Stream(object): - """ Simple input stream. Constructor can raise OSError. 
""" - - def __init__(self, filename=None, inputfile=None, data=None): - if filename: - f = streamio.open_file_as_stream(filename, mode="rb", buffering=0) - try: - self.data = f.readall() - finally: - f.close() - elif inputfile: - try: - self.data = inputfile.read() - finally: - inputfile.close() - elif data: - self.data = data - else: - raise RuntimeError("need to supply either inputfile or data") - - self.reset() - - def peek(self): - if self.pos >= len(self.data): - raise IndexError - data_peek = self.data[self.pos:self.pos + self.word_size] - if self.use_long_read: - if self.swap: - return swapped_chrs2long(data_peek) - else: - return chrs2long(data_peek) - else: - if self.swap: - return swapped_chrs2int(data_peek) - else: - return chrs2int(data_peek) - - def next(self): - integer = self.peek() - self.pos += self.word_size - self.count += self.word_size - return integer - - def reset(self): - self.swap = False - self.pos = 0 - self.count = 0 - self.be_32bit() - - def reset_count(self): - self.count = 0 - - def skipbytes(self, jump): - assert jump > 0 - assert (self.pos + jump) <= len(self.data) - self.pos += jump - self.count += jump - - def skipwords(self, jump): - self.skipbytes(jump * self.word_size) - assert (self.pos + jump) <= len(self.data) - self.pos += jump - self.count += jump - - def length(self): - return len(self.data) - - def close(self): - pass # already closed - - def be_64bit(self): - self.word_size = 8 - self.use_long_read = True - - def be_32bit(self): - self.word_size = 4 - self.use_long_read = False - +# Access for module users +Stream = stream.Stream # ____________________________________________________________ # diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -1,7 +1,7 @@ import py, StringIO, sys from struct import pack from spyvm import squeakimage -from spyvm.squeakimage import chrs2int, chrs2long, swapped_chrs2long +from 
spyvm.util.stream import chrs2int, chrs2long, swapped_chrs2long from spyvm import objspace from .util import create_space, copy_to_module, cleanup_module diff --git a/spyvm/util/stream.py b/spyvm/util/stream.py new file mode 100644 --- /dev/null +++ b/spyvm/util/stream.py @@ -0,0 +1,111 @@ +from rpython.rlib import streamio + +def chrs2int(b): + assert len(b) == 4 + first = ord(b[0]) # big endian + if first & 0x80 != 0: + first = first - 0x100 + return (first << 24 | ord(b[1]) << 16 | ord(b[2]) << 8 | ord(b[3])) + +def swapped_chrs2int(b): + assert len(b) == 4 + first = ord(b[3]) # little endian + if first & 0x80 != 0: + first = first - 0x100 + return (first << 24 | ord(b[2]) << 16 | ord(b[1]) << 8 | ord(b[0])) + +def chrs2long(b): + assert len(b) == 8 + first = ord(b[0]) # big endian + if first & 0x80 != 0: + first = first - 0x100 + return ( first << 56 | ord(b[1]) << 48 | ord(b[2]) << 40 | ord(b[3]) << 32 + | ord(b[4]) << 24 | ord(b[5]) << 16 | ord(b[6]) << 8 | ord(b[7]) ) + +def swapped_chrs2long(b): + assert len(b) == 8 + first = ord(b[7]) # little endian + if first & 0x80 != 0: + first = first - 0x100 + return ( first << 56 | ord(b[6]) << 48 | ord(b[5]) << 40 | ord(b[4]) << 32 + | ord(b[3]) << 24 | ord(b[2]) << 16 | ord(b[1]) << 8 | ord(b[0]) ) + +class Stream(object): + """ Simple input stream. + Data is completely read into memory. + Constructor can raise OSError. 
""" + + def __init__(self, filename=None, inputfile=None, data=None): + if filename: + f = streamio.open_file_as_stream(filename, mode="rb", buffering=0) + try: + self.data = f.readall() + finally: + f.close() + elif inputfile: + try: + self.data = inputfile.read() + finally: + inputfile.close() + elif data: + self.data = data + else: + raise RuntimeError("need to supply either inputfile or data") + + self.reset() + + def peek(self): + if self.pos >= len(self.data): + raise IndexError + data_peek = self.data[self.pos:self.pos + self.word_size] + if self.use_long_read: + if self.swap: + return swapped_chrs2long(data_peek) + else: + return chrs2long(data_peek) + else: + if self.swap: + return swapped_chrs2int(data_peek) + else: + return chrs2int(data_peek) + + def next(self): + integer = self.peek() + self.pos += self.word_size + self.count += self.word_size + return integer + + def reset(self): + self.swap = False + self.pos = 0 + self.count = 0 + self.be_32bit() + + def reset_count(self): + self.count = 0 + + def skipbytes(self, jump): + assert jump > 0 + assert (self.pos + jump) <= len(self.data) + self.pos += jump + self.count += jump + + def skipwords(self, jump): + self.skipbytes(jump * self.word_size) + assert (self.pos + jump) <= len(self.data) + self.pos += jump + self.count += jump + + def length(self): + return len(self.data) + + def close(self): + pass # already closed + + def be_64bit(self): + self.word_size = 8 + self.use_long_read = True + + def be_32bit(self): + self.word_size = 4 + self.use_long_read = False From noreply at buildbot.pypy.org Wed Aug 6 11:41:05 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Removed obsolete analyseimage.py Message-ID: <20140806094105.EECBF1C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1012:80fb749ef958 Date: 2014-08-05 19:39 +0200 
http://bitbucket.org/pypy/lang-smalltalk/changeset/80fb749ef958/ Log: Removed obsolete analyseimage.py Cleaned up squeakimage.py a little diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,12 +1,11 @@ -import py -import os -import sys -import time -from spyvm import constants -from spyvm import model +import os, sys, time +from spyvm import constants, model from spyvm.tool.bitmanipulation import splitter +from rpython.rlib import objectmodel, streamio -from rpython.rlib import objectmodel +# ____________________________________________________________ +# +# Stream class for reading raw input data def chrs2int(b): assert len(b) == 4 @@ -38,26 +37,28 @@ return ( first << 56 | ord(b[6]) << 48 | ord(b[5]) << 40 | ord(b[4]) << 32 | ord(b[3]) << 24 | ord(b[2]) << 16 | ord(b[1]) << 8 | ord(b[0]) ) - -# ____________________________________________________________ -# -# Reads an image file and creates all model objects - class Stream(object): - """ Simple input stream """ - def __init__(self, inputfile=None, data=None): - if inputfile is None and data is None: - raise RuntimeError("need to supply either inputfile or data") - - if inputfile: + """ Simple input stream. Constructor can raise OSError. 
""" + + def __init__(self, filename=None, inputfile=None, data=None): + if filename: + f = streamio.open_file_as_stream(filename, mode="rb", buffering=0) + try: + self.data = f.readall() + finally: + f.close() + elif inputfile: try: self.data = inputfile.read() finally: inputfile.close() + elif data: + self.data = data else: - self.data = data + raise RuntimeError("need to supply either inputfile or data") + self.reset() - + def peek(self): if self.pos >= len(self.data): raise IndexError @@ -73,7 +74,6 @@ else: return chrs2int(data_peek) - def next(self): integer = self.peek() self.pos += self.word_size @@ -101,7 +101,6 @@ self.pos += jump self.count += jump - def length(self): return len(self.data) @@ -117,14 +116,24 @@ self.use_long_read = False +# ____________________________________________________________ +# +# Constants and image versions. + +# from the squeak source code: +# in squeak, the compact classes array can be found at this position +# in the special objects array +COMPACT_CLASSES_ARRAY = 28 + +# The image data can optionally start after this fixed offset. 
+POSSIBLE_IMAGE_OFFSET = 512 + class CorruptImageError(Exception): pass class UnsupportedImageError(Exception): pass -# ____________________________________________________________ - class ImageVersion(object): def __init__(self, magic, is_big_endian, is_64bit, has_closures, has_floats_reversed): @@ -159,7 +168,6 @@ ImageVersion(68003, False, True, True, True ), }) - def version(magic): ver = image_versions.get(magic, None) if ver is None: @@ -168,15 +176,13 @@ # raise UnsupportedImageError return ver -possible_image_offset = 512 - def version_from_stream(stream): # 32 bit try: return version(stream.peek()) except CorruptImageError as e: - if stream.length() > possible_image_offset + 4: - stream.skipbytes(possible_image_offset) + if stream.length() > POSSIBLE_IMAGE_OFFSET + 4: + stream.skipbytes(POSSIBLE_IMAGE_OFFSET) try: return version(stream.peek()) except CorruptImageError: @@ -189,8 +195,8 @@ assert v.is_64bit return v except CorruptImageError as e: - if stream.length() > possible_image_offset + 4: - stream.skipbytes(possible_image_offset) + if stream.length() > POSSIBLE_IMAGE_OFFSET + 4: + stream.skipbytes(POSSIBLE_IMAGE_OFFSET) try: v = version(stream.peek()) assert v.is_64bit @@ -200,6 +206,9 @@ raise +# ____________________________________________________________ +# +# Parser classes for Squeak image format. 
def reader_for_image(space, stream): ver = version_from_stream(stream) @@ -207,6 +216,13 @@ stream.swap = True return ImageReader(space, stream, ver) +def parse_image(space, stream): + image_reader = reader_for_image(space, stream) + image_reader.read_all() + image = SqueakImage() + image.from_reader(space, image_reader) + return image + class ImageReader(object): def __init__(self, space, stream, version): @@ -222,8 +238,7 @@ self.lastWindowSize = 0 - def initialize(self): - # XXX should be called something like read_full_image + def read_all(self): self.read_header() self.read_body() self.init_compactclassesarray() @@ -257,7 +272,6 @@ self.stream.skipbytes(headersize - self.stream.pos) def read_body(self): - import sys self.stream.reset_count() while self.stream.count < self.endofmemory: chunk, pos = self.read_object() @@ -372,7 +386,6 @@ "is_modern", "startup_time"] def from_reader(self, space, reader): - from spyvm import constants self.special_objects = [g_object.w_object for g_object in reader.chunks[reader.specialobjectspointer] .g_object.pointers] @@ -415,11 +428,6 @@ def special(self, index): return self.special_objects[index] -# from the squeak source code: -# in squeak, the compact classes array can be found at this position -# in the special objects array -COMPACT_CLASSES_ARRAY = 28 - # ____________________________________________________________ class GenericObject(object): diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -26,7 +26,7 @@ def imagestream_mock(string): f = StringIO.StringIO(string) - return squeakimage.Stream(f) + return squeakimage.Stream(inputfile=f) def imagereader_mock(string): stream = imagestream_mock(string) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -1,4 +1,4 @@ -import sys +import py, sys from spyvm import model, storage_classes, objspace, version, constants, 
squeakimage, interpreter, interpreter_bytecodes from rpython.rlib.objectmodel import instantiate @@ -6,15 +6,16 @@ # This way, as many tests as possible use the real, not-bootstrapped ObjSpace. bootstrap_by_default = False +image_dir = py.path.local(__file__).dirpath().dirpath().dirpath('images') + def open_reader(space, imagefilename): - from spyvm.tool.analyseimage import image_dir - imagefilename = image_dir.join(imagefilename) - return squeakimage.reader_for_image(space, squeakimage.Stream(imagefilename.open(mode="rb"))) + stream = squeakimage.Stream(filename=str(image_dir.join(imagefilename).strpath)) + return squeakimage.reader_for_image(space, stream) def read_image(image_filename, bootstrap = bootstrap_by_default): space = create_space(bootstrap) reader = open_reader(space, image_filename) - reader.initialize() + reader.read_all() image = squeakimage.SqueakImage() image.from_reader(space, reader) interp = TestInterpreter(space, image) diff --git a/spyvm/tool/analyseimage.py b/spyvm/tool/analyseimage.py deleted file mode 100644 --- a/spyvm/tool/analyseimage.py +++ /dev/null @@ -1,77 +0,0 @@ -import py -from spyvm import squeakimage -from spyvm import constants -from spyvm import model -from spyvm import interpreter -import sys - -image_dir = py.path.local(__file__).dirpath().dirpath().dirpath('images') - -mini_image = image_dir.join('mini.image') -minitest_image = image_dir.join('minitest.image') -s45_image = image_dir.join('Squeak4.5-12568.image') - -def get_miniimage(space): - return squeakimage.reader_for_image(space, squeakimage.Stream(mini_image.open(mode="rb"))) - -def get_minitestimage(space): - return squeakimage.reader_for_image(space, squeakimage.Stream(minitest_image.open(mode="rb"))) - -def get_45image(space): - return squeakimage.reader_for_image(space, squeakimage.Stream(s45_image.open(mode="rb"))) - -def create_image(space, image_reader): - image_reader.initialize() - - image = squeakimage.SqueakImage() - image.from_reader(space, image_reader) 
- return image - -def create_squeakimage(space): - return create_image(space, get_miniimage(space)) - -def create_testimage(space): - return create_image(space, get_minitestimage(space)) - -def create_45image(space): - return create_image(space, get_45image(space)) - -def printStringsInImage(): - image = create_squeakimage() - for each in image.objects: - if isinstance(each,model.W_BytesObject): - print each.class_shadow() - print each.as_string() - -def tinyBenchmarks(): - image = create_squeakimage() - interp = interpreter.Interpreter() - - w_object = model.W_SmallInteger(0) - - # Should get this from w_object - w_smallint_class = image.special(constants.SO_SMALLINTEGER_CLASS) - s_class = w_object.class_shadow() - #w_method = s_class.lookup("benchFib") - w_method = s_class.lookup("tinyBenchmarks") - - assert w_method - w_frame = w_method.create_frame(interp.space, w_object) - interp.store_w_active_context(w_frame) - - from spyvm.interpreter_bytecodes import BYTECODE_TABLE - while True: - try: - interp.step() - except interpreter.ReturnFromTopLevel, e: - print e.object - return - -def test_do(): - #testSelector() - #printStringsInImage() - #testDoesNotUnderstand() - tinyBenchmarks() - -if __name__ == '__main__': - test_do() diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -1,10 +1,8 @@ #! 
/usr/bin/env python import sys, time, os -from rpython.rlib.streamio import open_file_as_stream from rpython.rlib import jit, rpath, objectmodel from spyvm import model, interpreter, squeakimage, objspace, wrapper, error, storage_logger -from spyvm.tool.analyseimage import create_image def _usage(argv): print """ @@ -155,18 +153,13 @@ path = rpath.rabspath(path) try: - f = open_file_as_stream(path, mode="rb", buffering=0) - try: - imagedata = f.readall() - finally: - f.close() + stream = squeakimage.Stream(filename=path) except OSError as e: print_error("%s -- %s (LoadError)" % (os.strerror(e.errno), path)) return 1 # Load & prepare image and environment - image_reader = squeakimage.reader_for_image(space, squeakimage.Stream(data=imagedata)) - image = create_image(space, image_reader) + image = squeakimage.parse_image(space, stream) interp = interpreter.Interpreter(space, image, trace=trace, trace_important=trace_important, evented=not poll, interrupts=interrupts) diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -1,8 +1,7 @@ #! /usr/bin/env python import sys from rpython.jit.codewriter.policy import JitPolicy -from spyvm import model, interpreter -from spyvm.tool.analyseimage import create_testimage +from spyvm import model, objspace, interpreter, squeakimage # This loads the whole mini.image in advance. At run-time, # it executes the tinyBenchmark. In this way we get an RPython @@ -14,10 +13,11 @@ # compile... 
#sys.setrecursionlimit(100000) +imagefile = "" + def setup(): - from spyvm import objspace space = objspace.ObjSpace() - image = create_testimage(space) + image = squeakimage.parse_image(space, Stream(filename=imagefile)) interp = interpreter.Interpreter(space, image) w_selector = interp.perform(space.wrap_string("loopTest"), "asSymbol") w_object = model.W_SmallInteger(0) From noreply at buildbot.pypy.org Wed Aug 6 11:41:07 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Turned from_reader() method into the constructor. Message-ID: <20140806094107.101D21C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1013:15238f6b9b23 Date: 2014-08-05 19:42 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/15238f6b9b23/ Log: Turned from_reader() method into the constructor. diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -219,9 +219,7 @@ def parse_image(space, stream): image_reader = reader_for_image(space, stream) image_reader.read_all() - image = SqueakImage() - image.from_reader(space, image_reader) - return image + return SqueakImage(space, image_reader) class ImageReader(object): @@ -385,7 +383,7 @@ _immutable_fields_ = ["w_asSymbol", "w_simulateCopyBits", "version", "is_modern", "startup_time"] - def from_reader(self, space, reader): + def __init__(self, space, reader): self.special_objects = [g_object.w_object for g_object in reader.chunks[reader.specialobjectspointer] .g_object.pointers] diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -16,8 +16,7 @@ space = create_space(bootstrap) reader = open_reader(space, image_filename) reader.read_all() - image = squeakimage.SqueakImage() - image.from_reader(space, reader) + image = squeakimage.SqueakImage(space, reader) interp = TestInterpreter(space, 
image) return space, interp, image, reader From noreply at buildbot.pypy.org Wed Aug 6 11:41:13 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:13 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Fixed tests that were marked as skip. Message-ID: <20140806094113.4DF991C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1019:7b439eadbc99 Date: 2014-08-06 11:40 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7b439eadbc99/ Log: Fixed tests that were marked as skip. Added 64bit image versions to regular versions (no need to separate them). diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,4 +1,4 @@ -import os, sys, time +import os, time from spyvm import constants, model, util from spyvm.util import stream from spyvm.util.bitmanipulation import splitter @@ -41,24 +41,16 @@ 0x68190000: ImageVersion(6504, False, False, True, False), 0x00001969: ImageVersion(6505, True, False, True, True ), 0x69190000: ImageVersion(6505, False, False, True, True ), + + # Versions for 64 bit images 0x00000000000109A0: ImageVersion(68000, True, True, False, False), + -0x5ff6ff0000000000:ImageVersion(68000, False, True, False, False), # 0xA009010000000000 + 0x00000000000109A2: ImageVersion(68002, True, True, True, False), + -0x5df6ff0000000000:ImageVersion(68002, False, True, True, False), # 0xA209010000000000 + 0x00000000000109A3: ImageVersion(68003, True, True, True, True ), + -0x5cf6ff0000000000:ImageVersion(68003, False, True, True, True ), # 0xA309010000000000 } -if sys.maxint == 2 ** 63 - 1: - image_versions.update({ - -0x5ff6ff0000000000: - # signed version of 0xA009010000000000: - ImageVersion(68000, False, True, False, False), - 0x00000000000109A2: ImageVersion(68002, True, True, True, False), - -0x5df6ff0000000000: - # signed version of 0xA209010000000000: - ImageVersion(68002, False, True, True, False), - 
0x00000000000109A3: ImageVersion(68003, True, True, True, True ), - -0x5cf6ff0000000000: - # signed version of 0xA309010000000000: - ImageVersion(68003, False, True, True, True ), -}) - def version(magic): ver = image_versions.get(magic, None) if ver is None: diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -15,12 +15,6 @@ w_result = perform(image.w_asSymbol, "asSymbol") assert w_result is image.w_asSymbol -def test_create_new_symbol(): - py.test.skip("This test takes quite long and is actually included in test_retrieve_symbol.") - w_result = perform(w("someString"), "asSymbol") - assert w_result is not None - assert w_result.as_string() == "someString" - def test_retrieve_symbol(): """asSymbol "This is the only place that new Symbols are created. A Symbol is created @@ -30,6 +24,7 @@ self = sym ifTrue: [ ^ sym ] ]. ^ (Symbol basicNew: self size) initFrom: self""" + w_result = perform(w("someString"), "asSymbol") assert w_result.as_string() == "someString" w_anotherSymbol = perform(w("someString"), "asSymbol") diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,5 +1,5 @@ import py, math -from spyvm import model, constants, storage_contexts, wrapper, primitives +from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter def setup_module(): @@ -210,14 +210,18 @@ w_false = image.special(constants.SO_FALSE) assert w_false.is_same_object(space.w_false) -def test_runimage(): - py.test.skip("This method actually runs an image. 
Fails since no graphical primitives yet") - ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) - w_ctx = ap.suspended_context() - ap.store_suspended_context(space.w_nil) - - interp = TestInterpreter(space) - interp.interpret_toplevel(w_ctx) +def test_runimage_and_quit(): + # This image has been prepared executing the following DoIt (the entire line): + # Smalltalk snapshotPrimitive. Smalltalk snapshot: false andQuit: true. + # After starting, the image quits immediately. This allows testing the full image execution. + + from targetimageloadingsmalltalk import active_context, execute_context + space, interp, _, _ = read_image('running-exit.image') + frame = active_context(space) + try: + execute_context(interp, frame) + except error.Exit, e: + assert e.msg == "Quit-Primitive called" def test_compile_method(): sourcecode = """fib @@ -260,8 +264,6 @@ assert s_ctx.top().is_same_object(space.w_true) def test_cached_methoddict(): - py.test.skip('Should test the same as test_shadow.test_cached_methoddict, as long ' - 'as the implementation of MethodDictionary>>#at:put does not change.') sourcecode = """fib ^self < 2 ifTrue: [ 1 ] diff --git a/spyvm/test/test_model.py b/spyvm/test/test_model.py --- a/spyvm/test/test_model.py +++ b/spyvm/test/test_model.py @@ -376,31 +376,30 @@ assert target.getword(0) == 0xffff0100 assert target.getword(1) == 0x7fff8000 - at py.test.mark.skipif("'This test must be fixed!'") def test_display_bitmap(): - # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent - # double-free bug - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - d = display.SDLDisplay("test") - d.set_video_mode(32, 10, 1) - - target = model_display.W_DisplayBitmap.create(space, space.w_Array, 10, 1, d) + size = 10 + space.display().set_video_mode(32, size, 1) + target = model_display.W_MappingDisplayBitmap(space, space.w_Array, 
size, 1) + for idx in range(size): + target.setword(idx, r_uint(0)) + target.take_over_display() + target.setword(0, r_uint(0xFF00)) assert bin(target.getword(0)) == bin(0xFF00) target.setword(0, r_uint(0x00FF00FF)) assert bin(target.getword(0)) == bin(0x00FF00FF) target.setword(0, r_uint(0xFF00FF00)) assert bin(target.getword(0)) == bin(0xFF00FF00) + + buf = target.pixelbuffer() for i in xrange(2): - assert target.pixelbuffer[i] == 0x01010101 + assert buf[i] == 0x01010101 for i in xrange(2, 4): - assert target.pixelbuffer[i] == 0x0 + assert buf[i] == 0x0 for i in xrange(4, 6): - assert target.pixelbuffer[i] == 0x01010101 + assert buf[i] == 0x01010101 for i in xrange(6, 8): - assert target.pixelbuffer[i] == 0x0 + assert buf[i] == 0x0 def test_display_offset_computation_even(): dbitmap = model_display.W_MappingDisplayBitmap(space, space.w_Array, 200, 1) @@ -419,7 +418,6 @@ assert dbitmap.compute_pos(2) == 67 assert dbitmap.compute_pos(3) == 67 + 32 - at py.test.mark.skipif("socket.gethostname() == 'precise32'") def test_weak_pointers(): w_cls = bootstrap_class(2) s_cls = w_cls.as_class_get_shadow(space) diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -187,8 +187,8 @@ assert r.stream.pos == len(image_2) def test_simple_image64(): - if not sys.maxint == 2 ** 63 - 1: - py.test.skip("on 32 bit platforms, we can't need to check for 64 bit images") + #if not sys.maxint == 2 ** 63 - 1: + # py.test.skip("on 32 bit platforms, we can't need to check for 64 bit images") word_size = 8 header_size = 16 * word_size From noreply at buildbot.pypy.org Wed Aug 6 11:41:08 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:08 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Removed 2 obsolete files, moved test file to correct folder. 
Message-ID: <20140806094108.163D11C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1014:88d1d1dfa117 Date: 2014-08-05 19:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/88d1d1dfa117/ Log: Removed 2 obsolete files, moved test file to correct folder. diff --git a/spyvm/tool/test_bitmanipulation.py b/spyvm/test/test_bitmanipulation.py rename from spyvm/tool/test_bitmanipulation.py rename to spyvm/test/test_bitmanipulation.py diff --git a/spyvm/tool/infostats.py b/spyvm/tool/infostats.py deleted file mode 100644 --- a/spyvm/tool/infostats.py +++ /dev/null @@ -1,5 +0,0 @@ -import pstats -p = pstats.Stats('compile_method.txt') -#print p.print_callers('as_context_get_shadow') -#print p.print_callers('s_active_context') -p.sort_stats('time', 'cum').print_stats(.5) diff --git a/spyvm/tool/profile.sh b/spyvm/tool/profile.sh deleted file mode 100644 --- a/spyvm/tool/profile.sh +++ /dev/null @@ -1,2 +0,0 @@ -python -m cProfile -o compile_method.txt `which py.test` -k compile_method ../test/test_miniimage.py -python infostats.py From noreply at buildbot.pypy.org Wed Aug 6 11:41:09 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:09 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Renamed spyvm/tool to spyvm/util Message-ID: <20140806094109.2F8EC1C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1015:2476448b677b Date: 2014-08-05 19:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2476448b677b/ Log: Renamed spyvm/tool to spyvm/util diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -2,7 +2,7 @@ import time from rpython.rlib.jit import elidable -from spyvm.tool.bitmanipulation import splitter +from spyvm.util.bitmanipulation import splitter # ___________________________________________________________________________ # Slot Names diff --git 
a/spyvm/interpreter_bytecodes.py b/spyvm/interpreter_bytecodes.py --- a/spyvm/interpreter_bytecodes.py +++ b/spyvm/interpreter_bytecodes.py @@ -2,7 +2,7 @@ from spyvm.storage_contexts import ContextPartShadow from spyvm.storage_classes import ClassShadow from spyvm import model, primitives, wrapper, error -from spyvm.tool.bitmanipulation import splitter +from spyvm.util.bitmanipulation import splitter from rpython.rlib import objectmodel, unroll, jit # unrolling_zero has been removed from rlib at some point. diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,6 +1,6 @@ import os, sys, time from spyvm import constants, model -from spyvm.tool.bitmanipulation import splitter +from spyvm.util.bitmanipulation import splitter from rpython.rlib import objectmodel, streamio # ____________________________________________________________ diff --git a/spyvm/test/jittest/base.py b/spyvm/test/jittest/base.py --- a/spyvm/test/jittest/base.py +++ b/spyvm/test/jittest/base.py @@ -3,7 +3,7 @@ from rpython.tool.jitlogparser.parser import Op from rpython.jit.metainterp.resoperation import opname from rpython.jit.tool import oparser -from spyvm.tool import logparser +from spyvm.util import logparser BasePath = os.path.abspath( os.path.join( diff --git a/spyvm/test/test_bitmanipulation.py b/spyvm/test/test_bitmanipulation.py --- a/spyvm/test/test_bitmanipulation.py +++ b/spyvm/test/test_bitmanipulation.py @@ -1,4 +1,4 @@ -from spyvm.tool.bitmanipulation import splitter +from spyvm.util.bitmanipulation import splitter def test_simple_splitbits(): diff --git a/spyvm/tool/__init__.py b/spyvm/util/__init__.py rename from spyvm/tool/__init__.py rename to spyvm/util/__init__.py diff --git a/spyvm/tool/bitmanipulation.py b/spyvm/util/bitmanipulation.py rename from spyvm/tool/bitmanipulation.py rename to spyvm/util/bitmanipulation.py diff --git a/spyvm/tool/extract_loops.py b/spyvm/util/extract_loops.py rename from 
spyvm/tool/extract_loops.py rename to spyvm/util/extract_loops.py --- a/spyvm/tool/extract_loops.py +++ b/spyvm/util/extract_loops.py @@ -1,5 +1,5 @@ import sys, os, shutil -from spyvm.tool import logparser +from spyvm.util import logparser def main(argv): if len(argv) != 1: diff --git a/spyvm/tool/logparser.py b/spyvm/util/logparser.py rename from spyvm/tool/logparser.py rename to spyvm/util/logparser.py diff --git a/spyvm/tool/storagelog_parser.py b/spyvm/util/storagelog_parser.py rename from spyvm/tool/storagelog_parser.py rename to spyvm/util/storagelog_parser.py From noreply at buildbot.pypy.org Wed Aug 6 11:41:10 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 11:41:10 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Added spyvm/tools to contain cli-tools (no __init__.py) Message-ID: <20140806094110.40EB21C0588@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1016:ba3f2dea82da Date: 2014-08-05 19:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ba3f2dea82da/ Log: Added spyvm/tools to contain cli-tools (no __init__.py) diff --git a/spyvm/util/extract_loops.py b/spyvm/tools/extract_loops.py rename from spyvm/util/extract_loops.py rename to spyvm/tools/extract_loops.py diff --git a/spyvm/util/storagelog_parser.py b/spyvm/tools/storagelog_parser.py rename from spyvm/util/storagelog_parser.py rename to spyvm/tools/storagelog_parser.py From noreply at buildbot.pypy.org Wed Aug 6 13:38:35 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 13:38:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: More cleanups in squeakimage.py and stream.py Message-ID: <20140806113835.5F2221C0257@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1020:2bf5b77ee6b7 Date: 2014-08-06 13:07 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2bf5b77ee6b7/ Log: More cleanups in squeakimage.py and 
stream.py diff --git a/spyvm/error.py b/spyvm/error.py --- a/spyvm/error.py +++ b/spyvm/error.py @@ -42,3 +42,6 @@ _attrs_ = ["msg"] def __init__(self, msg): self.msg = msg + +class CorruptImageError(Exit): + pass diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -711,7 +711,7 @@ height = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 2)) depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) hotpt = wrapper.PointWrapper(interp.space, w_rcvr.fetch(interp.space, 4)) - if not interp.image.is_modern: + if not interp.image.version.is_modern: display.SDLCursor.set( w_bitmap.words, width, diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,5 +1,5 @@ import os, time -from spyvm import constants, model, util +from spyvm import constants, model, util, error from spyvm.util import stream from spyvm.util.bitmanipulation import splitter from rpython.rlib import objectmodel @@ -19,20 +19,22 @@ # The image data can optionally start after this fixed offset. 
POSSIBLE_IMAGE_OFFSET = 512 -class CorruptImageError(Exception): - pass - -class UnsupportedImageError(Exception): - pass - class ImageVersion(object): - + def __init__(self, magic, is_big_endian, is_64bit, has_closures, has_floats_reversed): self.magic = magic self.is_big_endian = is_big_endian self.is_64bit = is_64bit self.has_closures = has_closures self.has_floats_reversed = has_floats_reversed + self.is_modern = magic > 6502 + + def configure_stream(self, stream): + stream.big_endian = self.is_big_endian + if self.is_64bit: + stream.be_64bit() + else: + stream.be_32bit() image_versions = { 0x00001966: ImageVersion(6502, True, False, False, False), @@ -51,91 +53,71 @@ -0x5cf6ff0000000000:ImageVersion(68003, False, True, True, True ), # 0xA309010000000000 } -def version(magic): - ver = image_versions.get(magic, None) - if ver is None: - raise CorruptImageError - # if ver.is_64bit or ver.has_floats_reversed: - # raise UnsupportedImageError - return ver - -def version_from_stream(stream): - # 32 bit - try: - return version(stream.peek()) - except CorruptImageError as e: - if stream.length() > POSSIBLE_IMAGE_OFFSET + 4: - stream.skipbytes(POSSIBLE_IMAGE_OFFSET) - try: - return version(stream.peek()) - except CorruptImageError: - pass # raise original error - # 64 bit - stream.reset() - stream.be_64bit() - try: - v = version(stream.peek()) - assert v.is_64bit - return v - except CorruptImageError as e: - if stream.length() > POSSIBLE_IMAGE_OFFSET + 4: - stream.skipbytes(POSSIBLE_IMAGE_OFFSET) - try: - v = version(stream.peek()) - assert v.is_64bit - return v - except CorruptImageError: - pass # raise original error - raise - - # ____________________________________________________________ # # Parser classes for Squeak image format. 
-def reader_for_image(space, stream): - ver = version_from_stream(stream) - if not ver.is_big_endian: - stream.swap = True - return ImageReader(space, stream, ver) - -def parse_image(space, stream): - image_reader = reader_for_image(space, stream) - image_reader.read_all() - return SqueakImage(space, image_reader) - class ImageReader(object): - - def __init__(self, space, stream, version): + + _attrs_ = [ "space", "stream", "version", + "chunks", # Dictionary mapping old address to chunk object + "chunklist", # Flat list of all read chunks + "intcache", # Cached instances of SmallInteger + "lastWindowSize" + ] + + def __init__(self, space, stream): self.space = space self.stream = stream - self.version = version - self.is_modern = self.version.magic > 6502 - # dictionary mapping old address to chunk object + self.version = None self.chunks = {} self.chunklist = [] - # cache wrapper integers self.intcache = {} - self.lastWindowSize = 0 - + + def create_image(self): + self.read_all() + return SqueakImage(self) + + def log_progress(self, progress, char): + if progress % 1000 == 0: + os.write(2, char) + def read_all(self): self.read_header() self.read_body() self.init_compactclassesarray() - # until here, the chunks are generated + # All chunks are read, now convert them to real objects. 
self.init_g_objects() + self.assign_prebuilt_constants() self.init_w_objects() self.fillin_w_objects() + def try_read_version(self): + version = image_versions.get(self.stream.next(), None) + if version: + return version + self.stream.reset() + if self.stream.length() > POSSIBLE_IMAGE_OFFSET + 4: + self.stream.skipbytes(POSSIBLE_IMAGE_OFFSET) + version = image_versions.get(self.stream.next(), None) + if not version: + self.stream.reset() + return version + def read_version(self): - # 1 word version - magic = self.stream.next() - assert self.version.magic == magic - + version = self.try_read_version() + if not version: + # Try 64 bit + self.stream.be_64bit() + version = self.try_read_version() + if not version: + raise error.CorruptImageError("Illegal version magic.") + version.configure_stream(self.stream) + self.version = version + def read_header(self): self.read_version() - #------ # 1 word headersize headersize = self.stream.next() # 1 word size of the full image @@ -146,82 +128,21 @@ self.specialobjectspointer = self.stream.next() # 1 word last used hash lasthash = self.stream.next() - self.lastWindowSize = savedwindowssize = self.stream.next() - # print "savedwindowssize: ", savedwindowssize >> 16, "@", savedwindowssize & 0xffff + self.lastWindowSize = self.stream.next() fullscreenflag = self.stream.next() extravmmemory = self.stream.next() self.stream.skipbytes(headersize - self.stream.pos) - + def read_body(self): self.stream.reset_count() while self.stream.count < self.endofmemory: chunk, pos = self.read_object() - if len(self.chunklist) % 1000 == 0: os.write(2,'#') + self.log_progress(len(self.chunklist), '#') self.chunklist.append(chunk) self.chunks[pos + self.oldbaseaddress] = chunk self.stream.close() - self.swap = self.stream.swap #save for later - self.stream = None return self.chunklist # return for testing - def init_g_objects(self): - for chunk in self.chunks.itervalues(): - chunk.as_g_object(self) # initialized g_object - - def 
init_w_objects(self): - self.assign_prebuilt_constants() - for chunk in self.chunks.itervalues(): - chunk.g_object.init_w_object() - - def assign_prebuilt_constants(self): - # Assign classes and objects that in special objects array that are already created. - self._assign_prebuilt_constants(constants.objects_in_special_object_table, self.space.objtable) - if not self.is_modern: - classtable = {} - for name, so_index in self.space.classtable.items(): - # In non-modern images (pre 4.0), there was no BlockClosure class. - if not name == "BlockClosure": - classtable[name] = so_index - else: - classtable = self.space.classtable - self._assign_prebuilt_constants(constants.classes_in_special_object_table, classtable) - - def _assign_prebuilt_constants(self, names_and_indices, prebuilt_objects): - for name, so_index in names_and_indices.items(): - name = "w_" + name - if name in prebuilt_objects: - w_object = prebuilt_objects[name] - if self.special_object(so_index).w_object is None: - self.special_object(so_index).w_object = w_object - else: - if not self.special_object(0).w_object.is_nil(self.space): - raise Warning('Object found in multiple places in the special objects array') - - def special_object(self, index): - special = self.chunks[self.specialobjectspointer].g_object.pointers - return special[index] - - def fillin_w_objects(self): - self.filledin_objects = 0 - for chunk in self.chunks.itervalues(): - chunk.g_object.fillin(self.space) - - def print_object_filledin(self): - self.filledin_objects = self.filledin_objects + 1 - if self.filledin_objects % 1000 == 0: - os.write(2,'%') - - def init_compactclassesarray(self): - """ from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """ - special = self.chunks[self.specialobjectspointer] - assert special.size > 24 #at least - 
assert special.format == 2 - # squeak-specific: compact classes array - chunk = self.chunks[special.data[COMPACT_CLASSES_ARRAY]] - assert len(chunk.data) == 31 - assert chunk.format == 2 - self.compactclasses = [self.chunks[pointer] for pointer in chunk.data] - def read_object(self): kind = self.stream.peek() & 3 # 2 bits if kind == 0: # 00 bits @@ -231,7 +152,7 @@ elif kind == 3: # 11 bits chunk, pos = self.read_1wordobjectheader() else: # 10 bits - raise CorruptImageError("Unused block not allowed in image") + raise error.CorruptImageError("Unused block not allowed in image") size = chunk.size chunk.data = [self.stream.next() for _ in range(size - 1)] #size-1, excluding header @@ -258,15 +179,71 @@ kind, _, format, _, idhash = splitter[2,6,4,5,12](self.stream.next()) assert kind == 0 return ImageChunk(self.space, size, format, classid, idhash), self.stream.count - 4 + + def init_compactclassesarray(self): + """ from the blue book (CompiledMethod Symbol Array PseudoContext LargePositiveInteger nil MethodDictionary Association Point Rectangle nil TranslatedMethod BlockContext MethodContext nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil ) """ + special = self.chunks[self.specialobjectspointer] + assert special.size > 24 #at least + assert special.format == 2 + # squeak-specific: compact classes array + chunk = self.chunks[special.data[COMPACT_CLASSES_ARRAY]] + assert len(chunk.data) == 31 + assert chunk.format == 2 + self.compactclasses = [self.chunks[pointer] for pointer in chunk.data] + + def init_g_objects(self): + for chunk in self.chunks.itervalues(): + chunk.as_g_object(self) # initialized g_object + + def assign_prebuilt_constants(self): + # Assign classes and objects that in special objects array that are already created. 
+ self._assign_prebuilt_constants(constants.objects_in_special_object_table, self.space.objtable) + if not self.version.is_modern: + classtable = {} + for name, so_index in self.space.classtable.items(): + # In non-modern images (pre 4.0), there was no BlockClosure class. + if not name == "BlockClosure": + classtable[name] = so_index + else: + classtable = self.space.classtable + self._assign_prebuilt_constants(constants.classes_in_special_object_table, classtable) + + def _assign_prebuilt_constants(self, names_and_indices, prebuilt_objects): + for name, so_index in names_and_indices.items(): + name = "w_" + name + if name in prebuilt_objects: + w_object = prebuilt_objects[name] + if self.special_object(so_index).w_object is None: + self.special_object(so_index).w_object = w_object + else: + if not self.special_object(0).w_object.is_nil(self.space): + raise Warning('Object found in multiple places in the special objects array') + + def special_object(self, index): + special = self.chunks[self.specialobjectspointer].g_object.pointers + return special[index] + + def init_w_objects(self): + for chunk in self.chunks.itervalues(): + chunk.g_object.init_w_object() + + def fillin_w_objects(self): + self.filledin_objects = 0 + for chunk in self.chunks.itervalues(): + chunk.g_object.fillin(self.space) + + def log_object_filledin(self): + self.filledin_objects = self.filledin_objects + 1 + self.log_progress(self.filledin_objects, '%') # ____________________________________________________________ class SqueakImage(object): - _immutable_fields_ = ["w_asSymbol", "w_simulateCopyBits", "version", - "is_modern", "startup_time"] + _immutable_fields_ = ["w_asSymbol", "w_simulateCopyBits", "version", "startup_time"] - def __init__(self, space, reader): + def __init__(self, reader): + space = reader.space self.special_objects = [g_object.w_object for g_object in reader.chunks[reader.specialobjectspointer] .g_object.pointers] @@ -275,7 +252,6 @@ self.w_simulateCopyBits = 
self.find_symbol(space, reader, "simulateCopyBits") self.lastWindowSize = reader.lastWindowSize self.version = reader.version - self.is_modern = reader.is_modern self.run_spy_hacks(space) self.startup_time = time.time() @@ -423,7 +399,7 @@ if self.ispointers(): self.w_object = objectmodel.instantiate(model.W_PointersObject) elif self.format == 5: - raise CorruptImageError("Unknown format 5") + raise error.CorruptImageError("Unknown format 5") elif self.isfloat(): self.w_object = objectmodel.instantiate(model.W_Float) elif self.is32bitlargepositiveinteger(): @@ -431,7 +407,7 @@ elif self.iswords(): self.w_object = objectmodel.instantiate(model.W_WordsObject) elif self.format == 7: - raise CorruptImageError("Unknown format 7, no 64-bit support yet :-)") + raise error.CorruptImageError("Unknown format 7, no 64-bit support yet :-)") elif self.isbytes(): self.w_object = objectmodel.instantiate(model.W_BytesObject) elif self.iscompiledmethod(): @@ -442,18 +418,18 @@ def get_bytes(self): bytes = [] - if self.reader.swap: + if self.reader.version.is_big_endian: + for each in self.chunk.data: + bytes.append(chr((each >> 24) & 0xff)) + bytes.append(chr((each >> 16) & 0xff)) + bytes.append(chr((each >> 8) & 0xff)) + bytes.append(chr((each >> 0) & 0xff)) + else: for each in self.chunk.data: bytes.append(chr((each >> 0) & 0xff)) bytes.append(chr((each >> 8) & 0xff)) bytes.append(chr((each >> 16) & 0xff)) bytes.append(chr((each >> 24) & 0xff)) - else: - for each in self.chunk.data: - bytes.append(chr((each >> 24) & 0xff)) - bytes.append(chr((each >> 16) & 0xff)) - bytes.append(chr((each >> 8) & 0xff)) - bytes.append(chr((each >> 0) & 0xff)) stop = len(bytes) - (self.format & 3) assert stop >= 0 return bytes[:stop] # omit odd bytes @@ -462,14 +438,14 @@ from rpython.rlib.rarithmetic import r_uint words = [r_uint(x) for x in self.chunk.data] if required_len != -1 and len(words) != required_len: - raise CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) + 
raise error.CorruptImageError("Expected %d words, got %d" % (required_len, len(words))) return words def fillin(self, space): if not self.filled_in: self.filled_in = True self.w_object.fillin(space, self) - self.reader.print_object_filledin() + self.reader.log_object_filledin() def get_g_pointers(self): assert self.pointers is not None diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -1,8 +1,7 @@ import py, StringIO, sys from struct import pack -from spyvm import squeakimage +from spyvm import squeakimage, error from spyvm.util.stream import chrs2int, chrs2long, swapped_chrs2long -from spyvm import objspace from .util import create_space, copy_to_module, cleanup_module def setup_module(): @@ -30,7 +29,7 @@ def imagereader_mock(string): stream = imagestream_mock(string) - return squeakimage.reader_for_image(space, stream) + return squeakimage.ImageReader(space, stream) SIMPLE_VERSION_HEADER = pack(">i", 6502) SIMPLE_VERSION_HEADER_LE = pack(" Author: Anton Gulenko Branch: storage-cleanups Changeset: r1021:218915d01567 Date: 2014-08-06 13:37 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/218915d01567/ Log: Fixed translation. Now able to parse 64-bit versions when translating in 32-bit mode; but will raise an error that image cannot be handled. 
diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -1,6 +1,6 @@ import os, time from spyvm import constants, model, util, error -from spyvm.util import stream +from spyvm.util import stream, system from spyvm.util.bitmanipulation import splitter from rpython.rlib import objectmodel @@ -32,6 +32,8 @@ def configure_stream(self, stream): stream.big_endian = self.is_big_endian if self.is_64bit: + if not system.IS_64BIT: + raise error.FatalError("Cannot handle 64-bit image.") stream.be_64bit() else: stream.be_32bit() @@ -43,14 +45,16 @@ 0x68190000: ImageVersion(6504, False, False, True, False), 0x00001969: ImageVersion(6505, True, False, True, True ), 0x69190000: ImageVersion(6505, False, False, True, True ), - - # Versions for 64 bit images - 0x00000000000109A0: ImageVersion(68000, True, True, False, False), - -0x5ff6ff0000000000:ImageVersion(68000, False, True, False, False), # 0xA009010000000000 - 0x00000000000109A2: ImageVersion(68002, True, True, True, False), - -0x5df6ff0000000000:ImageVersion(68002, False, True, True, False), # 0xA209010000000000 - 0x00000000000109A3: ImageVersion(68003, True, True, True, True ), - -0x5cf6ff0000000000:ImageVersion(68003, False, True, True, True ), # 0xA309010000000000 +} + +image_versions_64bit = { + # Versions for 64 bit images (expressed as two 32-bit words) + (0x00000000, 0x000109A0): ImageVersion(68000, True, True, False, False), + (-0x5ff6ff00, 0x00000000): ImageVersion(68000, False, True, False, False), # 0xA009010000000000 + (0x00000000, 0x000109A2): ImageVersion(68002, True, True, True, False), + (-0x5df6ff00, 0x00000000): ImageVersion(68002, False, True, True, False), # 0xA209010000000000 + (0x00000000, 0x000109A3): ImageVersion(68003, True, True, True, True ), + (-0x5cf6ff00, 0x00000000): ImageVersion(68003, False, True, True, True ), # 0xA309010000000000 } # ____________________________________________________________ @@ -59,20 +63,13 @@ class 
ImageReader(object): - _attrs_ = [ "space", "stream", "version", - "chunks", # Dictionary mapping old address to chunk object - "chunklist", # Flat list of all read chunks - "intcache", # Cached instances of SmallInteger - "lastWindowSize" - ] - def __init__(self, space, stream): self.space = space self.stream = stream self.version = None - self.chunks = {} - self.chunklist = [] - self.intcache = {} + self.chunks = {} # Dictionary mapping old address to chunk object + self.chunklist = [] # Flat list of all read chunks + self.intcache = {} # Cached instances of SmallInteger self.lastWindowSize = 0 def create_image(self): @@ -94,25 +91,25 @@ self.fillin_w_objects() def try_read_version(self): - version = image_versions.get(self.stream.next(), None) + magic1 = self.stream.next() + version = image_versions.get(magic1, None) if version: return version - self.stream.reset() - if self.stream.length() > POSSIBLE_IMAGE_OFFSET + 4: - self.stream.skipbytes(POSSIBLE_IMAGE_OFFSET) - version = image_versions.get(self.stream.next(), None) - if not version: - self.stream.reset() - return version + # Check 64 bit version + magic2 = self.stream.next() + version = image_versions_64bit.get((magic1, magic2), None) + if not version: + self.stream.reset() + return version def read_version(self): version = self.try_read_version() if not version: - # Try 64 bit - self.stream.be_64bit() - version = self.try_read_version() - if not version: - raise error.CorruptImageError("Illegal version magic.") + if self.stream.length() > POSSIBLE_IMAGE_OFFSET + 4: + self.stream.skipbytes(POSSIBLE_IMAGE_OFFSET) + version = self.try_read_version() + if not version: + raise error.CorruptImageError("Illegal version magic.") version.configure_stream(self.stream) self.version = version diff --git a/spyvm/test/test_squeakimage.py b/spyvm/test/test_squeakimage.py --- a/spyvm/test/test_squeakimage.py +++ b/spyvm/test/test_squeakimage.py @@ -185,42 +185,46 @@ r.read_header() assert r.stream.pos == len(image_2) 
-def test_simple_image64(): - #if not sys.maxint == 2 ** 63 - 1: - # py.test.skip("on 32 bit platforms, we can't need to check for 64 bit images") - word_size = 8 - header_size = 16 * word_size +def test_simple_image64(monkeypatch): + from spyvm.util import system + monkeypatch.setattr(system, "IS_64BIT", True) + + try: + word_size = 8 + header_size = 16 * word_size - image_1 = (pack(">Q", 68002) # 1 version - + pack(">q", header_size) # 2 64 byte header - + pack(">q", 0) # 3 no body - + pack(">q", 0) # 4 old base addresss unset - + pack(">q", 0) # 5 no spl objs array - + ("\x12\x34\x56\x78" * 2)# 6 last hash - + pack(">H", 480) # 7 window 480 height - + pack(">H", 640) # window 640 width - + pack(">i", 0) # pad - + pack(">q", 0) # 8 not fullscreen - + pack(">q", 0) # 9 no extra memory - + ("\x00" * (header_size - (9 * word_size)))) - r = imagereader_mock(image_1) - # does not raise - r.read_header() - assert r.stream.pos == len(image_1) + image_1 = (pack(">Q", 68002) # 1 version + + pack(">q", header_size) # 2 64 byte header + + pack(">q", 0) # 3 no body + + pack(">q", 0) # 4 old base addresss unset + + pack(">q", 0) # 5 no spl objs array + + ("\x12\x34\x56\x78" * 2)# 6 last hash + + pack(">H", 480) # 7 window 480 height + + pack(">H", 640) # window 640 width + + pack(">i", 0) # pad + + pack(">q", 0) # 8 not fullscreen + + pack(">q", 0) # 9 no extra memory + + ("\x00" * (header_size - (9 * word_size)))) + r = imagereader_mock(image_1) + # does not raise + r.read_header() + assert r.stream.pos == len(image_1) - image_2 = (pack("i", 0) # pad - + pack(">q", 0) # 8 not fullscreen - + pack("i", 0) # pad + + pack(">q", 0) # 8 not fullscreen + + pack(" Author: Armin Rigo Branch: Changeset: r72703:51ad4dce5ecd Date: 2014-08-06 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/51ad4dce5ecd/ Log: Crash translation if we're trying to use mixin classes in ways that are not supported diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- 
a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -419,6 +419,10 @@ base = object baselist = list(cls.__bases__) + if cls.__dict__.get('_mixin_', False): + raise Exception("cannot use directly the class %r because it" + " is a _mixin_" % (cls,)) + # special case: skip BaseException in Python 2.5, and pretend # that all exceptions ultimately inherit from Exception instead # of BaseException (XXX hack) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2536,6 +2536,22 @@ s = a.build_types(f, []) assert s.const == 2 + def test_cannot_use_directly_mixin(self): + class A(object): + _mixin_ = True + # + def f(): + return A() + a = self.RPythonAnnotator() + py.test.raises(Exception, a.build_types, f, []) + # + class B(object): + pass + x = B() + def g(): + return isinstance(x, A) + py.test.raises(Exception, a.build_types, g, []) + def test_import_from_mixin(self): class M(object): def f(self): From noreply at buildbot.pypy.org Wed Aug 6 18:57:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Aug 2014 18:57:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Convert some classes in rlib away from using _mixin_. These classes Message-ID: <20140806165746.0670C1C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72704:089c2104902a Date: 2014-08-06 18:54 +0200 http://bitbucket.org/pypy/pypy/changeset/089c2104902a/ Log: Convert some classes in rlib away from using _mixin_. These classes can either be directly instantiated (and then they are not mixins at all), or we can use import_from_mixin(), as done now in pypy/module/. 
diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -4,12 +4,15 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rStringIO import RStringIO from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib.objectmodel import import_from_mixin from pypy.module._io.interp_bufferedio import W_BufferedIOBase from pypy.module._io.interp_iobase import convert_size import sys -class W_BytesIO(RStringIO, W_BufferedIOBase): +class W_BytesIO(W_BufferedIOBase): + import_from_mixin(RStringIO) + def __init__(self, space): W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py --- a/pypy/module/_md5/interp_md5.py +++ b/pypy/module/_md5/interp_md5.py @@ -1,13 +1,15 @@ from rpython.rlib import rmd5 +from rpython.rlib.objectmodel import import_from_mixin from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -class W_MD5(W_Root, rmd5.RMD5): +class W_MD5(W_Root): """ A subclass of RMD5 that can be exposed to app-level. """ + import_from_mixin(rmd5.RMD5) def __init__(self, space): self.space = space diff --git a/pypy/module/_sha/interp_sha.py b/pypy/module/_sha/interp_sha.py --- a/pypy/module/_sha/interp_sha.py +++ b/pypy/module/_sha/interp_sha.py @@ -1,13 +1,15 @@ from rpython.rlib import rsha +from rpython.rlib.objectmodel import import_from_mixin from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -class W_SHA(W_Root, rsha.RSHA): +class W_SHA(W_Root): """ A subclass of RSHA that can be exposed to app-level. 
""" + import_from_mixin(rsha.RSHA) def __init__(self, space): self.space = space diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rStringIO import RStringIO +from rpython.rlib.objectmodel import import_from_mixin class W_InputOutputType(W_Root): @@ -144,7 +145,9 @@ # ____________________________________________________________ -class W_OutputType(RStringIO, W_InputOutputType): +class W_OutputType(W_InputOutputType): + import_from_mixin(RStringIO) + def __init__(self, space): self.init() self.space = space diff --git a/rpython/rlib/rStringIO.py b/rpython/rlib/rStringIO.py --- a/rpython/rlib/rStringIO.py +++ b/rpython/rlib/rStringIO.py @@ -8,8 +8,6 @@ The fastest path through this code is for the case of a bunch of write() followed by getvalue(). """ - _mixin_ = True # for interp_stringio.py - def __init__(self): self.init() diff --git a/rpython/rlib/rmd5.py b/rpython/rlib/rmd5.py --- a/rpython/rlib/rmd5.py +++ b/rpython/rlib/rmd5.py @@ -132,8 +132,6 @@ class RMD5(object): """RPython-level MD5 object. """ - _mixin_ = True # for interp_md5.py - def __init__(self, initialdata=''): self._init() self.update(initialdata) diff --git a/rpython/rlib/rsha.py b/rpython/rlib/rsha.py --- a/rpython/rlib/rsha.py +++ b/rpython/rlib/rsha.py @@ -95,8 +95,6 @@ class RSHA(object): """RPython-level SHA object. 
""" - _mixin_ = True # for interp_sha.py - def __init__(self, initialdata=''): self._init() self.update(initialdata) From noreply at buildbot.pypy.org Wed Aug 6 18:57:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 6 Aug 2014 18:57:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Use the more precise AnnotatorError in this new place Message-ID: <20140806165747.316BC1C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72705:0dad266b2efa Date: 2014-08-06 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/0dad266b2efa/ Log: Use the more precise AnnotatorError in this new place diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -420,8 +420,8 @@ baselist = list(cls.__bases__) if cls.__dict__.get('_mixin_', False): - raise Exception("cannot use directly the class %r because it" - " is a _mixin_" % (cls,)) + raise AnnotatorError("cannot use directly the class %r because " + "it is a _mixin_" % (cls,)) # special case: skip BaseException in Python 2.5, and pretend # that all exceptions ultimately inherit from Exception instead diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2543,14 +2543,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(Exception, a.build_types, g, []) + py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) def test_import_from_mixin(self): class M(object): From noreply at buildbot.pypy.org Wed Aug 6 19:46:53 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 19:46:53 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk 
storage-cleanups: Moved images used in tests to separate test/images folder. Message-ID: <20140806174653.EFBD91C05B7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1022:cd56b20326e3 Date: 2014-08-06 14:09 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/cd56b20326e3/ Log: Moved images used in tests to separate test/images folder. Makes clear that these images should not be touched, since tests depend on them. diff too long, truncating to 2000 out of 361107 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes deleted file mode 100644 --- a/images/Squeak4.5-12568.changes +++ /dev/null @@ -1,39 +0,0 @@ -'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! ----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. 
Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! !WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! 
compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 
102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) "The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! !HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! 
WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. 
^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. 
trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. 
self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. 
If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." "Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." 
"Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. 
You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! 
unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! 
buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! 
!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList: aSymbol currentPackageList := aSymbol. self changed: #installButtonLabel.! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'! defaultLabel ^ 'Categorical ' , super defaultLabel! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'! installButtonLabel ^ self currentPackageList = #notInstalled ifTrue: ['Install the above package'] ifFalse: ['Remove the above package']! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'! installedPackageList ^self packageList select: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! installedPackagesListIndex ^ self currentPackageList = #installed ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! installedPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #installed ifTrue: [self currentPackageList: #installed. self changed: #currentPackageList]. self noteChanged! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! isOn ^false! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'! notInstalledPackageList ^self packageList reject: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! notInstalledPackagesListIndex ^ self currentPackageList = #notInstalled ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'! notInstalledPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #notInstalled ifTrue: [self currentPackageList: #notInstalled. self changed: #currentPackageList]. self changed: #packagesListIndex. "update my selection" self noteChanged. self contentsChanged! ! 
!SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'! noteChanged self changed: #installedPackageList. self changed: #notInstalledPackageList. super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'! packageList ^ self packages select: [:e | (e categories anySatisfy: [:cat | cat = self selectedCategory]) and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! ! !SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'! descriptionForPartsBin ^self partName: 'Package Loader' categories: #(Tools) documentation: 'SqueakMap UI' ! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifPresent: [:tb | self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white "not set" ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! initializedInstance ^ (ToolBuilder open: self new) extent: 400 at 400! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! new "Create a SqueakMap loader on the default map." ^self newOn: SMSqueakMap default! ! 
!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! newOn: aMap "Create a SqueakMap loader on given map." ^super new on: aMap; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! newStandAlone ^ ToolBuilder open: self new! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'! open "Create and open a SqueakMap Loader." "SMLoaderPlus open" ^ (Smalltalk at: #ToolBuilder) open: self new! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'! openOn: aSqueakMap "Create and open a SqueakMap Loader on a given map." "self openOn: SqueakMap default" ^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'! prototypicalToolWindow ^ ToolBuilder open: self new; applyModelExtent; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'! registerInFlapsRegistry "Register the receiver in the system's flaps registry." self environment at: #Flaps ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:) ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'! windowColorSpecification "Answer a WindowColorSpec object that declares my preference." 
^WindowColorSpec classSymbol: self name wording: 'Package Loader' brightColor: Color yellow muchLighter duller pastelColor: Color yellow veryMuchLighter duller helpMessage: 'The SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! addSelectedCategoryAsFilter "Add a new filter that filters on the currently selected category. Make it enabled as default." categoriesToFilterIds add: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'! askToLoadUpdates "Check how old the map is and ask to update it if it is older than 10 days or if there is no map on disk." | available | available := map isCheckpointAvailable. (available not or: [ (Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3]) ifTrue: [ (self confirm: (available ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?'] ifFalse: ['There is no map on disk, fetch it from the Internet?'])) ifTrue: [self loadUpdates]]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. dir := item isPackage ifTrue: [map cache directoryForPackage: item] ifFalse: [map cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'." 
win openInWorld! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildButtonBarWith: aBuilder ^ aBuilder pluggablePanelSpec new model: self; layout: #horizontal; children: (self commandSpecs select: [ :spec | spec fourth includes: #all] thenCollect: [ :spec | aBuilder pluggableActionButtonSpec new model: self; label: spec first; action: spec second; help: spec third; enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]); yourself]); name: #buttonBar; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildCategoriesListWith: aBuilder "Create the hierarchical list holding the category tree." ^ aBuilder pluggableTreeSpec new model: self; roots: #categoryList; getSelectedPath: #selectedCategoryPath; getChildren: #categoryChildren:; hasChildren: #categoryHasChildren:; setSelected: #selectedCategory:; menu: #categoriesMenu:; label: #categoryLabel:; autoDeselect: true; wantsDrop: true; name: #categoriesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagePaneWith: aBuilder "Create the text area to the right in the loader." ^ aBuilder pluggableTextSpec new model: self; getText: #itemDescription; name: #packagePane; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagesListWith: aBuilder "Create the hierarchical list holding the packages and releases." 
^ aBuilder pluggableTreeSpec new model: self; roots: #packageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; name: #packagesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildSearchPaneWith: aBuilder ^ aBuilder pluggableInputFieldSpec new model: self; selection: #searchSelection; getText: #searchText; setText: #findPackage:notifying:; name: #search; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'! buildWith: aBuilder "Create the package loader window." | buttonBarHeight vertDivide horizDivide | buttonBarHeight := 0.07. vertDivide := 0.6. horizDivide := 0.3. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight)); add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2))); add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide)); add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1)); add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1)); yourself); yourself). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! cachePackageReleaseAndOfferToCopy "Cache package release, then offer to copy it somewhere. Answer the chosen file's location after copy, or the cache location if no directory was chosen." | release installer newDir newName newFile oldFile oldName | release := self selectedPackageOrRelease. 
release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. installer := SMInstaller forPackageRelease: release. [UIManager default informUser: 'Caching ' , release asString during: [installer cache]] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs. ^nil ]. installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil]. oldName := installer fullFileName. newDir := FileList2 modalFolderSelector: installer directory. newDir ifNil: [ ^oldName ]. newDir = installer directory ifTrue: [ ^oldName ]. newName := newDir fullNameFor: installer fileName. newFile := FileStream newFileNamed: newName. newFile ifNil: [ ^oldName ]. newFile binary. oldFile := FileStream readOnlyFileNamed: oldName. oldFile ifNil: [ ^nil ]. oldFile binary. [[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]] on: Error do: [ :ex | ^oldName ]. ^newName! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categoriesMenu: aMenu "Answer the categories-list menu." self selectedCategory ifNotNil: [aMenu addList: self categorySpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! categoryChildren: aCategory ^ aCategory subCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! categoryHasChildren: aCategory ^ aCategory hasSubCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:46'! categoryLabel: aCategory ^ aCategory name! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01'! categoryList "Create the category list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." 
| list first | list := (map categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/22/2006 15:02'! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:01'! commandSpecFor: selector ^ self commandSpecs detect: [:spec | spec second = selector]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:00'! commandSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.' (item all)) ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.' (item all)) ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.' (item all)) ('Copy from cache' cachePackageReleaseAndOfferToCopy 'Download selected release into cache first if needed, and then offer to copy it somewhere else.' 
(item)) ('Force download into cache' downloadPackageRelease 'Force a download of the selected release into the cache.' (item)) ('Update' loadUpdates 'Update the package index from the servers.' (all)) ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).' (all)) ('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm '' (item)) ('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm '' (item)) ('Copy list' listInPasteBuffer 'Puts the list as text into the clipboard.' (all)) ('Save filters' saveFiltersAsDefault 'Saves the current filters as default.' (all)) ('Help' help 'What is this?' (all)))! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! defaultLabel ^ 'SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! downloadPackageRelease "Force a download of the selected package release into the cache." | release | release := self selectedPackageOrRelease. release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. [UIManager default informUser: 'Downloading ' , release asString during: [ (SMInstaller forPackageRelease: release) download] ] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. 
"(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterAdd: anObject self changeFilters: (self filters copyWith: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterAutoInstall ^[:package | package isInstallable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterAvailable ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterInstalled ^[:package | package isInstalled]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterNotInstalledYet ^[:package | package isInstalled not]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterNotUptoDate ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterPublished ^[:package | package isPublished]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterRemove: anObject self changeFilters: (self filters copyWithout: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! filterSafelyAvailable ^[:package | package isSafelyAvailable]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/30/2006 21:07'! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #(#('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') #('Installed packages' #filterInstalled 'Display only packages that are installed.') #('Published packages' #filterPublished 'Display only packages that have at least one published release.') ) asOrderedCollection. categoriesToFilterIds do: [:catId | specs add: {'Packages in ' , (map object: catId) name. catId. 'Display only packages that are in the category.'}]. ^ specs! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! filterVersion "Ignore spaces in the version string, they're sometimes spurious. Not used anymore." ^[:package | package categories anySatisfy: [:cat | (cat name, '*') match: (Smalltalk version copyWithout: $ ) ]]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filters ^filters! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/24/2006 13:49'! findPackage: aString notifying: aView "Search and select a package with the given (sub) string in the name or description. " | index list match descriptions | match := aString asString asLowercase. index := self packagesListIndex. list := self packageNameList. list isEmpty ifTrue: [^ self]. descriptions := self packageList collect: [:e | e description]. index + 1 to: list size do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. 
"wrap around" 1 to: index do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. self inform: 'No package matching ' , aString asString! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! generalOptions ^#( #('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm) #('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm) #('Put list in paste buffer' listInPasteBuffer) #('Save filters as default' saveFiltersAsDefault) #- ) ! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 18:36'! hasSelectedItem ^ self selectedPackageOrRelease notNil! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! help "Present help text. If there is a web server available, offer to open it. Use the WebBrowser registry if possible, or Scamper if available." | message browserClass | message := 'Welcome to the SqueakMap package loader. The names of packages are followed by versions: (installed -> latest). If there is no arrow, your installed version of the package is the latest. Bold packages and releases have been installed. The checkbox menu items modify which packages you''ll see. Take a look at them - only some packages are shown initially. The options available for a package depend on how it was packaged. Comment on a package by emailing the author or the squeak list.'. browserClass := Smalltalk at: #WebBrowser ifPresent: [ :registry | registry default ]. browserClass := browserClass ifNil: [ Smalltalk at: #Scamper ifAbsent: [ ^self inform: message ]]. (self confirm: message, ' Would you like to view more detailed help on the SqueakMap swiki page?') ifTrue: [ browserClass openOnUrl: 'http://wiki.squeak.org/2726' asUrl]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! informException: ex msg: msg "Tell the user that an error has occurred. 
Offer to open debug notifier." (self confirm: msg, 'Would you like to open a debugger?') ifTrue: [ex pass]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 05:28'! initialExtent ^500@400! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! installPackageRelease "Install selected package or release. The cache is used." | item release | item := self selectedPackageOrRelease ifNil: [^ nil]. item isPackageRelease ifTrue: [ (item isPublished or: [self confirm: 'Selected release is not published yet, install anyway?']) ifTrue: [^self installPackageRelease: item]] ifFalse: [ release := item lastPublishedReleaseForCurrentSystemVersion. release ifNil: [ (self confirm: 'The package has no published release for your Squeak version, try releases for any Squeak version?') ifTrue: [ release := item lastPublishedRelease. release ifNil: [ (self confirm: 'The package has no published release at all, take the latest of the unpublished releases?') ifTrue: [release := item lastRelease]]]]. release ifNotNil: [^self installPackageRelease: release]]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:53'! installPackageRelease: aRelease "Install a package release. The cache is used." | myRelease installer | aRelease isCompatibleWithCurrentSystemVersion ifFalse: [(self confirm: 'The package you are about to install is not listed as being compatible with your image version (', SystemVersion current majorMinorVersion, '), so the package may not work properly. Do you still want to proceed with the install?') ifFalse: [^ self]]. myRelease := self installedReleaseOfMe. installer := SMInstaller forPackageRelease: aRelease. [UIManager default informUser: 'Downloading ' , aRelease asString during: [installer download]. UIManager default informUser: 'Installing ' , aRelease asString during: [ installer install. 
myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | | msg | msg := ex messageText ifNil:[ex asString]. self informException: ex msg: ('Error occurred during install:\', msg, '\') withCRs].! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! installedReleaseOfMe "Return the release of the installed package loader." ^SMSqueakMap default installedReleaseOf: (SMSqueakMap default packageWithId: '941c0108-4039-4071-9863-a8d7d2b3d4a3').! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! itemChildren: anItem ^ anItem isPackage ifTrue: [anItem releases] ifFalse: [#()]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 19:56'! itemDescription ^ self selectedPackageOrRelease ifNil: [''] ifNotNilDo: [:item | item fullDescription]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! itemHasChildren: anItem ^ anItem isPackage and: [anItem releases notEmpty]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! itemLabel: anItem | label | label := anItem isPackage ifTrue: [anItem name , (anItem versionLabel ifEmpty: [''] ifNotEmptyDo: [:lbl | ' (' , anItem versionLabel , ')'])] ifFalse: [anItem smartVersion]. ^ anItem isInstalled ifTrue: [label asText allBold] ifFalse: [label]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/24/2006 17:17'! label ^ self labelForShown: (packagesList ifNil: [self packageList])! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! labelForFilter: aFilterSymbol ^(self filterSpecs detect: [:fs | fs second = aFilterSymbol]) first! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! labelForShown: packagesShown "Update the label of the window." ^ self defaultLabel , ' (', (packagesShown size < map packages size ifTrue: [packagesShown size printString, ' shown out of '] ifFalse: ['']) , map packages size printString, ' packages)'! ! 
!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! listInPasteBuffer "Useful when talking with people etc. Uses the map to produce a nice String." Clipboard clipboardText: (String streamContents: [:s | packagesList do: [:p | s nextPutAll: p nameWithVersionLabel; cr ]]) asText! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:31'! loadUpdates [UIManager default informUser: 'Loading Updates' during: [ map loadUpdates. self noteChanged ] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when updating map:\', ex messageText, '\') withCRs]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/24/2006 14:05'! noteChanged filters ifNil: [^ self reOpen]. map ifNotNil: [packagesList := nil. selectedCategory := nil. self changed: #categoryList. self changed: #packageList. self changed: #packagesListIndex. "update my selection" self contentsChanged]! ! !SMLoaderPlus methodsFor: 'initialization' stamp: 'btr 11/22/2006 16:11'! on: aSqueakMap "Initialize instance." map := aSqueakMap. map synchWithDisk. filters := DefaultFilters copy. categoriesToFilterIds := DefaultCategoriesToFilterIds copy. self askToLoadUpdates! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! package: aPackage filteredByCategory: aCategory "Answer true if the package should be shown if we filter on . It should be shown if itself or any of its releases has the category." | releases | releases := aPackage releases. ^(aPackage hasCategoryOrSubCategoryOf: aCategory) or: [ releases anySatisfy: [:rel | rel hasCategoryOrSubCategoryOf: aCategory]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageList "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." | list | list := packagesList ifNil: [packagesList := self packageListCalculated]. 
selectedCategory ifNotNil: [ list := list select: [:each | self package: each filteredByCategory: selectedCategory]]. self updateLabel: list. ^ list! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageListCalculated "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." ^ self packages select: [:p | filters allSatisfy: [:currFilter | currFilter isSymbol ifTrue: [(self perform: currFilter) value: p] ifFalse: [self package: p filteredByCategory: (map object: currFilter)]]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! packageNameList ^ self packageList collect: [:e | e name]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:30'! packageSpecificOptions | choices packageOrRelease | packageOrRelease := self selectedPackageOrRelease. choices := OrderedCollection new. packageOrRelease isInstallable ifTrue: [ choices add: (self commandSpecFor: #installPackageRelease)]. (packageOrRelease isDownloadable and: [packageOrRelease isCached]) ifTrue: [ choices add: (self commandSpecFor: #browseCacheDirectory)]. (packageOrRelease isPackageRelease and: [packageOrRelease isDownloadable]) ifTrue: [ choices add: (self commandSpecFor: #cachePackageReleaseAndOfferToCopy). choices add: (self commandSpecFor: #downloadPackageRelease)]. choices add: (self commandSpecFor: #emailPackageMaintainers). ^ choices! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 16:11'! packages "We request the packages as sorted by name by default." ^map packagesByName asArray ! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex ^ self packageList indexOf: self selectedItem! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex: anObject self selectedItem: (anObject = 0 ifFalse: [self packageList at: anObject])! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! 
packagesMenu: aMenu "Answer the packages-list menu." self selectedPackageOrRelease ifNotNil: [aMenu addList: self packageSpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:45'! perform: selector orSendTo: otherTarget "Selector was just chosen from a menu by a user. If can respond, then perform it on myself. If not, send it to otherTarget, presumably the editPane from which the menu was invoked." ^ (self respondsTo: selector) ifTrue: [self perform: selector] ifFalse: [super perform: selector orSendTo: otherTarget]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/26/2006 23:22'! reOpen "Close this package loader, probably because it has been updated, and open a new one." self inform: 'This package loader has been upgraded and will be closed and reopened to avoid strange side effects.'. window delete. (Smalltalk at: self class name) open! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeCategoryFilters "Remove all category filters." categoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeSelectedCategoryAsFilter "Remove the filter that filters on the currently selected category." categoriesToFilterIds remove: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! saveFiltersAsDefault "Save the current filters as default so that they are selected the next time the loader is opened." DefaultFilters := filters copy. DefaultCategoriesToFilterIds := categoriesToFilterIds copy! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchSelection "Selects all of the default search text so that a type-in overwrites it." ^ {1. self searchText size}! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchText "A dummy default search text so that the field describes its purpose." 
^ 'Search packages'! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedCategory "Return selected category." ^ selectedCategory! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:37'! selectedCategory: anSMCategory "Change the selected category." selectedCategory := anSMCategory. selectedCategory ifNotNil: [(selectedCategory objects includes: self selectedItem) ifFalse: [self selectedItem: nil]]. self changed: #selectedCategory. self changed: #packageList! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:52'! selectedCategoryPath "Return selected category's path." | path | path := #(). selectedCategory ifNotNil: [selectedCategory parent ifNotNilDo: [:p | path := path copyWith: p]. path := path copyWith: selectedCategory]. ^ path collect: [:cat | self categoryLabel: cat]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedItem ^ selectedItem! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:27'! selectedItem: anItem "This == workaround protects us from recursion since ToolBuilder's tree widgets will always tell us that the selection has been updated when we tell it that the selection path has been updated. Cleaner solutions invited." anItem == selectedItem ifFalse: [ selectedItem := anItem. self changed: #selectedItemPath. self changed: #itemDescription. self changed: #hasSelectedItem]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:16'! selectedItemPath | path | path := #(). (selectedItem isKindOf: SMPackageRelease) ifTrue: [path := path copyWith: selectedItem package]. selectedItem ifNotNil: [path := path copyWith: selectedItem]. ^ path! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:03'! selectedPackageOrRelease "Return selected package or package release." ^ selectedItem! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! 
showFilterString: aFilterSymbol ^(self stateForFilter: aFilterSymbol), (self labelForFilter: aFilterSymbol)! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! stateForFilter: aFilterSymbol ^(self filters includes: aFilterSymbol) ifTrue: ['<on>'] ifFalse: ['<off>'] ! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! toggleFilterState: aFilterSymbol ^(self filters includes: (aFilterSymbol)) ifTrue: [self filterRemove: aFilterSymbol] ifFalse: [self filterAdd: aFilterSymbol]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! uncheckFilters "Uncheck all filters." filters := OrderedCollection new. self noteChanged! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! updateLabel: packagesShown "Update the label of the window." window ifNotNilDo: [:w | w setLabel: (self labelForShown: packagesShown)]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackages "Tries to upgrade all installed packages to the latest published release for this version of Squeak. So this is a conservative approach." | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [ ^self inform: 'All ', installed size printString, ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [ ^self inform: 'None of the ', old size printString, ' old packages of the ', installed size printString, ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: [ 'Of the ', old size printString, ' old packages only ', toUpgrade size printString, ' can be upgraded. The following packages will not be upgraded: ', (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. 
(self confirm: info, ' About to upgrade the following packages: ', (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]), 'Proceed?') ifTrue: [ myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [ map upgradeOldPackages. self inform: toUpgrade size printString, ' packages successfully upgraded.'. myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\', ex messageText, '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. Confirms on each upgrade." ^ self upgradeInstalledPackagesConfirm: true! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackagesConfirm: confirmEach "Tries to upgrade all installed packages to the latest published release for this version of Squeak. If confirmEach is true we ask for every upgrade. " | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [^ self inform: 'All ' , installed size printString , ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [^ self inform: 'None of the ' , old size printString , ' old packages of the ' , installed size printString , ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: ['Of the ' , old size printString , ' old packages only ' , toUpgrade size printString , ' can be upgraded. 
The following packages will not be upgraded: ' , (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. (self confirm: info , ' About to upgrade the following packages: ' , (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]) , 'Proceed?') ifTrue: [myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [confirmEach ifTrue: [map upgradeOldPackagesConfirmBlock: [:p | self confirm: 'Upgrade ' , p installedRelease packageNameWithVersion , ' to ' , (p lastPublishedReleaseForCurrentSystemVersionNewerThan: p installedRelease) listName , '?']] ifFalse: [map upgradeOldPackages]. self inform: toUpgrade size printString , ' packages successfully processed.'. myRelease = self installedReleaseOfMe ifTrue: [self noteChanged] ifFalse: [self reOpen]]] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\' , ex messageText , '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesNoConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. No confirmation on each upgrade." ^ self upgradeInstalledPackagesConfirm: false! ! !SMPackageWrapper methodsFor: 'comparing' stamp: 'dvf 9/21/2003 16:25' prior: 27998626! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMPackageWrapper methodsFor: 'converting' stamp: 'btr 11/22/2006 00:54' prior: 27998778! asString | string | string := item name, ' (', item versionLabel, ')'. item isInstalled ifTrue: [string := string asText allBold]. "(string includesSubString: '->') ifTrue: [string := string asText color: Color green]." ^ string! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'dvf 10/14/2003 18:58' prior: 27998902! 
contents ^item releases reversed collect: [:e | SMPackageReleaseWrapper with: e]! ! !SMPackageWrapper methodsFor: 'testing' stamp: 'dvf 9/21/2003 16:25' prior: 27999070! hash ^self withoutListWrapper hash! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:55'! help ^ 'This shows all packages with their releases that should be displayed according the current filter.'! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString! ! !SMPackageWrapper methodsFor: 'printing' stamp: 'dvf 9/21/2003 16:22' prior: 27999192! printOn: aStream aStream nextPutAll: 'wrapper for: ', item printString! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849043! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMCategoryWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 18:53' prior: 27849195! asString ^ item name , ' (' , self numberOfObjects printString , ')'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'ar 2/9/2004 02:35' prior: 27849301! category ^item! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 21:02' prior: 27849402! contents ^ item subCategories collect: [:n | self class with: n model: n]! ! !SMCategoryWrapper methodsFor: 'model access' stamp: 'btr 11/30/2006 21:02'! getList ^ Array with: (self class with: self contents model: model)! ! !SMCategoryWrapper methodsFor: 'testing' stamp: 'btr 11/30/2006 18:53'! hasContents ^ item hasSubCategories! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849700! hash ^self withoutListWrapper hash! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:56'! help ^ 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'BJP 11/22/2002 14:17'! model ^model! ! 
!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 18:53'! numberOfObjects " | total | total _ 0. model allCategoriesDo: [:c | total _ total + c objects size]. ^total" ^item objects size! ! !SMPackageReleaseWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 21:30' prior: 27997393! asString "Show installed releases with a trailing asterisk." | string | string := item smartVersion. "Older SMBase versions don't have isInstalled.'" (item respondsTo: #isInstalled) ifTrue: [item isInstalled ifTrue: [string := (string , ' *') asText allBold]]. ^ string! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 17:14'! contents ^ #()! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString ! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47' prior: 27944626! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifAbsent: [self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white ifTrue: ["not set" Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor)]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52' prior: 27945298! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. 
self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:08' prior: 54331069! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:15' prior: 27927912! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. item ifNil: [^nil]. dir := item isPackage ifTrue: [model cache directoryForPackage: item] ifFalse: [model cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. " withLabel: item name, ' cache directory'." win openInWorld ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:52'! buildButtonBar | aRow btn | aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. self buttonSpecs do: [:spec | btn := self buildButtonNamed: spec first helpText: spec third action: spec second. aRow addMorphBack: btn] separatedBy: [aRow addTransparentSpacerOfSize: 3@0]. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:27'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 19:04' prior: 27928394! 
buildMorphicCategoriesList "Create the hierarchical list holding the category tree." | list | list := (SimpleHierarchicalListMorph on: self list: #categoryWrapperList selected: #selectedCategoryWrapper changeSelected: #selectedCategoryWrapper: menu: #categoriesMenu: keystroke: nil) autoDeselect: true; enableDrag: false; enableDrop: true; yourself. list setBalloonText: 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'. "list scroller submorphs do:[:each| list expandAll: each]." list adjustSubmorphPositions. ^ list! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 00:22' prior: 27929139! buildMorphicPackagesList "Create the hierarchical list holding the packages and releases." ^(SimpleHierarchicalListMorph on: self list: #packageWrapperList selected: #selectedItemWrapper changeSelected: #selectedItemWrapper: menu: #packagesMenu: keystroke: nil) autoDeselect: false; enableDrag: false; enableDrop: true; setBalloonText: 'This shows all packages with their releases that should be displayed according the current filter.'; yourself! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:13'! buildPackageButtonBar | aRow | "Somewhat patterned after IRCe's buttonRow method." aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'gk 5/5/2006 02:05' prior: 27929686! buildPackagePane "Create the text area to the right in the loader." | ptm | ptm := PluggableTextMorph on: self text: #contents accept: nil readSelection: nil "#packageSelection " menu: nil. ptm setBalloonText: 'This is where the selected package or package release is displayed.'. ptm lock. ^ptm! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:08' prior: 27930070! 
buildSearchPane "Cribbed from MessageNames>>inMorphicWindowWithInitialSearchString:" | typeInView searchButton typeInPane | typeInView := PluggableTextMorph on: self text: nil accept: #findPackage:notifying: readSelection: nil menu: nil. typeInView acceptOnCR: true; vResizing: #spaceFill; hResizing: #spaceFill; setTextMorphToSelectAllOnMouseEnter; askBeforeDiscardingEdits: false; setProperty: #alwaysAccept toValue: true. (typeInView respondsTo: #hideScrollBarsIndefinitely) ifTrue: [typeInView hideScrollBarsIndefinitely] ifFalse: [typeInView hideScrollBarIndefinitely]. searchButton := SimpleButtonMorph new target: typeInView; color: Color white; label: 'Search'; actionSelector: #accept; arguments: #(); yourself. typeInPane := AlignmentMorph newRow. typeInPane vResizing: #shrinkWrap; hResizing: #shrinkWrap; listDirection: #leftToRight; addMorphFront: searchButton; addTransparentSpacerOfSize: 6 @ 0; addMorphBack: typeInView; setBalloonText: 'Type into the pane, then press Search (or hit RETURN) to visit the next package matching what you typed.'. ^ typeInPane! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:24'! buttonSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.') ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.') ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.') ('Update' loadUpdates 'Update the package index from the servers.') ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).') ('Help' help 'What is this?'))! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:11' prior: 27936393! categorySpecificOptions | choices | choices := OrderedCollection new. 
(categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoader methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01' prior: 27933585! categoryWrapperList "Create the wrapper list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (model categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list collect: [:cat | SMCategoryWrapper with: cat model: self]! ! !SMLoader methodsFor: 'filter utilities' stamp: 'gk 7/10/2004 15:45' prior: 27913226! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 17:30' prior: 27930584! createWindow | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.3. horizDivide := 0.6. self addMorph: (self buildButtonBar borderWidth: 0) frame: (0.0 @ 0.0 corner: 1.0 @ buttonBarHeight). self addMorph: (self buildSearchPane borderWidth: 0) frame: (0.0 @ buttonBarHeight corner: vertDivide @ searchHeight). self addMorph: (self buildMorphicPackagesList borderWidth: 0) frame: (0.0 @ (buttonBarHeight + searchHeight) corner: vertDivide @ horizDivide). 
self addMorph: (self buildMorphicCategoriesList borderWidth: 0) frame: (0.0 @ horizDivide corner: vertDivide @ 1.0). self addMorph: (self buildPackagePane borderWidth: 0) frame: (vertDivide @ buttonBarHeight corner: 1.0 @ 1.0). self on: #mouseEnter send: #paneTransition: to: self. self on: #mouseLeave send: #paneTransition: to: self! ! !SMLoader methodsFor: 'interface' stamp: 'gk 7/12/2004 11:14' prior: 27931214! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoader methodsFor: 'interface' stamp: 'btr 12/1/2006 02:01'! defaultLabel ^'SqueakMap Package Loader'! ! !SMLoader methodsFor: 'actions' stamp: 'btr 11/22/2006 01:14' prior: 27917579! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoader methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 00:14' prior: 27923782! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #( #('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') From noreply at buildbot.pypy.org Wed Aug 6 19:46:55 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 19:46:55 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Removed all but one version of the mini squeak image. Message-ID: <20140806174655.B7E721C05B7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1023:45545d39f647 Date: 2014-08-06 14:39 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/45545d39f647/ Log: Removed all but one version of the mini squeak image. The remaining image contains all benchmark/test methods of the other images. Lets put all the usefull code into one image. 
diff --git a/images/benchmark.image b/images/benchmark.image deleted file mode 100644 Binary file images/benchmark.image has changed diff --git a/images/mini.image b/images/mini.image index a8cfbe28687d27aca4ab2d9dd0f37da338fd475d..c6aae01370f01c03a4a79d2738325907942fdcdc GIT binary patch [cut] diff --git a/images/minibluebookdebug.image b/images/minibluebookdebug.image deleted file mode 100644 Binary file images/minibluebookdebug.image has changed diff --git a/images/minitest.image b/images/minitest.image deleted file mode 100644 Binary file images/minitest.image has changed From noreply at buildbot.pypy.org Wed Aug 6 19:46:57 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 19:46:57 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Resized the window to 800*576, to have a width that is a multiple of 32. Message-ID: <20140806174657.1827A1C05B7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1024:278622e47ceb Date: 2014-08-06 14:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/278622e47ceb/ Log: Resized the window to 800*576, to have a width that is a multiple of 32. diff --git a/images/mini.image b/images/mini.image index c6aae01370f01c03a4a79d2738325907942fdcdc..3b80af1d58edc29b1c6d6379e5dd5e62015b6a47 GIT binary patch [cut] From noreply at buildbot.pypy.org Wed Aug 6 19:46:58 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 19:46:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Tagged some tests with @slow_tests, which excludes them from regular execution. Message-ID: <20140806174658.33DFE1C05B7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1025:f0f40bb06fc8 Date: 2014-08-06 16:55 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/f0f40bb06fc8/ Log: Tagged some tests with @slow_tests, which excludes them from regular execution. 
Adding --slow parameter to pytest executes all tests. The fast tests take 12 seconds, the entire test suite takes 2:30 minutes. Most of the time is spent in 2-3 tests. Maybe add something like @very_slow_test? Also split test_miniimage.py in two files. diff --git a/spyvm/test/conftest.py b/spyvm/test/conftest.py new file mode 100644 --- /dev/null +++ b/spyvm/test/conftest.py @@ -0,0 +1,25 @@ +import py + +def pytest_addoption(parser): + group = parser.getgroup("RSqueak test options") + group.addoption( + "--slow", "-S", + dest="execute-slow-tests", + action="store_true", + default=False, + help="Additionally execute slow tests (loading full Squeak image or long execution)" + ) + group.addoption( + "--jit", + dest="rsqueak-binary", + action="store", + default=None, + help="Path to a compiled rsqueak binary" + ) + +# The 'spy' parameter is used in tests under jittest/ +def pytest_funcarg__spy(request): + val = request.config.getvalue("rsqueak-binary") + if not val: + py.test.skip("Provide --jit parameter to execute jit tests") + return str(py.path.local(val)) diff --git a/spyvm/test/jittest/conftest.py b/spyvm/test/jittest/conftest.py deleted file mode 100644 --- a/spyvm/test/jittest/conftest.py +++ /dev/null @@ -1,15 +0,0 @@ -import py - - -def pytest_addoption(parser): - group = parser.getgroup("SPy JIT tests") - group.addoption( - "--spy", - dest="spy", - default=None, - help="Path to a compiled SPy binary" - ) - - -def pytest_funcarg__spy(request): - return str(py.path.local(request.config.getvalueorskip("spy"))) diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,12 +1,11 @@ import py -from .util import read_image, copy_to_module, cleanup_module +from .util import read_image, copy_to_module, cleanup_module, slow_test def setup_module(): space, interp, image, reader = read_image("bootstrapped.image") w = space.w perform = interp.perform 
copy_to_module(locals(), __name__) - space.initialize_class(space.w_String, interp) def teardown_module(): cleanup_module(__name__) @@ -15,6 +14,7 @@ w_result = perform(image.w_asSymbol, "asSymbol") assert w_result is image.w_asSymbol + at slow_test def test_retrieve_symbol(): """asSymbol "This is the only place that new Symbols are created. A Symbol is created @@ -25,6 +25,7 @@ ifTrue: [ ^ sym ] ]. ^ (Symbol basicNew: self size) initFrom: self""" + space.initialize_class(space.w_String, interp) w_result = perform(w("someString"), "asSymbol") assert w_result.as_string() == "someString" w_anotherSymbol = perform(w("someString"), "asSymbol") @@ -32,6 +33,4 @@ def test_all_pointers_are_valid(): from test_miniimage import _test_all_pointers_are_valid - from test_miniimage import _test_lookup_abs_in_integer _test_all_pointers_are_valid(reader) - _test_lookup_abs_in_integer(interp) diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,7 +1,7 @@ import operator from spyvm import model, constants, primitives from spyvm.test.test_primitives import MockFrame -from .util import read_image, copy_to_module, cleanup_module +from .util import read_image, copy_to_module, cleanup_module, slow_test from rpython.rlib.rarithmetic import intmask, r_uint def setup_module(): @@ -9,7 +9,6 @@ w = space.w copy_to_module(locals(), __name__) interp.trace = False - space.initialize_class(space.w_String, interp) def teardown_module(): cleanup_module(__name__) @@ -71,14 +70,4 @@ return a >> -b else: return a << b -# do_primitive("bitShift:", shift, j=-5) - do_primitive("bitShift:", shift, i=[9470032], j=[6]) # 8 - -# def test_primitiveAdd(): -# do_primitive("+", operator.add) - -# def test_primitiveSub(): -# do_primitive("-", operator.sub, j=[0xFF, 0xFFFF, 0xF0E0D0C0], i=[-1, -1, -1]) -# do_primitive("-", operator.sub) - # do_primitive("-", operator.sub, i=[0xFF], j=0x3FFFFFFF) - + 
do_primitive("bitShift:", shift, i=[9470032], j=[6]) diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,6 +1,6 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test def setup_module(): space, interp, image, reader = read_image("mini.image") @@ -14,35 +14,32 @@ def teardown_module(): cleanup_module(__name__) -def open_miniimage(): - return open_reader(space, "mini.image") +def runningSomethingImage(cached=True): + # This image has been created by executing the followin entire line in a workspace: + # a := Smalltalk snapshotPrimitive. 1+2. + # This way, the first few operations when opening this image are predetermined. + space, interp, _, _ = read_image('mini-running-something.image', cached=cached) + return space, interp -def find_symbol(name): - if name == "asSymbol": - return image.w_asSymbol - return perform(space.wrap_string(name), "asSymbol") - -def get_reader(): - return reader - -def get_image(): - return image +def runningExitImage(cached=True): + # This image has been created by executing the followin entire line in a workspace: + # Smalltalk snapshotPrimitive. Smalltalk snapshot: false andQuit: true. + # After starting, the image quits immediately. This allows testing the full image execution. 
+ space, interp, _, _ = read_image('mini-running-exit.image', cached=cached) + return space, interp def get_float_class(): - image = get_image() return image.special(constants.SO_FLOAT_CLASS) - + # ------ tests ------------------------------------------ def test_read_header(): - reader = open_miniimage() - reader.read_header() assert reader.endofmemory == 726592 assert reader.oldbaseaddress == -1221464064 assert reader.specialobjectspointer == -1221336216 def test_read_all_header(): - reader = open_miniimage() + reader = open_reader(space, "mini.image") reader.read_header() next = reader.stream.peek() assert next != 0 #expects object header, which must not be 0x00000000 @@ -55,11 +52,9 @@ assert pointer in reader.chunks def test_all_pointers_are_valid(): - reader = get_reader() _test_all_pointers_are_valid(reader) def test_there_are_31_compact_classes(): - reader = get_reader() assert len(reader.compactclasses) == 31 def test_float_class_size(): @@ -98,7 +93,6 @@ assert str(w_float_class.getclass(space).getclass(space).getclass(space)) == "Metaclass class" def test_nil_true_false(): - image = get_image() w = image.special(constants.SO_NIL) w.class_shadow(space) assert str(w) == "a UndefinedObject" @@ -110,7 +104,6 @@ assert str(w) == "a True" def test_scheduler(): - image = get_image() w = image.special(constants.SO_SCHEDULERASSOCIATIONPOINTER) w0 = w.fetch(space, 0) assert str(w0) == "a Symbol('Processor')" @@ -123,7 +116,6 @@ obj = image.special(so_index) obj.as_class_get_shadow(space) assert str(obj) == expected_name - image = get_image() # w = image.special(constants.SO_BITMAP_CLASS) # assert str(w) == "Bitmap" test_classname(constants.SO_SMALLINTEGER_CLASS, "SmallInteger") @@ -146,7 +138,6 @@ SO_CHARACTER_CLASS = 19""" def test_name_of_shadow_of_specials(): - image = get_image() w_doesnot = image.special(constants.SO_DOES_NOT_UNDERSTAND) assert repr(w_doesnot.class_shadow(space)) == "" assert repr(space.w_nil.class_shadow(space)) == "" @@ -158,7 +149,6 @@ 
assert repr(space.w_false.class_shadow(space)) == "" def test_special_objects0(): - image = get_image() w = image.special(constants.SO_DOES_NOT_UNDERSTAND) assert str(w) == "a Symbol('doesNotUnderstand:')" assert str(w.getclass(space)) == "Symbol" # for some strange reason not a symbol @@ -210,80 +200,29 @@ w_false = image.special(constants.SO_FALSE) assert w_false.is_same_object(space.w_false) + at slow_test def test_runimage_and_quit(): - # This image has been prepared executing the following DoIt (the entire line): - # Smalltalk snapshotPrimitive. Smalltalk snapshot: false andQuit: true. - # After starting, the image quits immediately. This allows testing the full image execution. - from targetimageloadingsmalltalk import active_context, execute_context - space, interp, _, _ = read_image('mini-running-exit.image') + space, interp = runningExitImage(cached=False) frame = active_context(space) try: execute_context(interp, frame) except error.Exit, e: assert e.msg == "Quit-Primitive called" -def test_compile_method(): - sourcecode = """fib - ^self < 2 - ifTrue: [ 1 ] - ifFalse: [ (self - 1) fib + (self - 2) fib ]""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - assert perform(w(10), "fib").is_same_object(w(89)) - -def test_become(): - sourcecode = """ - testBecome - | p1 p2 a | - p1 := 1 at 2. - p2 := #(3 4 5). - a := p1 -> p2. - (1 at 2 = a key) ifFalse: [^1]. - (#(3 4 5) = a value) ifFalse: [^2]. - (p1 -> p2 = a) ifFalse: [^3]. - (p1 == a key) ifFalse: [^4]. - (p2 == a value) ifFalse: [^5]. - p1 become: p2. - (1 at 2 = a value) ifFalse: [^6]. - (3 = (a key at: 1)) ifFalse: [^7]. - (4 = (a key at: 2)) ifFalse: [^8]. - (5 = (a key at: 3)) ifFalse: [^9]. - (p1 -> p2 = a) ifFalse: [^10]. - (p1 == a key) ifFalse: [^11]. - (p2 == a value) ifFalse: [^12]. 
- - ^42""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - w_result = perform(w(10), "testBecome") - assert space.unwrap_int(w_result) == 42 - def test_step_forged_image(): ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) s_ctx = ap.suspended_context().as_context_get_shadow(space) assert isinstance(s_ctx, storage_contexts.MethodContextShadow) assert s_ctx.top().is_same_object(space.w_true) -def test_cached_methoddict(): - sourcecode = """fib - ^self < 2 - ifTrue: [ 1 ] - ifFalse: [ ((self - 1) fib + (self - 2) fib) + 1 ]""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - assert perform(w(5), "fib").is_same_object(w(15)) - sourcecode = """fib - ^self < 2 - ifTrue: [ 1 ] - ifFalse: [ (self - 1) fib + (self - 2) fib ]""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - assert perform(w(10), "fib").is_same_object(w(89)) - def test_create_new_symbol(): w_result = perform(w("someString"), "asSymbol") assert w_result is not None assert w_result.as_string() == "someString" def test_create_new_symbol_new_with_arg0(): - w_dnu = get_image().special(constants.SO_DOES_NOT_UNDERSTAND) + w_dnu = image.special(constants.SO_DOES_NOT_UNDERSTAND) w_Symbol = w_dnu.getclass(space) w_res = perform(w_Symbol, "new:", w(0)) assert w_res.getclass(space).is_same_object(w_Symbol) @@ -301,14 +240,6 @@ assert w_result is not None assert isinstance(w_result, model.W_Float) -def test_compiling_float(): - sourcecode = """aFloat - ^ 1.1""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - w_result = perform(w(10), "aFloat") - assert isinstance(w_result, model.W_Float) - assert w_result.value == 1.1 - def test_existing_large_positive_integer_as_W_LargePositiveInteger1Word(): w_result = perform(interp.space.w_Float, "pi") assert w_result is not None @@ 
-326,13 +257,6 @@ assert w_result is not None assert isinstance(w_result, model.W_BytesObject) -def test_compiling_large_positive_integer(): - sourcecode = """aLargeInteger - ^ 16rFFFFFFFF""" - perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) - w_result = perform(w(10), "aLargeInteger") - assert isinstance(w_result, model.W_LargePositiveInteger1Word) - def test_doesNotUnderstand(): w_dnu = interp.space.objtable["w_doesNotUnderstand"] assert isinstance(w_dnu, model.W_BytesObject) @@ -343,18 +267,6 @@ assert isinstance(w_mbb, model.W_BytesObject) assert w_mbb.as_string() == "mustBeBoolean" -def test_run_doesNotUnderstand(): - space, interp, _, _ = read_image('mini-running-something.image') - w_result = interp.perform(interp.space.wrap_int(0), "runningADNU") - assert isinstance(w_result, model.W_BytesObject) - assert w_result.as_string() == "foobarThis:doesNotExist:('pypy' 'heya' )" - -def test_run_mustBeBoolean(): - space, interp, _, _ = read_image('mini-running-something.image') - w_result = interp.perform(interp.space.wrap_int(0), "runningMustBeBoolean") - assert isinstance(w_result, model.W_BytesObject) - assert w_result.as_string() == "mustBeBoolean has been called" - def test_Message(): w_message_cls = interp.space.w_Message assert w_message_cls is interp.space.classtable["w_Message"] @@ -364,27 +276,6 @@ w_message = s_message_cls.new() assert isinstance(w_message, model.W_PointersObject) -def test_step_run_something(): - # This test depends on the following code being executed in a workspace (the entire line): - # a := Smalltalk snapshotPrimitive. 1+2. - # This will save the image in a state that will satisfy the following test. 
- - space, interp, _, _ = read_image('mini-running-something.image') - ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) - w_ctx = ap.suspended_context() - s_ctx = w_ctx.as_context_get_shadow(space) - ap.store_suspended_context(space.w_nil) - - assert isinstance(s_ctx, storage_contexts.MethodContextShadow) - assert s_ctx.top().is_same_object(space.w_true) - interp.step(s_ctx) - interp.step(s_ctx) - assert s_ctx.top().value == 1 - interp.step(s_ctx) - assert s_ctx.top().value == 2 - interp.step(s_ctx) - assert s_ctx.top().value == 3 - def test_primitive_perform_with_args(): from spyvm.test.test_primitives import _prim w_o = space.wrap_list([1, 2, 3]) @@ -397,3 +288,32 @@ w_sel = sel size = _prim(space, primitives.PERFORM_WITH_ARGS, [w_o, w_sel, []]) assert size.value == 3 + +def test_step_run_something(): + space, interp = runningSomethingImage(cached=False) + ap = wrapper.ProcessWrapper(space, wrapper.scheduler(space).active_process()) + w_ctx = ap.suspended_context() + s_ctx = w_ctx.as_context_get_shadow(space) + ap.store_suspended_context(space.w_nil) + + assert isinstance(s_ctx, storage_contexts.MethodContextShadow) + assert s_ctx.top().is_same_object(space.w_true) + interp.step(s_ctx) + interp.step(s_ctx) + assert s_ctx.top().value == 1 + interp.step(s_ctx) + assert s_ctx.top().value == 2 + interp.step(s_ctx) + assert s_ctx.top().value == 3 + +def test_run_doesNotUnderstand(): + space, interp = runningSomethingImage() + w_result = interp.perform(interp.space.wrap_int(0), "runningADNU") + assert isinstance(w_result, model.W_BytesObject) + assert w_result.as_string() == "foobarThis:doesNotExist:('pypy' 'heya' )" + +def test_run_mustBeBoolean(): + space, interp = runningSomethingImage() + w_result = interp.perform(interp.space.wrap_int(0), "runningMustBeBoolean") + assert isinstance(w_result, model.W_BytesObject) + assert w_result.as_string() == "mustBeBoolean has been called" diff --git a/spyvm/test/test_miniimage_compiling.py 
b/spyvm/test/test_miniimage_compiling.py new file mode 100644 --- /dev/null +++ b/spyvm/test/test_miniimage_compiling.py @@ -0,0 +1,82 @@ +import py, math +from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test + +pytestmark = slow_test + +def setup_module(): + space, interp, _, _ = read_image("mini.image") + w = space.w + def perform_wrapper(receiver, selector, *args): + w_selector = None if isinstance(selector, str) else selector + return interp.perform(receiver, selector, w_selector, list(args)) + perform = perform_wrapper + copy_to_module(locals(), __name__) + +def teardown_module(): + cleanup_module(__name__) + +# ------ tests ------------------------------------------ + +def test_compile_method(): + sourcecode = """fib + ^self < 2 + ifTrue: [ 1 ] + ifFalse: [ (self - 1) fib + (self - 2) fib ]""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + assert perform(w(10), "fib").is_same_object(w(89)) + +def test_become(): + sourcecode = """ + testBecome + | p1 p2 a | + p1 := 1 at 2. + p2 := #(3 4 5). + a := p1 -> p2. + (1 at 2 = a key) ifFalse: [^1]. + (#(3 4 5) = a value) ifFalse: [^2]. + (p1 -> p2 = a) ifFalse: [^3]. + (p1 == a key) ifFalse: [^4]. + (p2 == a value) ifFalse: [^5]. + p1 become: p2. + (1 at 2 = a value) ifFalse: [^6]. + (3 = (a key at: 1)) ifFalse: [^7]. + (4 = (a key at: 2)) ifFalse: [^8]. + (5 = (a key at: 3)) ifFalse: [^9]. + (p1 -> p2 = a) ifFalse: [^10]. + (p1 == a key) ifFalse: [^11]. + (p2 == a value) ifFalse: [^12]. 
+ + ^42""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + w_result = perform(w(10), "testBecome") + assert space.unwrap_int(w_result) == 42 + +def test_cached_methoddict(): + sourcecode = """fib + ^self < 2 + ifTrue: [ 1 ] + ifFalse: [ ((self - 1) fib + (self - 2) fib) + 1 ]""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + assert perform(w(5), "fib").is_same_object(w(15)) + sourcecode = """fib + ^self < 2 + ifTrue: [ 1 ] + ifFalse: [ (self - 1) fib + (self - 2) fib ]""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + assert perform(w(10), "fib").is_same_object(w(89)) + +def test_compiling_float(): + sourcecode = """aFloat + ^ 1.1""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + w_result = perform(w(10), "aFloat") + assert isinstance(w_result, model.W_Float) + assert w_result.value == 1.1 + +def test_compiling_large_positive_integer(): + sourcecode = """aLargeInteger + ^ 16rFFFFFFFF""" + perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) + w_result = perform(w(10), "aLargeInteger") + assert isinstance(w_result, model.W_LargePositiveInteger1Word) diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,7 +5,7 @@ from rpython.rlib.rfloat import isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space, copy_to_module, cleanup_module, TestInterpreter +from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, slow_test def setup_module(): space = create_space(bootstrap = True) @@ -626,12 +626,14 @@ assert s_new_context.gettemp(1).as_string() == "second arg" assert s_new_context.gettemp(2).as_string() 
== "some value" + at slow_test def test_primitive_some_instance(): import gc; gc.collect() someInstance = map(space.wrap_list, [[1], [2]]) w_r = prim(primitives.SOME_INSTANCE, [space.w_Array]) assert w_r.getclass(space) is space.w_Array + at slow_test def test_primitive_next_instance(): someInstances = map(space.wrap_list, [[2], [3]]) w_frame, s_context = new_frame("") @@ -648,6 +650,7 @@ assert w_2.getclass(space) is space.w_Array assert w_1 is not w_2 + at slow_test def test_primitive_next_instance_wo_some_instance_in_same_frame(): someInstances = map(space.wrap_list, [[2], [3]]) w_frame, s_context = new_frame("") @@ -702,12 +705,6 @@ monkeypatch.undo() def test_primitive_be_display(): - # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent - # double-free bug - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - assert space.objtable["w_display"] is None mock_display = model.W_PointersObject(space, space.w_Point, 4) w_wordbmp = model.W_WordsObject(space, space.w_Array, 10) @@ -741,12 +738,6 @@ assert mock_display.fetch(space, 0) is w_bitmap def test_primitive_force_display_update(monkeypatch): - # XXX: Patch SDLDisplay -> get_pixelbuffer() to circumvent - # double-free bug - def get_pixelbuffer(self): - return lltype.malloc(rffi.ULONGP.TO, self.width * self.height * 32, flavor='raw') - display.SDLDisplay.get_pixelbuffer = get_pixelbuffer - mock_display = model.W_PointersObject(space, space.w_Point, 4) w_wordbmp = model.W_WordsObject(space, space.w_Array, 10) mock_display.store(space, 0, w_wordbmp) # bitmap diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,7 +1,8 @@ +import operator from spyvm import model -from .util import read_image, copy_to_module, cleanup_module +from .util import read_image, 
copy_to_module, cleanup_module, slow_test -import operator +pytestmark = slow_test def setup_module(): space, interp, image, reader = read_image('Squeak4.5-12568.image') diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -2,6 +2,13 @@ from spyvm import model, storage_classes, objspace, util, constants, squeakimage, interpreter, interpreter_bytecodes from rpython.rlib.objectmodel import instantiate +# Use this as decorator, if the test takes longer then a few seconds. +# This option is configured in conftest.py. +# To mark all tests in a module as slow, add this line to the module: +# pytestmark = slow_test +slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests")', + reason="Slow tests are being skipped. Add --slow to execute all tests.") + # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. # This way, as many tests as possible use the real, not-bootstrapped ObjSpace. bootstrap_by_default = False @@ -17,10 +24,16 @@ def open_reader(space, imagefilename): return squeakimage.ImageReader(space, image_stream(imagefilename)) -def read_image(image_filename, bootstrap = bootstrap_by_default): - space = create_space(bootstrap) - reader = open_reader(space, image_filename) - image = reader.create_image() +image_cache = {} + +def read_image(image_filename, cached=True): + if cached and image_filename in image_cache: + space, reader, image = image_cache.get(image_filename) + else: + space = create_space() + reader = open_reader(space, image_filename) + image = reader.create_image() + image_cache[image_filename] = (space, reader, image) interp = TestInterpreter(space, image) return space, interp, image, reader @@ -35,7 +48,7 @@ interp = TestInterpreter(space) return space, interp -def copy_to_module(locals, module_name): +def copy_to_module(locals, module_name, all_tests_slow = False): mod = sys.modules[module_name] mod._copied_objects_ = [] for name, obj in 
locals.items(): From noreply at buildbot.pypy.org Wed Aug 6 19:46:59 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Wed, 6 Aug 2014 19:46:59 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Added @very_slow_test decorator. Message-ID: <20140806174659.4F7091C05B7@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1026:7c5136678154 Date: 2014-08-06 17:11 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7c5136678154/ Log: Added @very_slow_test decorator. --all|-A runs all tests. Normal tests take 12 seconds, slow tests take 33 seconds, all tests take 2:35 minutes. diff --git a/spyvm/test/conftest.py b/spyvm/test/conftest.py --- a/spyvm/test/conftest.py +++ b/spyvm/test/conftest.py @@ -10,6 +10,13 @@ help="Additionally execute slow tests (loading full Squeak image or long execution)" ) group.addoption( + "--all", "-A", + dest="execute-all-tests", + action="store_true", + default=False, + help="Execute all tests" + ) + group.addoption( "--jit", dest="rsqueak-binary", action="store", diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,5 +1,5 @@ import py -from .util import read_image, copy_to_module, cleanup_module, slow_test +from .util import read_image, copy_to_module, cleanup_module, slow_test, very_slow_test def setup_module(): space, interp, image, reader = read_image("bootstrapped.image") @@ -14,7 +14,7 @@ w_result = perform(image.w_asSymbol, "asSymbol") assert w_result is image.w_asSymbol - at slow_test + at very_slow_test def test_retrieve_symbol(): """asSymbol "This is the only place that new Symbols are created. 
A Symbol is created diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,6 +1,6 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, very_slow_test def setup_module(): space, interp, image, reader = read_image("mini.image") @@ -200,7 +200,7 @@ w_false = image.special(constants.SO_FALSE) assert w_false.is_same_object(space.w_false) - at slow_test + at very_slow_test def test_runimage_and_quit(): from targetimageloadingsmalltalk import active_context, execute_context space, interp = runningExitImage(cached=False) diff --git a/spyvm/test/test_miniimage_compiling.py b/spyvm/test/test_miniimage_compiling.py --- a/spyvm/test/test_miniimage_compiling.py +++ b/spyvm/test/test_miniimage_compiling.py @@ -2,8 +2,6 @@ from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test -pytestmark = slow_test - def setup_module(): space, interp, _, _ = read_image("mini.image") w = space.w @@ -18,6 +16,10 @@ # ------ tests ------------------------------------------ +def test_load_image(): + pass + + at slow_test def test_compile_method(): sourcecode = """fib ^self < 2 @@ -26,6 +28,7 @@ perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) assert perform(w(10), "fib").is_same_object(w(89)) + at slow_test def test_become(): sourcecode = """ testBecome @@ -52,6 +55,7 @@ w_result = perform(w(10), "testBecome") assert space.unwrap_int(w_result) == 42 + at slow_test def test_cached_methoddict(): sourcecode = """fib ^self < 2 diff --git a/spyvm/test/test_primitives.py 
b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,7 +5,7 @@ from rpython.rlib.rfloat import isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, slow_test +from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test def setup_module(): space = create_space(bootstrap = True) @@ -626,14 +626,14 @@ assert s_new_context.gettemp(1).as_string() == "second arg" assert s_new_context.gettemp(2).as_string() == "some value" - at slow_test + at very_slow_test def test_primitive_some_instance(): import gc; gc.collect() someInstance = map(space.wrap_list, [[1], [2]]) w_r = prim(primitives.SOME_INSTANCE, [space.w_Array]) assert w_r.getclass(space) is space.w_Array - at slow_test + at very_slow_test def test_primitive_next_instance(): someInstances = map(space.wrap_list, [[2], [3]]) w_frame, s_context = new_frame("") @@ -650,7 +650,7 @@ assert w_2.getclass(space) is space.w_Array assert w_1 is not w_2 - at slow_test + at very_slow_test def test_primitive_next_instance_wo_some_instance_in_same_frame(): someInstances = map(space.wrap_list, [[2], [3]]) w_frame, s_context = new_frame("") diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -2,6 +2,7 @@ from spyvm import model from .util import read_image, copy_to_module, cleanup_module, slow_test +# The tests are quick, but loading the big image takes time. 
pytestmark = slow_test def setup_module(): @@ -15,8 +16,10 @@ def test_all_pointers_are_valid(): from test_miniimage import _test_all_pointers_are_valid + _test_all_pointers_are_valid(reader) + +def test_lookup_abs_in_integer(): from test_miniimage import _test_lookup_abs_in_integer - _test_all_pointers_are_valid(reader) _test_lookup_abs_in_integer(interp) def test_ensure(): diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -2,12 +2,15 @@ from spyvm import model, storage_classes, objspace, util, constants, squeakimage, interpreter, interpreter_bytecodes from rpython.rlib.objectmodel import instantiate -# Use this as decorator, if the test takes longer then a few seconds. -# This option is configured in conftest.py. +# Use these as decorators, if the test takes longer then a few seconds. +# The according options is configured in conftest.py. # To mark all tests in a module as slow, add this line to the module: # pytestmark = slow_test -slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests")', - reason="Slow tests are being skipped. Add --slow to execute all tests.") +slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests") or config.getvalue("execute-all-tests")', + reason="Slow tests are being skipped. Add --slow|-S to execute slow tests.") + +very_slow_test = py.test.mark.skipif('not config.getvalue("execute-all-tests")', + reason="Very slow tests are being skipped. Add --all|-A to execute all tests.") # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. # This way, as many tests as possible use the real, not-bootstrapped ObjSpace. 
From noreply at buildbot.pypy.org Thu Aug 7 00:46:34 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Thu, 7 Aug 2014 00:46:34 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: merge default Message-ID: <20140806224634.D9A2B1C0257@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72706:3027c2c5d444 Date: 2014-08-05 23:28 -0700 http://bitbucket.org/pypy/pypy/changeset/3027c2c5d444/ Log: merge default diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -74,9 +74,6 @@ zipimport zlib - When translated to Java or .NET, the list is smaller; see - `pypy/config/pypyoption.py`_ for details. 
- When translated on Windows, a few Unix-only modules are skipped, and the following module is built instead: @@ -328,7 +325,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst --- a/pypy/doc/getting-started-python.rst +++ b/pypy/doc/getting-started-python.rst @@ -50,6 +50,8 @@ libz-dev libbz2-dev libncurses-dev libexpat1-dev \ libssl-dev libgc-dev python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``liblzma-dev``. + On a Fedora-16 box these are:: [user at fedora-or-rh-box ~]$ sudo yum install \ @@ -57,6 +59,8 @@ zlib-devel bzip2-devel ncurses-devel expat-devel \ openssl-devel gc-devel python-sphinx python-greenlet + For the optional lzma module on PyPy3 you will also need ``xz-devel``. + On SLES11: $ sudo zypper install gcc make python-devel pkg-config \ @@ -74,6 +78,7 @@ * ``pkg-config`` (to help us locate libffi files) * ``libz-dev`` (for the optional ``zlib`` module) * ``libbz2-dev`` (for the optional ``bz2`` module) + * ``liblzma`` (for the optional ``lzma`` module, PyPy3 only) * ``libsqlite3-dev`` (for the optional ``sqlite3`` module via cffi) * ``libncurses-dev`` (for the optional ``_minimal_curses`` module) * ``libexpat1-dev`` (for the optional ``pyexpat`` module) diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -102,7 +102,7 @@ .. _Python: http://docs.python.org/index.html .. 
_`more...`: architecture.html#mission-statement .. _`PyPy blog`: http://morepypy.blogspot.com/ -.. _`development bug/feature tracker`: https://bugs.pypy.org +.. _`development bug/feature tracker`: https://bitbucket.org/pypy/pypy/issues .. _here: http://www.tismer.com/pypy/irc-logs/pypy/ .. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit .. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst --- a/pypy/doc/you-want-to-help.rst +++ b/pypy/doc/you-want-to-help.rst @@ -15,14 +15,14 @@ * Because of the above, we are very serious about Test Driven Development. It's not only what we believe in, but also that PyPy's architecture is working very well with TDD in mind and not so well without it. Often - the development means progressing in an unrelated corner, one unittest + development means progressing in an unrelated corner, one unittest at a time; and then flipping a giant switch, bringing it all together. (It generally works out of the box. If it doesn't, then we didn't - write enough unit tests.) It's worth repeating - PyPy - approach is great if you do TDD, not so great otherwise. + write enough unit tests.) It's worth repeating - PyPy's + approach is great if you do TDD, and not so great otherwise. * PyPy uses an entirely different set of tools - most of them included - in the PyPy repository. There is no Makefile, nor autoconf. More below + in the PyPy repository. There is no Makefile, nor autoconf. More below. Architecture ============ @@ -32,7 +32,7 @@ * `RPython`_ is the language in which we write interpreters. Not the entire PyPy project is written in RPython, only the parts that are compiled in the translation process. 
The interesting point is that RPython has no parser, - it's compiled from the live python objects, which make it possible to do + it's compiled from the live python objects, which makes it possible to do all kinds of metaprogramming during import time. In short, Python is a meta programming language for RPython. @@ -40,7 +40,7 @@ .. _`RPython`: coding-guide.html#RPython -* The translation toolchain - this is the part that takes care about translating +* The translation toolchain - this is the part that takes care of translating RPython to flow graphs and then to C. There is more in the `architecture`_ document written about it. @@ -73,7 +73,7 @@ .. _`we have a tracing JIT`: jit/index.html -* Garbage Collectors (GC): as you can notice if you are used to CPython's +* Garbage Collectors (GC): as you may notice if you are used to CPython's C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code. `Garbage collection in PyPy`_ is inserted during translation. Moreover, this is not reference counting; it is a real diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -616,7 +616,8 @@ def descr_classmethod_get(self, space, w_obj, w_klass=None): if space.is_none(w_klass): w_klass = space.type(w_obj) - return space.wrap(Method(space, self.w_function, w_klass, space.w_None)) + return space.wrap(Method(space, self.w_function, w_klass, + space.type(w_klass))) def descr_classmethod__new__(space, w_subtype, w_function): instance = space.allocate_instance(ClassMethod, w_subtype) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -200,7 +200,7 @@ elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -388,6 +388,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. If @@ -17,12 +17,11 @@ if len(obj) == 0: return _caller_locals() elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." 
- else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" + raise TypeError("vars() takes at most 1 argument.") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,8 +37,7 @@ attributes of its class's base classes. """ if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = _caller_locals().keys() # 2 stackframes away if not isinstance(local_names, list): @@ -48,92 +46,61 @@ return local_names import types - obj = args[0] - - dir_meth = None if isinstance(obj, types.InstanceType): - try: - dir_meth = getattr(obj, "__dir__") - except AttributeError: - pass + dir_meth = getattr(obj, '__dir__', None) else: - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): + names = dir_meth() + if not isinstance(names, list): raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) - result.sort() - return result + type(names),)) + names.sort() + return names elif isinstance(obj, types.ModuleType): try: - result = list(obj.__dict__) - result.sort() - return result + return sorted(obj.__dict__) except AttributeError: return [] - elif isinstance(obj, (types.TypeType, types.ClassType)): - #Don't look at __class__, as metaclass methods would be confusing. - result = _classdir(obj).keys() - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass + # Don't look at __class__, as metaclass methods would be confusing. 
+ return sorted(_classdir(obj)) + else: + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). ## XXX Would like this to go away someday; for now, it's ## XXX needed to get at im_self etc of method objects. */ - for attr in ['__members__','__methods__']: - try: - l = getattr(obj, attr) - if not isinstance(l, list): - continue - for item in l: - if isinstance(item, types.StringTypes): - Dict[item] = None - except (AttributeError, TypeError): - pass + for attr in '__members__', '__methods__': + l = getattr(obj, attr, None) + if not isinstance(l, list): + continue + names.extend(item for item in l if isinstance(item, str)) - result = Dict.keys() - result.sort() - return result + return sorted(names) def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. + """Return a set of the accessible attributes of class/type klass. - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. + This includes all attributes of klass and all of the base classes + recursively. """ - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. 
- bases = klass.__bases__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + names = set() + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) + return names diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). 
""" - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): + self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = 
JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = ['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -34,7 +34,7 @@ # def check_call(op, fname): assert op.opname == 'direct_call' - assert op.args[0].value._obj._name == fname + assert 
op.args[0].value._obj._name.startswith(fname) # ops = [op for block, op in graph.iterblockops()] check_call(ops[-3], 'virtual_ref') diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -340,6 +340,7 @@ # ____________________________________________________________ # VRefs + at specialize.argtype(0) def virtual_ref(x): """Creates a 'vref' object that contains a reference to 'x'. Calls to virtual_ref/virtual_ref_finish must be properly nested. The idea @@ -351,6 +352,7 @@ return DirectJitVRef(x) virtual_ref.oopspec = 'virtual_ref(x)' + at specialize.argtype(1) def virtual_ref_finish(vref, x): """See docstring in virtual_ref(x)""" keepalive_until_here(x) # otherwise the whole function call is removed diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1155,7 +1155,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot + # possibly be equal unless they are the same _ptr instance + return self is other def __ne__(self, other): return not (self == other) diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -93,7 +93,12 @@ return False # nothing to do, all signatures already match shape_cnt, shape_keys, shape_star = shape - assert not shape_star, "XXX not implemented" + if shape_star: + raise TyperError( + "not implemented: a call is done with a '*' argument, and the" + " multiple functions or methods that it can go to don't have" + " all the same signature (different argument names or defaults)." 
+ " The call can go to:\n%s" % '\n'.join(map(repr, graphs))) # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -64,3 +64,13 @@ assert lltype.typeOf(ptr) == OBJECTPTR y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x + + def test_delayedptr(self): + FUNCTYPE = lltype.FuncType([], lltype.Signed) + name = "delayed!myfunc" + delayedptr1 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + delayedptr2 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + assert delayedptr1 == delayedptr1 + assert delayedptr1 != delayedptr2 + assert bool(delayedptr1) + assert delayedptr1 != lltype.nullptr(FUNCTYPE) diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -192,6 +192,25 @@ import re assert re.match(msg, excinfo.value.args[0]) + def test_methods_with_named_arg_call(self): + class Base: + def fn(self, y): + raise NotImplementedError + class Sub1(Base): + def fn(self, y): + return 1 + y + class Sub2(Base): + def fn(self, x): # different name! 
+ return x - 2 + def dummyfn(n): + if n == 1: + s = Sub1() + else: + s = Sub2() + return s.fn(*(n,)) + + py.test.raises(TyperError, self.rtype, dummyfn, [int], int) + class PBase: def fn(self): From noreply at buildbot.pypy.org Thu Aug 7 00:46:36 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Thu, 7 Aug 2014 00:46:36 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: modify operation name Message-ID: <20140806224636.3A4141C0257@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72707:2bd06d902889 Date: 2014-08-06 15:45 -0700 http://bitbucket.org/pypy/pypy/changeset/2bd06d902889/ Log: modify operation name diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -904,11 +904,11 @@ def op_gc_gcflag_extra(self, subopnum, *args): return self.heap.gcflag_extra(subopnum, *args) - def op_do_malloc_fixedsize_clear(self): - raise NotImplementedError("do_malloc_fixedsize_clear") + def op_do_malloc_fixedsize(self): + raise NotImplementedError("do_malloc_fixedsize") - def op_do_malloc_varsize_clear(self): - raise NotImplementedError("do_malloc_varsize_clear") + def op_do_malloc_varsize(self): + raise NotImplementedError("do_malloc_varsize") def op_get_write_barrier_failing_case(self): raise NotImplementedError("get_write_barrier_failing_case") diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -47,7 +47,7 @@ class _uninitialized(object): def __init__(self, TYPE): - self._TYPE = TYPE + #self._TYPE = TYPE self.TYPE = TYPE def __repr__(self): return ''%(self.TYPE,) diff --git a/rpython/translator/test/test_exceptiontransform.py b/rpython/translator/test/test_exceptiontransform.py --- a/rpython/translator/test/test_exceptiontransform.py +++ b/rpython/translator/test/test_exceptiontransform.py @@ -239,7 +239,7 @@ 
etrafo.create_exception_handling(g) ops = dict.fromkeys([o.opname for b, o in g.iterblockops()]) assert 'zero_gc_pointers_inside' in ops - + def test_llexternal(self): from rpython.rtyper.lltypesystem.rffi import llexternal from rpython.rtyper.lltypesystem import lltype From noreply at buildbot.pypy.org Thu Aug 7 10:59:05 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 7 Aug 2014 10:59:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: remove dead code Message-ID: <20140807085905.5BEDB1C332E@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r1027:d597c6739d62 Date: 2014-07-17 10:14 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d597c6739d62/ Log: remove dead code diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -17,40 +17,6 @@ self.w_frame = w_frame self.w_stm_process = w_stm_process -class MyLock(): - def __init__(self): - self.LOCK = None - self.a = 0 - -mylock = MyLock() - - -def my_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if acquired: - mylock.a = 2 - #print "MY 2:", mylock.a - time.sleep(2.5) - mylock.LOCK.release() - else: - pass - #print "MY locked 10:", mylock.a - -def yours_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if acquired: - mylock.a = 10 - #print "YOURS 10:", mylock.a - mylock.LOCK.release() - time.sleep(4.0) - else: - pass - #print "YOURS locked 2:", mylock.a - - - class MissingBytecode(Exception): """Bytecode not implemented yet.""" def __init__(self, bytecodename): From noreply at buildbot.pypy.org Thu Aug 7 10:59:06 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 7 Aug 2014 10:59:06 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: removed changes Message-ID: <20140807085906.95A631C332E@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r1028:94fdcab50ba7 Date: 2014-07-17 13:32 +0200 
http://bitbucket.org/pypy/lang-smalltalk/changeset/94fdcab50ba7/ Log: removed changes diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -17,40 +17,6 @@ self.w_frame = w_frame self.w_stm_process = w_stm_process -class MyLock(): - def __init__(self): - self.LOCK = None - self.a = 0 - -mylock = MyLock() - - -def my_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if acquired: - mylock.a = 2 - #print "MY 2:", mylock.a - time.sleep(2.5) - mylock.LOCK.release() - else: - pass - #print "MY locked 10:", mylock.a - -def yours_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if acquired: - mylock.a = 10 - #print "YOURS 10:", mylock.a - mylock.LOCK.release() - time.sleep(4.0) - else: - pass - #print "YOURS locked 2:", mylock.a - - - class MissingBytecode(Exception): """Bytecode not implemented yet.""" def __init__(self, bytecodename): From noreply at buildbot.pypy.org Thu Aug 7 10:59:07 2014 From: noreply at buildbot.pypy.org (Hubert Hesse) Date: Thu, 7 Aug 2014 10:59:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: merge Message-ID: <20140807085907.B01E61C332E@cobra.cs.uni-duesseldorf.de> Author: Hubert Hesse Branch: stmgc-c7 Changeset: r1029:3f5e33c7c8ae Date: 2014-08-07 10:59 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/3f5e33c7c8ae/ Log: merge diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -17,40 +17,6 @@ self.w_frame = w_frame self.w_stm_process = w_stm_process -class MyLock(): - def __init__(self): - self.LOCK = None - self.a = 0 - -mylock = MyLock() - - -def my_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if acquired: - mylock.a = 2 - #print "MY 2:", mylock.a - time.sleep(2.5) - mylock.LOCK.release() - else: - pass - #print "MY locked 10:", mylock.a - -def yours_little_thread(): - while True: - acquired = mylock.LOCK.acquire(False) - if 
acquired: - mylock.a = 10 - #print "YOURS 10:", mylock.a - mylock.LOCK.release() - time.sleep(4.0) - else: - pass - #print "YOURS locked 2:", mylock.a - - - class MissingBytecode(Exception): """Bytecode not implemented yet.""" def __init__(self, bytecodename): From noreply at buildbot.pypy.org Thu Aug 7 13:23:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 13:23:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Add python27.lib automatically, so MSVC users need not specify the .lib Message-ID: <20140807112336.8B7011C3233@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72708:4b4823920562 Date: 2014-08-07 13:23 +0200 http://bitbucket.org/pypy/pypy/changeset/4b4823920562/ Log: Add python27.lib automatically, so MSVC users need not specify the .lib file in their Makefile diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -53,6 +53,12 @@ int pypy_execute_source_ptr(char *source, void* ptr); +/* Windows hackery */ +#if defined(_MSC_VER) +# pragma comment(lib,"python27.lib") +#endif + + #ifdef __cplusplus } #endif From noreply at buildbot.pypy.org Thu Aug 7 20:45:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 20:45:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Merge trunk again: redo the abandoned merge in 2aef0e942480. Message-ID: <20140807184507.B042D1C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72709:ce7658973a7b Date: 2014-08-07 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/ce7658973a7b/ Log: Merge trunk again: redo the abandoned merge in 2aef0e942480. 
diff too long, truncating to 2000 out of 112693 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,11 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,62 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin 
Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +147,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +169,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +193,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -203,8 +213,11 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +231,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -234,28 +248,39 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +288,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -56,6 +56,9 @@ for line in longrepr.splitlines(): py.builtin.print_(" %s" % line, file=self.logfile) for key, text in sections: + # py.io.StdCaptureFD may send in unicode + if isinstance(text, unicode): + text = text.encode('utf-8') py.builtin.print_(" ", file=self.logfile) py.builtin.print_(" -------------------- %s --------------------" % key.rstrip(), file=self.logfile) diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. 
The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. +# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py --- a/lib-python/2.7/imputil.py +++ b/lib-python/2.7/imputil.py @@ -422,7 +422,8 @@ saved back to the filesystem for future imports. The source file's modification timestamp must be provided as a Long value. """ - codestring = open(pathname, 'rU').read() + with open(pathname, 'rU') as fp: + codestring = fp.read() if codestring and codestring[-1] != '\n': codestring = codestring + '\n' code = __builtin__.compile(codestring, pathname, 'exec') @@ -603,8 +604,8 @@ self.desc = desc def import_file(self, filename, finfo, fqname): - fp = open(filename, self.desc[1]) - module = imp.load_module(fqname, fp, filename, self.desc) + with open(filename, self.desc[1]) as fp: + module = imp.load_module(fqname, fp, filename, self.desc) module.__file__ = filename return 0, module, { } diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py --- a/lib-python/2.7/modulefinder.py +++ b/lib-python/2.7/modulefinder.py @@ -109,16 +109,16 @@ def run_script(self, pathname): self.msg(2, "run_script", pathname) - fp = open(pathname, READ_MODE) - stuff = ("", "r", imp.PY_SOURCE) - self.load_module('__main__', fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = ("", "r", imp.PY_SOURCE) + self.load_module('__main__', fp, pathname, stuff) def load_file(self, pathname): dir, name = os.path.split(pathname) name, ext = os.path.splitext(name) - fp = open(pathname, READ_MODE) - stuff = (ext, "r", imp.PY_SOURCE) - self.load_module(name, fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = (ext, "r", imp.PY_SOURCE) + self.load_module(name, fp, pathname, stuff) def import_hook(self, name, caller=None, fromlist=None, level=-1): self.msg(3, "import_hook", name, caller, 
fromlist, level) @@ -461,6 +461,8 @@ fp, buf, stuff = self.find_module("__init__", m.__path__) self.load_module(fqname, fp, buf, stuff) self.msgout(2, "load_package ->", m) + if fp: + fp.close() return m def add_module(self, fqname): diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,6 +48,9 @@ def tearDown(self): os.chdir(self.old_dir) + import gc + # Force a collection which should close FileType() options + gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,29 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) + + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, 
_timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,11 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +143,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +185,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. 
""" - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. 
This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -13,7 +13,15 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) return output_dir diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/license.terms @@ -0,0 +1,39 @@ +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., and other parties. The following +terms apply to all files associated with the software unless explicitly +disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. 
+ +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, the +software shall be classified as "Commercial Computer Software" and the +Government shall have only "Restricted Rights" as defined in Clause +252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the +authors grant the U.S. Government and others acting in its behalf +permission to use and distribute the software in accordance with the +terms specified in this license. 
diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -443,6 +443,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): 
+ line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + 
pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." 
(literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 +539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' 
# - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. 
modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! 
@@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; 
-typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = 
self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include -# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char 
_Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,174 @@ +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' +#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... +#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) + +class error(Exception): + pass + +def _fromstr(key): + if not isinstance(key, str): + raise TypeError("gdbm mappings have string indices only") + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 + if not res: + self._raise_from_errno() + self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(os.strerror(ffi.errno)) + raise error(lib.gdbm_strerror(lib.gdbm_errno)) + + def __len__(self): + if 
self.size < 0: + self.size = len(self.keys()) + return self.size + + def __setitem__(self, key, value): + self._check_closed() + self._size = -1 + r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res + + def keys(self): + self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(str(ffi.buffer(key.dptr, key.dsize))) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error("GDBM object has already been closed") + + __del__ = close + + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + +def open(filename, flags='r', mode=0666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error("First 
flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error("Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,63 +7,80 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make 
pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -72,35 +89,89 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." man: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man" + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." 
+ +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -105,7 +105,7 @@ while True: try: w_key = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise # re-raise other app-level exceptions break @@ -348,8 +348,12 @@ **objects** - Normal rules apply. Special methods are not honoured, except ``__init__``, - ``__del__`` and ``__iter__``. + Normal rules apply. 
The only special methods that are honoured are + ``__init__``, ``__del__``, ``__len__``, ``__getitem__``, ``__setitem__``, + ``__getslice__``, ``__setslice__``, and ``__iter__``. To handle slicing, + ``__getslice__`` and ``__setslice__`` must be used; using ``__getitem__`` and + ``__setitem__`` for slicing isn't supported. Additionally, using negative + indices for slicing is still not support, even when using ``__getslice__``. This layout makes the number of types to take care about quite limited. @@ -567,7 +571,7 @@ try: ... - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_XxxError): raise ... diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -18,11 +18,31 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('.')) + +# -- Read The Docs theme config ------------------------------------------------ + +# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if not on_rtd: # only import and set the theme if we're building docs locally + try: + import sphinx_rtd_theme + html_theme = 'sphinx_rtd_theme' + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + except ImportError: + print('sphinx_rtd_theme is not installed') + html_theme = 'default' + +# otherwise, readthedocs.org uses their theme by default, so no need to specify it + + # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig'] +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', + 'pypyconfig'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -45,9 +65,9 @@ # built documents. # # The short X.Y version. -version = '2.2' +version = '2.3' # The full version, including alpha/beta/rc tags. -release = '2.2.1' +release = '2.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -91,7 +111,7 @@ # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. -html_theme = 'default' +#html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the diff --git a/pypy/doc/config/translation.log.txt b/pypy/doc/config/translation.log.txt --- a/pypy/doc/config/translation.log.txt +++ b/pypy/doc/config/translation.log.txt @@ -2,4 +2,4 @@ These must be enabled by setting the PYPYLOG environment variable. The exact set of features supported by PYPYLOG is described in -pypy/translation/c/src/debug_print.h. +rpython/translator/c/src/debug_print.h. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -99,6 +99,7 @@ Stian Andreassen Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz @@ -183,7 +184,9 @@ Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner anatoly techtonik Lutz Paelike @@ -216,6 +219,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven Akira Li @@ -245,6 +249,8 @@ Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr jiaaro opassembler.py Antony Lee diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -348,4 +348,9 @@ type and vice versa. For builtin types, a dictionary will be returned that cannot be changed (but still looks and behaves like a normal dictionary). +* PyPy prints a random line from past #pypy IRC topics at startup in + interactive mode. In a released version, this behaviour is supressed, but + setting the environment variable PYPY_IRC_TOPIC will bring it back. Note that + downstream package providers have been known to totally disable this feature. + .. include:: _ref.txt diff --git a/pypy/doc/ctypes-implementation.rst b/pypy/doc/ctypes-implementation.rst --- a/pypy/doc/ctypes-implementation.rst +++ b/pypy/doc/ctypes-implementation.rst @@ -72,13 +72,11 @@ Here is a list of the limitations and missing features of the current implementation: -* ``ctypes.pythonapi`` lets you access the CPython C API emulation layer - of PyPy, at your own risks and without doing anything sensible about - the GIL. Since PyPy 2.3, these functions are also named with an extra - "Py", for example ``PyPyInt_FromLong()``. Basically, don't use this, - but it might more or less work in simple cases if you do. (Obviously, - assuming the PyObject pointers you get have any particular fields in - any particular order is just going to crash.) +* ``ctypes.pythonapi`` is missing. 
In previous versions, it was present + and redirected to the `cpyext` C API emulation layer, but our + implementation did not do anything sensible about the GIL and the + functions were named with an extra "Py", for example + ``PyPyInt_FromLong()``. It was removed for being unhelpful. * We copy Python strings instead of having pointers to raw buffers diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -8,6 +8,9 @@ *Articles about PyPy published so far, most recent first:* (bibtex_ file) +* `A Way Forward in Parallelising Dynamic Languages`_, + R. Meier, A. Rigo + * `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_, C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo @@ -71,6 +74,7 @@ .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib +.. _`A Way Forward in Parallelising Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2014/position-paper.pdf .. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/pepm2011/bolz-allocation-removal.pdf .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/mediawiki/images/a/a7/Pub-BoLeSch2010.pdf @@ -93,6 +97,11 @@ Talks and Presentations ---------------------------------- +*This part is no longer updated.* The complete list is here__ (in +alphabetical order). + +.. __: https://bitbucket.org/pypy/extradoc/src/extradoc/talk/ + Talks in 2010 +++++++++++++ diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -465,9 +465,13 @@ This is documented (here__ and here__). It needs 4 GB of RAM to run "rpython targetpypystandalone" on top of PyPy, a bit more when running -on CPython. 
If you have less than 4 GB it will just swap forever (or -fail if you don't have enough swap). On 32-bit, divide the numbers by -two. +on top of CPython. If you have less than 4 GB free, it will just swap +forever (or fail if you don't have enough swap). And we mean *free:* +if the machine has 4 GB *in total,* then it will swap. + +On 32-bit, divide the numbers by two. (We didn't try recently, but in +the past it was possible to compile a 32-bit version on a 2 GB Linux +machine with nothing else running: no Gnome/KDE, for example.) .. __: http://pypy.org/download.html#building-from-source .. __: https://pypy.readthedocs.org/en/latest/getting-started-python.html#translating-the-pypy-python-interpreter diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -28,11 +28,6 @@ pypy/doc/tool/makecontributor.py generates the list of contributors * rename pypy/doc/whatsnew_head.rst to whatsnew_VERSION.rst and create a fresh whatsnew_head.rst after the release -* merge PYPY_IRC_TOPIC environment variable handling from previous release - in pypy/doc/getting-started-dev.rst, pypy/doc/man/pypy.1.rst, and - pypy/interpreter/app_main.py so release versions will not print a random - IRC topic by default. -* change the tracker to have a new release tag to file bugs against * go to pypy/tool/release and run: force-builds.py * wait for builds to complete, make sure there are no failures diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -1,19 +1,43 @@ Historical release notes ------------------------- +======================== + +Cpython 2.7 compatible versions +=============================== .. 
toctree:: + release-2.3.1.rst + release-2.3.0.rst + release-2.2.1.rst + release-2.2.0.rst + release-2.1.0.rst + release-2.1.0-beta2.rst + release-2.1.0-beta1.rst + release-2.1.0.rst + release-2.0.2.rst + release-2.0.1.rst + release-2.0.0.rst + release-2.0.0-beta2.rst + release-2.0.0-beta1.rst + release-1.9.0.rst + release-1.8.0.rst + release-1.7.0.rst + release-1.6.0.rst + release-1.5.0.rst + release-1.4.1.rst + release-1.4.0beta.rst + release-1.4.0.rst + release-1.3.0.rst + release-1.2.0.rst + release-1.1.0.rst + release-1.0.0.rst + release-0.99.0.rst + release-0.9.0.rst + release-0.8.0.rst + release-0.7.0.rst release-0.6 - release-0.7.0.rst - release-0.8.0.rst - release-0.9.0.rst - release-0.99.0.rst - release-1.0.0.rst - release-1.1.0.rst - release-1.2.0.rst - release-1.3.0.rst - release-1.4.0.rst - release-1.4.0beta.rst - release-1.4.1.rst - release-1.5.0.rst - release-1.6.0.rst + +Cpython 3.2 compatible versions +=============================== +.. toctree:: + release-pypy3-2.1.0-beta1.rst diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -40,7 +40,7 @@ * `FAQ`_: some frequently asked questions. -* `Release 2.2.1`_: the latest official release +* `Release 2.3.1`_: the latest official release * `PyPy Blog`_: news and status info about PyPy @@ -110,7 +110,7 @@ .. _`Getting Started`: getting-started.html .. _`Papers`: extradoc.html .. _`Videos`: video-index.html -.. _`Release 2.2.1`: http://pypy.org/download.html +.. _`Release 2.3.1`: http://pypy.org/download.html .. _`speed.pypy.org`: http://speed.pypy.org .. _`RPython toolchain`: translation.html .. 
_`potential project ideas`: project-ideas.html diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat --- a/pypy/doc/make.bat +++ b/pypy/doc/make.bat @@ -2,11 +2,15 @@ REM Command file for Sphinx documentation -set SPHINXBUILD=sphinx-build +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help @@ -14,16 +18,25 @@ if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files From noreply at buildbot.pypy.org Thu Aug 7 20:45:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 20:45:08 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Kill this test too Message-ID: <20140807184508.ED2F91C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72710:36d2641627cf Date: 2014-08-07 17:41 +0200 http://bitbucket.org/pypy/pypy/changeset/36d2641627cf/ Log: Kill this test too diff --git a/rpython/rlib/test/test_rstm.py b/rpython/rlib/test/test_rstm.py --- a/rpython/rlib/test/test_rstm.py +++ b/rpython/rlib/test/test_rstm.py @@ -4,20 +4,3 @@ def test_symbolics(): assert rstm.adr_nursery_free == rstm.adr_nursery_free assert rstm.adr_nursery_free != rstm.adr_nursery_top - -def test_tlref_untranslated(): - class FooBar(object): - pass - t = rstm.ThreadLocalReference(FooBar) - results = [] - def subthread(): - x = FooBar() - results.append(t.get() is None) - t.set(x) - results.append(t.get() is x) - time.sleep(0.2) - results.append(t.get() is x) - for i in range(5): - thread.start_new_thread(subthread, ()) - time.sleep(0.5) - assert results == [True] * 15 From noreply at buildbot.pypy.org Thu Aug 7 20:45:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 20:45:10 +0200 (CEST) 
Subject: [pypy-commit] pypy stmgc-c7: Fix imports Message-ID: <20140807184510.311121C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72711:5b52f9756edd Date: 2014-08-07 17:41 +0200 http://bitbucket.org/pypy/pypy/changeset/5b52f9756edd/ Log: Fix imports diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -36,7 +36,6 @@ from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import compute_unique_id -from rpython.jit.backend.x86 import stmtlocal from rpython.rlib import rstm, nonconst @@ -2723,6 +2722,7 @@ """ if self.SEGMENT_TL != self.SEGMENT_NO: # only for STM and not during tests + from rpython.jit.backend.x86 import stmtlocal adr -= stmtlocal.threadlocal_base() assert rx86.fits_in_32bits(adr) return heap(self.SEGMENT_TL, adr) @@ -2743,7 +2743,7 @@ addr0 = stmtlocal.threadlocal_base() addr = addr1 - addr0 assert rx86.fits_in_32bits(addr) - self.mc.MOV_rj(resloc.value, (stmtlocal.SEGMENT_TL, addr)) + self.mc.MOV_rj(resloc.value, (self.SEGMENT_TL, addr)) def get_set_errno(self, op, loc, issue_a_write): # this function is only called on Linux @@ -2751,7 +2751,7 @@ addr = stmtlocal.get_errno_tl() assert rx86.fits_in_32bits(addr) mc = self.mc - SEGTL = stmtlocal.SEGMENT_TL + SEGTL = self.SEGMENT_TL if issue_a_write: if isinstance(loc, RegLoc): mc.MOV32_jr((SEGTL, addr), loc.value) # memory write from reg From noreply at buildbot.pypy.org Thu Aug 7 20:45:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 20:45:11 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140807184511.602D71C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72712:91f1b7951764 Date: 2014-08-07 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/91f1b7951764/ Log: Fix diff --git 
a/pypy/module/thread/stm.py b/pypy/module/thread/stm.py --- a/pypy/module/thread/stm.py +++ b/pypy/module/thread/stm.py @@ -4,7 +4,7 @@ XXX this module may contain near-duplicated code from the non-STM variants """ -from pypy.module.thread.threadlocals import BaseThreadLocals +from pypy.module.thread.threadlocals import OSThreadLocals from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.gateway import W_Root, interp2app @@ -42,7 +42,7 @@ initialize_execution_context(ec) -class STMThreadLocals(BaseThreadLocals): +class STMThreadLocals(OSThreadLocals): threads_running = False _immutable_fields_ = ['threads_running?'] @@ -56,11 +56,9 @@ assert space.actionflag.setcheckinterval_callback is None space.actionflag.setcheckinterval_callback = setcheckinterval_callback - def getallvalues(self): - raise ValueError - - def leave_thread(self, space): - self.setvalue(None) + # XXX? + #def getallvalues(self): + # raise ValueError def setup_threads(self, space): if not self.threads_running: @@ -72,14 +70,6 @@ rstm.enter_callback_call, rstm.leave_callback_call) - def reinit_threads(self, space): - self.setup_threads(space) - ident = rthread.get_ident() - if ident != self._mainthreadident: - ec = self.getvalue() - ec._signals_enabled += 1 - self._mainthreadident = ident - def configure_transaction_length(self, space): if self.threads_running: interval = space.actionflag.getcheckinterval() diff --git a/pypy/module/thread/threadlocals.py b/pypy/module/thread/threadlocals.py --- a/pypy/module/thread/threadlocals.py +++ b/pypy/module/thread/threadlocals.py @@ -7,33 +7,7 @@ ExecutionContext._signals_enabled = 0 # default value -class BaseThreadLocals(object): - _mainthreadident = 0 - - def initialize(self, space): - pass - - def setup_threads(self, space): - pass - - def signals_enabled(self): - ec = self.getvalue() - return ec._signals_enabled - - def enable_signals(self, space): - ec = 
self.getvalue() - ec._signals_enabled += 1 - - def disable_signals(self, space): - ec = self.getvalue() - new = ec._signals_enabled - 1 - if new < 0: - raise wrap_thread_error(space, - "cannot disable signals in thread not enabled for signals") - ec._signals_enabled = new - - -class OSThreadLocals(BaseThreadLocals): +class OSThreadLocals: """Thread-local storage for OS-level threads. For memory management, this version depends on explicit notification when a thread finishes. This works as long as the thread was started by @@ -49,6 +23,12 @@ self._valuedict.clear() self._mainthreadident = 0 + def initialize(self, space): + pass # for the STMThreadLocals subclass + + def setup_threads(self, space): + pass # for the STMThreadLocals subclass + def enter_thread(self, space): "Notification that the current thread is about to start running." self._set_ec(space.createexecutioncontext()) From noreply at buildbot.pypy.org Thu Aug 7 20:45:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 20:45:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix (the code in module/pypyjit was already doing that correctly) Message-ID: <20140807184512.9631D1C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72713:380497b41b43 Date: 2014-08-07 20:44 +0200 http://bitbucket.org/pypy/pypy/changeset/380497b41b43/ Log: Test and fix (the code in module/pypyjit was already doing that correctly) diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -61,6 +61,7 @@ while True: next_instr = self.handle_bytecode(co_code, next_instr, ec) except ExitFrame: + self.last_exception = None return self.popvalue() def handle_bytecode(self, co_code, next_instr, ec): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -280,6 +280,20 
@@ raise StopIteration assert tuple(f()) == (1,) + def test_exception_is_cleared_by_yield(self): + def f(): + try: + foobar + except NameError: + yield 5 + raise # should raise "no active exception to re-raise" + gen = f() + gen.next() # --> 5 + try: + gen.next() + except TypeError: + pass + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline From noreply at buildbot.pypy.org Thu Aug 7 21:25:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 7 Aug 2014 21:25:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix comment Message-ID: <20140807192555.DBEEC1D36D9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72714:3bd94738d4f8 Date: 2014-08-07 21:25 +0200 http://bitbucket.org/pypy/pypy/changeset/3bd94738d4f8/ Log: Fix comment diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -129,7 +129,7 @@ self.mc.MOV(heap(fastgil), css_value) # if not we_are_translated(): # for testing: we should not access - self.mc.ADD(ebp, imm(1)) # ebp any more; and ignore 'fastgil' + self.mc.ADD(ebp, imm(1)) # ebp any more def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 From noreply at buildbot.pypy.org Fri Aug 8 02:20:53 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 8 Aug 2014 02:20:53 +0200 (CEST) Subject: [pypy-commit] pypy py3k-qualname: merge py3.3 Message-ID: <20140808002053.A3D6E1C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-qualname Changeset: r72715:525ebcd3c24a Date: 2014-08-05 17:36 -0700 http://bitbucket.org/pypy/pypy/changeset/525ebcd3c24a/ Log: merge py3.3 diff too long, truncating to 2000 out of 4104 lines diff --git a/lib-python/3/distutils/command/build_ext.py b/lib-python/3/distutils/command/build_ext.py --- a/lib-python/3/distutils/command/build_ext.py +++ 
b/lib-python/3/distutils/command/build_ext.py @@ -4,7 +4,8 @@ modules (currently limited to C extensions, should accommodate C++ extensions ASAP).""" -import sys, os, re, imp +import sys, os, re +import importlib.machinery from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version @@ -36,9 +37,8 @@ show_compilers() def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext + suffixes = importlib.machinery.EXTENSION_SUFFIXES + return suffixes[0] if suffixes else None class build_ext(Command): diff --git a/lib-python/3/test/test_audioop.py b/lib-python/3/test/test_audioop.py --- a/lib-python/3/test/test_audioop.py +++ b/lib-python/3/test/test_audioop.py @@ -1,6 +1,7 @@ import audioop import sys import unittest +from test.support import run_unittest, impl_detail def pack(width, data): return b''.join(v.to_bytes(width, sys.byteorder, signed=True) for v in data) @@ -170,6 +171,7 @@ self.assertEqual(audioop.lin2lin(datas[4], 4, 2), packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1)) + @impl_detail(pypy=False) def test_adpcm2lin(self): self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None), (b'\x00\x00\x00\xff\x00\xff', (-179, 40))) @@ -184,6 +186,7 @@ self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None), (b'\0' * w * 10, (0, 0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): self.assertEqual(audioop.lin2adpcm(datas[1], 1, None), (b'\x07\x7f\x7f', (-221, 39))) @@ -197,6 +200,7 @@ self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None), (b'\0' * 5, (0, 0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(datas[1], 1), b'\xd5\x87\xa4\x24\xaa\x2a\x5a') @@ -205,6 +209,7 @@ self.assertEqual(audioop.lin2alaw(datas[4], 4), b'\xd5\x87\xa4\x24\xaa\x2a\x55') + @impl_detail(pypy=False) def test_alaw2lin(self): encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\ 
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff' @@ -219,6 +224,7 @@ decoded = audioop.alaw2lin(encoded, w) self.assertEqual(audioop.lin2alaw(decoded, w), encoded) + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(datas[1], 1), b'\xff\xad\x8e\x0e\x80\x00\x67') @@ -227,6 +233,7 @@ self.assertEqual(audioop.lin2ulaw(datas[4], 4), b'\xff\xad\x8e\x0e\x80\x00\x7e') + @impl_detail(pypy=False) def test_ulaw2lin(self): encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\ b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff' @@ -341,6 +348,7 @@ self.assertRaises(audioop.error, audioop.findmax, bytes(range(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -365,6 +373,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abcdefgh' state = None diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py --- a/lib-python/3/test/test_builtin.py +++ b/lib-python/3/test/test_builtin.py @@ -15,7 +15,8 @@ import unittest import warnings from operator import neg -from test.support import TESTFN, unlink, run_unittest, check_warnings +from test.support import ( + TESTFN, unlink, run_unittest, check_warnings, check_impl_detail) try: import pty, signal except ImportError: @@ -558,18 +559,21 @@ self.assertEqual((g, l), ({'a': 1}, {'b': 2})) def test_exec_globals(self): - code = compile("print('Hello World!')", "", "exec") - # no builtin function - self.assertRaisesRegex(NameError, "name 'print' is not defined", - exec, code, {'__builtins__': {}}) - # __builtins__ must be a mapping type - self.assertRaises(TypeError, - exec, code, {'__builtins__': 123}) + if check_impl_detail(): + # strict __builtins__ compliance (CPython) + code = compile("print('Hello World!')", "", "exec") + # no builtin function + 
self.assertRaisesRegex(NameError, "name 'print' is not defined", + exec, code, {'__builtins__': {}}) + # __builtins__ must be a mapping type + self.assertRaises(TypeError, + exec, code, {'__builtins__': 123}) - # no __build_class__ function - code = compile("class A: pass", "", "exec") - self.assertRaisesRegex(NameError, "__build_class__ not found", - exec, code, {'__builtins__': {}}) + # no __build_class__ function + code = compile("class A: pass", "", "exec") + if True: + self.assertRaisesRegex(NameError, "__build_class__ not found", + exec, code, {'__builtins__': {}}) class frozendict_error(Exception): pass @@ -579,7 +583,7 @@ raise frozendict_error("frozendict is readonly") # read-only builtins - frozen_builtins = frozendict(__builtins__) + frozen_builtins = frozendict(builtins.__dict__) code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec") self.assertRaises(frozendict_error, exec, code, {'__builtins__': frozen_builtins}) diff --git a/lib-python/3/test/test_capi.py b/lib-python/3/test/test_capi.py --- a/lib-python/3/test/test_capi.py +++ b/lib-python/3/test/test_capi.py @@ -110,6 +110,8 @@ self.assertRaises(TypeError, _posixsubprocess.fork_exec, Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17) + at unittest.skipIf(support.check_impl_detail(pypy=True), + 'Py_AddPendingCall not currently supported.') @unittest.skipUnless(threading, 'Threading required for this test.') class TestPendingCalls(unittest.TestCase): @@ -327,6 +329,8 @@ self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords, (), {}, b'', [42]) + at unittest.skipIf(support.check_impl_detail(pypy=True), + 'Not currently supported under PyPy') @unittest.skipUnless(threading, 'Threading required for this test.') class TestThreadState(unittest.TestCase): diff --git a/lib-python/3/test/test_concurrent_futures.py b/lib-python/3/test/test_concurrent_futures.py --- a/lib-python/3/test/test_concurrent_futures.py +++ b/lib-python/3/test/test_concurrent_futures.py @@ 
-295,14 +295,19 @@ event = threading.Event() def future_func(): event.wait() - oldswitchinterval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) + newgil = hasattr(sys, 'getswitchinterval') + if newgil: + geti, seti = sys.getswitchinterval, sys.setswitchinterval + else: + geti, seti = sys.getcheckinterval, sys.setcheckinterval + oldinterval = geti() + seti(1e-6 if newgil else 1) try: fs = {self.executor.submit(future_func) for i in range(100)} event.set() futures.wait(fs, return_when=futures.ALL_COMPLETED) finally: - sys.setswitchinterval(oldswitchinterval) + seti(oldinterval) class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase): diff --git a/lib-python/3/test/test_descr.py b/lib-python/3/test/test_descr.py --- a/lib-python/3/test/test_descr.py +++ b/lib-python/3/test/test_descr.py @@ -1196,7 +1196,7 @@ self.assertEqual(Counted.counter, 0) # Test lookup leaks [SF bug 572567] - if hasattr(gc, 'get_objects'): + if hasattr(gc, 'get_objects') and support.check_impl_detail(pypy=False): class G(object): def __eq__(self, other): return False @@ -3035,15 +3035,24 @@ class R(J): __slots__ = ["__dict__", "__weakref__"] - for cls, cls2 in ((G, H), (G, I), (I, H), (Q, R), (R, Q)): + if support.check_impl_detail(pypy=False): + lst = ((G, H), (G, I), (I, H), (Q, R), (R, Q)) + else: + # Not supported in pypy: changing the __class__ of an object + # to another __class__ that just happens to have the same slots. + # If needed, we can add the feature, but what we'll likely do + # then is to allow mostly any __class__ assignment, even if the + # classes have different __slots__, because we it's easier. 
+ lst = ((Q, R), (R, Q)) + for cls, cls2 in lst: x = cls() x.a = 1 x.__class__ = cls2 - self.assertIs(x.__class__, cls2, + self.assertTrue(x.__class__ is cls2, "assigning %r as __class__ for %r silently failed" % (cls2, x)) self.assertEqual(x.a, 1) x.__class__ = cls - self.assertIs(x.__class__, cls, + self.assertTrue(x.__class__ is cls, "assigning %r as __class__ for %r silently failed" % (cls, x)) self.assertEqual(x.a, 1) for cls in G, J, K, L, M, N, P, R, list, Int: @@ -3055,7 +3064,8 @@ # Issue5283: when __class__ changes in __del__, the wrong # type gets DECREF'd. class O(object): - pass + def __del__(self): + pass class A(object): def __del__(self): self.__class__ = O @@ -3118,7 +3128,8 @@ except TypeError: pass else: - self.fail("%r's __dict__ can be modified" % cls) + if support.check_impl_detail(pypy=False): + self.fail("%r's __dict__ can be modified" % cls) # Modules also disallow __dict__ assignment class Module1(types.ModuleType, Base): diff --git a/lib-python/3/test/test_exceptions.py b/lib-python/3/test/test_exceptions.py --- a/lib-python/3/test/test_exceptions.py +++ b/lib-python/3/test/test_exceptions.py @@ -512,6 +512,7 @@ except MyException as e: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -523,6 +524,7 @@ except MyException: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -534,6 +536,7 @@ except: pass obj = None + gc_collect() obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -546,6 +549,7 @@ except: break obj = None + gc_collect() # XXX it seems it's not enough obj = wr() self.assertTrue(obj is None, "%s" % obj) @@ -564,6 +568,7 @@ # must clear the latter manually for our test to succeed. 
e.__context__ = None obj = None + gc_collect() obj = wr() # guarantee no ref cycles on CPython (don't gc_collect) if check_impl_detail(cpython=False): @@ -708,6 +713,7 @@ next(g) testfunc(g) g = obj = None + gc_collect() obj = wr() self.assertIs(obj, None) @@ -761,6 +767,7 @@ raise Exception(MyObject()) except: pass + gc_collect() self.assertEqual(e, (None, None, None)) def testUnicodeChangeAttributes(self): @@ -911,6 +918,7 @@ self.assertNotEqual(wr(), None) else: self.fail("MemoryError not raised") + gc_collect() self.assertEqual(wr(), None) @no_tracing @@ -931,6 +939,7 @@ self.assertNotEqual(wr(), None) else: self.fail("RuntimeError not raised") + gc_collect() self.assertEqual(wr(), None) def test_errno_ENOTDIR(self): diff --git a/lib-python/3/test/test_fileio.py b/lib-python/3/test/test_fileio.py --- a/lib-python/3/test/test_fileio.py +++ b/lib-python/3/test/test_fileio.py @@ -10,6 +10,7 @@ from functools import wraps from test.support import TESTFN, check_warnings, run_unittest, make_bad_fd, cpython_only +from test.support import gc_collect from collections import UserList from _io import FileIO as _FileIO @@ -32,6 +33,7 @@ self.assertEqual(self.f.tell(), p.tell()) self.f.close() self.f = None + gc_collect() self.assertRaises(ReferenceError, getattr, p, 'tell') def testSeekTell(self): diff --git a/lib-python/3/test/test_functools.py b/lib-python/3/test/test_functools.py --- a/lib-python/3/test/test_functools.py +++ b/lib-python/3/test/test_functools.py @@ -45,6 +45,8 @@ self.assertEqual(p.args, (1, 2)) self.assertEqual(p.keywords, dict(a=10, b=20)) # attributes should not be writable + if not support.check_impl_detail(): + return self.assertRaises(AttributeError, setattr, p, 'func', map) self.assertRaises(AttributeError, setattr, p, 'args', (1, 2)) self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2)) @@ -136,6 +138,7 @@ p = proxy(f) self.assertEqual(f.func, p.func) f = None + support.gc_collect() self.assertRaises(ReferenceError, getattr, 
p, 'func') def test_with_bound_and_unbound_methods(self): @@ -192,9 +195,13 @@ raise IndexError f = self.thetype(object) - self.assertRaisesRegex(SystemError, - "new style getargs format but argument is not a tuple", - f.__setstate__, BadSequence()) + if support.check_impl_detail(pypy=True): + # CPython fails, pypy does not :-) + f.__setstate__(BadSequence()) + else: + self.assertRaisesRegex(SystemError, + "new style getargs format but argument is not a tuple", + f.__setstate__, BadSequence()) class PartialSubclass(functools.partial): pass @@ -223,7 +230,7 @@ updated=functools.WRAPPER_UPDATES): # Check attributes were assigned for name in assigned: - self.assertTrue(getattr(wrapper, name) is getattr(wrapped, name)) + self.assertTrue(getattr(wrapper, name) == getattr(wrapped, name)) # Check attributes were updated for name in updated: wrapper_attr = getattr(wrapper, name) diff --git a/lib-python/3/test/test_int.py b/lib-python/3/test/test_int.py --- a/lib-python/3/test/test_int.py +++ b/lib-python/3/test/test_int.py @@ -307,9 +307,10 @@ try: int(TruncReturnsNonIntegral()) except TypeError as e: - self.assertEqual(str(e), - "__trunc__ returned non-Integral" - " (type NonIntegral)") + if support.check_impl_detail(pypy=False): + self.assertEqual(str(e), + "__trunc__ returned non-Integral" + " (type NonIntegral)") else: self.fail("Failed to raise TypeError with %s" % ((base, trunc_result_base),)) diff --git a/lib-python/3/test/test_marshal.py b/lib-python/3/test/test_marshal.py --- a/lib-python/3/test/test_marshal.py +++ b/lib-python/3/test/test_marshal.py @@ -203,6 +203,7 @@ s = b'c' + (b'X' * 4*4) + b'{' * 2**20 self.assertRaises(ValueError, marshal.loads, s) + @support.impl_detail('specific recursion check') def test_recursion_limit(self): # Create a deeply nested structure. 
head = last = [] @@ -291,6 +292,10 @@ LARGE_SIZE = 2**31 pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4 +if support.check_impl_detail(pypy=False): + sizeof_large_size = sys.getsizeof(LARGE_SIZE-1) +else: + sizeof_large_size = 32 # Some value for PyPy class NullWriter: def write(self, s): @@ -318,13 +323,13 @@ self.check_unmarshallable([None] * size) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12 + sizeof_large_size, dry_run=False) def test_set(self, size): self.check_unmarshallable(set(range(size))) @support.bigmemtest(size=LARGE_SIZE, - memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1), + memuse=pointer_size*12 + sizeof_large_size, dry_run=False) def test_frozenset(self, size): self.check_unmarshallable(frozenset(range(size))) diff --git a/lib-python/3/test/test_peepholer.py b/lib-python/3/test/test_peepholer.py --- a/lib-python/3/test/test_peepholer.py +++ b/lib-python/3/test/test_peepholer.py @@ -81,10 +81,13 @@ self.assertIn(elem, asm) def test_pack_unpack(self): + # On PyPy, "a, b = ..." is even more optimized, by removing + # the ROT_TWO. But the ROT_TWO is not removed if assigning + # to more complex expressions, so check that. for line, elem in ( ('a, = a,', 'LOAD_CONST',), - ('a, b = a, b', 'ROT_TWO',), - ('a, b, c = a, b, c', 'ROT_THREE',), + ('a[1], b = a, b', 'ROT_TWO',), + ('a, b[2], c = a, b, c', 'ROT_THREE',), ): asm = dis_single(line) self.assertIn(elem, asm) @@ -92,6 +95,8 @@ self.assertNotIn('UNPACK_TUPLE', asm) def test_folding_of_tuples_of_constants(self): + # On CPython, "a,b,c=1,2,3" turns into "a,b,c=" + # but on PyPy, it turns into "a=1;b=2;c=3". 
for line, elem in ( ('a = 1,2,3', '((1, 2, 3))'), ('("a","b","c")', "(('a', 'b', 'c'))"), @@ -100,7 +105,8 @@ ('((1, 2), 3, 4)', '(((1, 2), 3, 4))'), ): asm = dis_single(line) - self.assertIn(elem, asm) + self.assert_(elem in asm or ( + line == 'a,b,c = 1,2,3' and 'UNPACK_TUPLE' not in asm)) self.assertNotIn('BUILD_TUPLE', asm) # Long tuples should be folded too. diff --git a/lib-python/3/test/test_subprocess.py b/lib-python/3/test/test_subprocess.py --- a/lib-python/3/test/test_subprocess.py +++ b/lib-python/3/test/test_subprocess.py @@ -1314,6 +1314,7 @@ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) + @support.impl_detail("PyPy's _posixsubprocess doesn't have to disable gc") def test_preexec_gc_module_failure(self): # This tests the code that disables garbage collection if the child # process will execute any Python. @@ -1964,6 +1965,7 @@ ident = id(p) pid = p.pid del p + support.gc_collect() # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) @@ -1983,6 +1985,7 @@ ident = id(p) pid = p.pid del p + support.gc_collect() os.kill(pid, signal.SIGKILL) # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -405,8 +405,10 @@ self.assertEqual(len(sys.float_info), 11) self.assertEqual(sys.float_info.radix, 2) self.assertEqual(len(sys.int_info), 2) - self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) - self.assertTrue(sys.int_info.sizeof_digit >= 1) + if test.support.check_impl_detail(cpython=True): + self.assertTrue(sys.int_info.bits_per_digit % 5 == 0) + else: + self.assertTrue(sys.int_info.sizeof_digit >= 1) self.assertEqual(type(sys.int_info.bits_per_digit), int) self.assertEqual(type(sys.int_info.sizeof_digit), int) self.assertIsInstance(sys.hexversion, int) @@ -503,6 
+505,7 @@ self.assertTrue(repr(sys.flags)) self.assertEqual(len(sys.flags), len(attrs)) + @test.support.impl_detail("sys._clear_type_cache", pypy=False) def test_clear_type_cache(self): sys._clear_type_cache() diff --git a/lib-python/3/test/test_weakref.py b/lib-python/3/test/test_weakref.py --- a/lib-python/3/test/test_weakref.py +++ b/lib-python/3/test/test_weakref.py @@ -8,6 +8,7 @@ import copy from test import support +from test.support import gc_collect # Used in ReferencesTestCase.test_ref_created_during_del() . ref_from_del = None @@ -88,6 +89,7 @@ ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del o + gc_collect() self.assertIsNone(ref1(), "expected reference to be invalidated") self.assertIsNone(ref2(), "expected reference to be invalidated") self.assertEqual(self.cbcalled, 2, @@ -117,13 +119,16 @@ ref1 = weakref.proxy(o, self.callback) ref2 = weakref.proxy(o, self.callback) del o + gc_collect() def check(proxy): proxy.bar self.assertRaises(ReferenceError, check, ref1) self.assertRaises(ReferenceError, check, ref2) - self.assertRaises(ReferenceError, bool, weakref.proxy(C())) + ref3 = weakref.proxy(C()) + gc_collect() + self.assertRaises(ReferenceError, bool, ref3) self.assertEqual(self.cbcalled, 2) def check_basic_ref(self, factory): @@ -140,6 +145,7 @@ o = factory() ref = weakref.ref(o, self.callback) del o + gc_collect() self.assertEqual(self.cbcalled, 1, "callback did not properly set 'cbcalled'") self.assertIsNone(ref(), @@ -164,6 +170,7 @@ self.assertEqual(weakref.getweakrefcount(o), 2, "wrong weak ref count for object") del proxy + gc_collect() self.assertEqual(weakref.getweakrefcount(o), 1, "wrong weak ref count for object after deleting proxy") @@ -338,6 +345,7 @@ "got wrong number of weak reference objects") del ref1, ref2, proxy1, proxy2 + gc_collect() self.assertEqual(weakref.getweakrefcount(o), 0, "weak reference objects not unlinked from" " referent when discarded.") @@ -351,6 +359,7 @@ ref1 = weakref.ref(o, 
self.callback) ref2 = weakref.ref(o, self.callback) del ref1 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [ref2], "list of refs does not match") @@ -358,10 +367,12 @@ ref1 = weakref.ref(o, self.callback) ref2 = weakref.ref(o, self.callback) del ref2 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [ref1], "list of refs does not match") del ref1 + gc_collect() self.assertEqual(weakref.getweakrefs(o), [], "list of refs not cleared") @@ -647,9 +658,11 @@ gc.collect() self.assertEqual(alist, []) + @support.impl_detail(pypy=False) def test_gc_during_ref_creation(self): self.check_gc_during_creation(weakref.ref) + @support.impl_detail(pypy=False) def test_gc_during_proxy_creation(self): self.check_gc_during_creation(weakref.proxy) @@ -811,6 +824,7 @@ self.assertTrue(mr.called) self.assertEqual(mr.value, 24) del o + gc_collect() self.assertIsNone(mr()) self.assertTrue(mr.called) @@ -917,6 +931,7 @@ n1 = len(dct) del it gc.collect() + gc.collect() n2 = len(dct) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) @@ -928,6 +943,7 @@ def test_weak_valued_len_cycles(self): self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k)) + @support.impl_detail(pypy=False) def check_len_race(self, dict_type, cons): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) @@ -976,15 +992,18 @@ del items1, items2 self.assertEqual(len(dict), self.COUNT) del objects[0] + gc_collect() self.assertEqual(len(dict), self.COUNT - 1, "deleting object did not cause dictionary update") del objects, o + gc_collect() self.assertEqual(len(dict), 0, "deleting the values did not clear the dictionary") # regression on SF bug #447152: dict = weakref.WeakValueDictionary() self.assertRaises(KeyError, dict.__getitem__, 1) dict[2] = C() + gc_collect() self.assertRaises(KeyError, dict.__getitem__, 2) def test_weak_keys(self): @@ -1005,9 +1024,11 @@ del items1, items2 
self.assertEqual(len(dict), self.COUNT) del objects[0] + gc_collect() self.assertEqual(len(dict), (self.COUNT - 1), "deleting object did not cause dictionary update") del objects, o + gc_collect() self.assertEqual(len(dict), 0, "deleting the keys did not clear the dictionary") o = Object(42) @@ -1368,6 +1389,7 @@ for o in objs: count += 1 del d[o] + gc_collect() self.assertEqual(len(d), 0) self.assertEqual(count, 2) @@ -1389,6 +1411,7 @@ libreftest = """ Doctest for examples in the library reference: weakref.rst +>>> from test.support import gc_collect >>> import weakref >>> class Dict(dict): ... pass @@ -1408,6 +1431,7 @@ >>> o is o2 True >>> del o, o2 +>>> gc_collect() >>> print(r()) None @@ -1460,6 +1484,7 @@ >>> id2obj(a_id) is a True >>> del a +>>> gc_collect() >>> try: ... id2obj(a_id) ... except KeyError: diff --git a/lib-python/3/test/test_weakset.py b/lib-python/3/test/test_weakset.py --- a/lib-python/3/test/test_weakset.py +++ b/lib-python/3/test/test_weakset.py @@ -416,11 +416,13 @@ n1 = len(s) del it gc.collect() + gc.collect() n2 = len(s) # one item may be kept alive inside the iterator self.assertIn(n1, (0, 1)) self.assertEqual(n2, 0) + @support.impl_detail("PyPy has no cyclic collection", pypy=False) def test_len_race(self): # Extended sanity checks for len() in the face of cyclic collection self.addCleanup(gc.set_threshold, *gc.get_threshold()) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,5 +1,6 @@ import os, sys, imp import tempfile, binascii +import importlib.machinery def 
get_hashed_dir(cfile): @@ -28,9 +29,8 @@ def _get_c_extension_suffix(): - for ext, mod, typ in imp.get_suffixes(): - if typ == imp.C_EXTENSION: - return ext + suffixes = importlib.machinery.EXTENSION_SUFFIXES + return suffixes[0] if suffixes else None def compile_shared(csource, modulename, output_dir=None): diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -1,7 +1,17 @@ -import sys, os, binascii, imp, shutil +import sys, os, binascii, shutil from . import __version__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): @@ -222,11 +232,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext if sys.platform == 'win32': diff --git a/lib_pypy/pyrepl/module_lister.py b/lib_pypy/pyrepl/module_lister.py --- a/lib_pypy/pyrepl/module_lister.py +++ b/lib_pypy/pyrepl/module_lister.py @@ -40,8 +40,8 @@ return sorted(set(l)) def _make_module_list(): - import imp - suffs = [x[0] for x in imp.get_suffixes() if x[0] != '.pyc'] + import importlib.machinery + suffs = [x for x in importlib.machinery.all_suffixes() if x != '.pyc'] suffs.sort(reverse=True) _packages[''] = list(sys.builtin_module_names) for dir in sys.path: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -37,7 +37,7 @@ "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "_continuation", "_cffi_backend", "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy" - "faulthandler", + "faulthandler", "_lzma", ]) translation_modules = default_modules.copy() @@ -106,6 +106,7 @@ "_hashlib" : ["pypy.module._ssl.interp_ssl"], "_minimal_curses": ["pypy.module._minimal_curses.fficurses"], "_continuation": ["rpython.rlib.rstacklet"], + "_lzma" : ["pypy.module._lzma.interp_lzma"], } def get_module_validator(modname): diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -740,7 +740,7 @@ Adding an entry under pypy/module (e.g. mymodule) entails automatic creation of a new config option (such as --withmod-mymodule and ---withoutmod-mymodule (the later being the default)) for py.py and +--withoutmod-mymodule (the latter being the default)) for py.py and translate.py. Testing modules in ``lib_pypy/`` @@ -931,7 +931,7 @@ assert self.result == 2 ** 6 which executes the code string function with the given arguments at app level. -Note the use of ``w_result`` in ``setup_class`` but self.result in the test +Note the use of ``w_result`` in ``setup_class`` but self.result in the test. 
Here is how to define an app level class in ``setup_class`` that can be used in subsequent tests:: diff --git a/pypy/doc/config/objspace.usemodules._lzma.txt b/pypy/doc/config/objspace.usemodules._lzma.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/objspace.usemodules._lzma.txt @@ -0,0 +1,2 @@ +Use the '_lzma' module. +This module is expected to be working and is included by default. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -328,7 +328,7 @@ * directly calling the internal magic methods of a few built-in types with invalid arguments may have a slightly different result. For example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return - ``NotImplemented`` on PyPy; on CPython, only the later does, and the + ``NotImplemented`` on PyPy; on CPython, only the latter does, and the former raises ``TypeError``. (Of course, ``[]+None`` and ``2+None`` both raise ``TypeError`` everywhere.) 
This difference is an implementation detail that shows up because of internal C-level slots diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -7,7 +7,7 @@ from pypy.tool import stdlib_opcode as ops from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib import rfloat @@ -141,11 +141,12 @@ i += 1 return result -def _list_to_dict(l, offset=0): + at specialize.argtype(0) +def _iter_to_dict(iterable, offset=0): result = {} index = offset - for i in range(len(l)): - result[l[i]] = index + for item in iterable: + result[item] = index index += 1 return result @@ -161,10 +162,10 @@ self.first_block = self.new_block() self.use_block(self.first_block) self.names = {} - self.var_names = _list_to_dict(scope.varnames) + self.var_names = _iter_to_dict(scope.varnames) self.cell_vars = _make_index_dict_filter(scope.symbols, symtable.SCOPE_CELL) - self.free_vars = _list_to_dict(scope.free_vars, len(self.cell_vars)) + self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars)) self.w_consts = space.newdict() self.argcount = 0 self.kwonlyargcount = 0 diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -244,7 +244,7 @@ self.emit_op_arg(op, self.add_name(container, identifier)) def possible_docstring(self, node): - if isinstance(node, ast.Expr): + if isinstance(node, ast.Expr) and self.compile_info.optimize < 2: expr_value = node.value if isinstance(expr_value, ast.Str): return expr_value diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ 
b/pypy/interpreter/astcompiler/symtable.py @@ -37,7 +37,7 @@ self.roles = {} self.varnames = [] self.children = [] - self.free_vars = [] + self.free_vars = {} self.temp_name_counter = 1 self.has_free = False self.child_has_free = False @@ -136,7 +136,9 @@ err = "no binding for nonlocal '%s' found" % (name,) raise SyntaxError(err, self.lineno, self.col_offset) self.symbols[name] = SCOPE_FREE + self.free_vars[name] = None free[name] = None + self.has_free = True elif flags & SYM_BOUND: self.symbols[name] = SCOPE_LOCAL local[name] = None @@ -146,7 +148,7 @@ pass elif bound and name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None free[name] = None self.has_free = True elif name in globs: @@ -203,7 +205,7 @@ except KeyError: if name in bound: self.symbols[name] = SCOPE_FREE - self.free_vars.append(name) + self.free_vars[name] = None else: if role_here & (SYM_BOUND | SYM_GLOBAL) and \ self._hide_bound_from_nested_scopes: @@ -212,7 +214,7 @@ # scope. We add the name to the class scope's list of free # vars, so it will be passed through by the interpreter, but # we leave the scope alone, so it can be local on its own. 
- self.free_vars.append(name) + self.free_vars[name] = None self._check_optimization() free.update(new_free) @@ -244,18 +246,12 @@ return Scope.note_symbol(self, identifier, role) def note_yield(self, yield_node): - if self.return_with_value: - raise SyntaxError("'return' with argument inside generator", - self.ret.lineno, self.ret.col_offset) self.is_generator = True if self._in_try_body_depth > 0: self.has_yield_inside_try = True def note_return(self, ret): if ret.value: - if self.is_generator: - raise SyntaxError("'return' with argument inside generator", - ret.lineno, ret.col_offset) self.return_with_value = True self.ret = ret diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -808,6 +808,28 @@ return y""" yield self.st, test, "f()", 4 + def test_nonlocal_from_arg(self): + test = """if 1: + def test1(x): + def test2(): + nonlocal x + def test3(): + return x + return test3() + return test2()""" + yield self.st, test, "test1(2)", 2 + + def test_class_nonlocal_from_arg(self): + test = """if 1: + def f(x): + class c: + nonlocal x + x += 1 + def get(self): + return x + return c().get()""" + yield self.st, test, "f(3)", 4 + def test_lots_of_loops(self): source = "for x in y: pass\n" * 1000 compile_with_astcompiler(source, 'exec', self.space) diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -361,8 +361,7 @@ assert exc.msg == "'yield' outside function" for input in ("yield\n return x", "return x\n yield"): input = "def f():\n " + input - exc = py.test.raises(SyntaxError, self.func_scope, input).value - assert exc.msg == "'return' with argument inside generator" + scp = self.func_scope(input) scp = 
self.func_scope("def f():\n return\n yield x") def test_yield_inside_try(self): diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -14,6 +14,9 @@ def __init__(self, message): self.message = message + def __str__(self): + return self.message + def expr_context_name(ctx): if not 1 <= ctx <= len(ast.expr_context_to_class): diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -99,7 +99,7 @@ # if the frame is now marked as finished, it was RETURNed from if frame.frame_finished_execution: self.frame = None - raise OperationError(space.w_StopIteration, space.w_None) + raise OperationError(space.w_StopIteration, w_result) else: return w_result # YIELDed finally: diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -3,7 +3,7 @@ """ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.objectmodel import we_are_translated @@ -123,3 +123,10 @@ except OperationError: __file__ = u'?' 
return space.wrap(u"" % (name, __file__)) + + def descr_module__dir__(self, space): + w_dict = space.getattr(self, space.wrap('__dict__')) + if not space.isinstance_w(w_dict, space.w_dict): + raise oefmt(space.w_TypeError, "%N.__dict__ is not a dictionary", + self) + return space.call_function(space.w_list, w_dict) diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -75,8 +75,9 @@ if self.w_value is None: content = "empty" else: - content = "%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) - s = "" % (uid(self), content) + content = "%s object at 0x%s" % (space.type(self.w_value).name, + self.w_value.getaddrstring(space)) + s = "" % (self.getaddrstring(space), content) return space.wrap(s.decode('utf-8')) def descr__cell_contents(self, space): diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -4,7 +4,7 @@ The bytecode interpreter itself is implemented by the PyFrame class. 
""" -import dis, imp, struct, types, new, sys +import imp, struct, types, new, sys from pypy.interpreter import eval from pypy.interpreter.signature import Signature @@ -13,6 +13,7 @@ from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) +from pypy.tool import dis3 from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash, we_are_translated @@ -60,7 +61,7 @@ else: varargname = None if code.co_flags & CO_VARKEYWORDS: - kwargname = code.co_varnames[argcount] + kwargname = code.co_varnames[argcount+kwonlyargcount] argcount += 1 else: kwargname = None @@ -245,33 +246,6 @@ if isinstance(w_co, PyCode): w_co.remove_docstrings(space) - def _to_code(self): - """For debugging only.""" - consts = [None] * len(self.co_consts_w) - num = 0 - for w in self.co_consts_w: - if isinstance(w, PyCode): - consts[num] = w._to_code() - else: - consts[num] = self.space.unwrap(w) - num += 1 - assert self.co_kwonlyargcount == 0, 'kwonlyargcount is py3k only, cannot turn this code object into a Python2 one' - return new.code(self.co_argcount, - #self.co_kwonlyargcount, # this does not exists in python2 - self.co_nlocals, - self.co_stacksize, - self.co_flags, - self.co_code, - tuple(consts), - tuple(self.co_names), - tuple(self.co_varnames), - self.co_filename, - self.co_name, - self.co_firstlineno, - self.co_lnotab, - tuple(self.co_freevars), - tuple(self.co_cellvars)) - def exec_host_bytecode(self, w_globals, w_locals): if sys.version_info < (2, 7): raise Exception("PyPy no longer supports Python 2.6 or lower") @@ -280,11 +254,11 @@ return frame.run() def dump(self): - """A dis.dis() dump of the code object.""" - print 'WARNING: dumping a py3k bytecode using python2 opmap, the result might be inaccurate or wrong' - print - co = self._to_code() - dis.dis(co) + """NOT_RPYTHON: A 
dis.dis() dump of the code object.""" + if not hasattr(self, 'co_consts'): + self.co_consts = [w if isinstance(w, PyCode) else self.space.unwrap(w) + for w in self.co_consts_w] + dis3.dis(self) def fget_co_consts(self, space): return space.newtuple(self.co_consts_w) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -1006,13 +1006,14 @@ else: w_retval = space.call_method(w_gen, "send", w_value) except OperationError as e: - if not e.match(self.space, self.space.w_StopIteration): + if not e.match(space, space.w_StopIteration): raise self.popvalue() # Remove iter from stack + e.normalize_exception(space) try: w_value = space.getattr(e.get_w_value(space), space.wrap("value")) except OperationError as e: - if not e.match(self.space, self.space.w_AttributeError): + if not e.match(space, space.w_AttributeError): raise w_value = space.w_None self.pushvalue(w_value) diff --git a/pypy/interpreter/pytraceback.py b/pypy/interpreter/pytraceback.py --- a/pypy/interpreter/pytraceback.py +++ b/pypy/interpreter/pytraceback.py @@ -50,6 +50,10 @@ self.lasti = space.int_w(w_lasti) self.next = space.interp_w(PyTraceback, w_next, can_be_None=True) + def descr__dir__(self, space): + return space.newlist([space.wrap(n) for n in + ['tb_frame', 'tb_next', 'tb_lasti', 'tb_lineno']]) + def record_application_traceback(space, operror, frame, last_instruction): if 
frame.pycode.hidden_applevel: diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py --- a/pypy/interpreter/test/test_compiler.py +++ b/pypy/interpreter/test/test_compiler.py @@ -268,10 +268,7 @@ def test_return_in_generator(self): code = 'def f():\n return None\n yield 19\n' - e = py.test.raises(OperationError, self.compiler.compile, code, '', 'single', 0) - ex = e.value - ex.normalize_exception(self.space) - assert ex.match(self.space, self.space.w_SyntaxError) + self.compiler.compile(code, '', 'single', 0) def test_yield_in_finally(self): code ='def f():\n try:\n yield 19\n finally:\n pass\n' @@ -717,6 +714,27 @@ else: py.test.fail("Did not raise") + def test_signature_kwargname(self): + from pypy.interpreter.pycode import cpython_code_signature + from pypy.interpreter.signature import Signature + + def find_func(code): + for w_const in code.co_consts_w: + if isinstance(w_const, PyCode): + return w_const + + snippet = 'def f(a, b, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 'single', 0) + co = find_func(containing_co) + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b', 'm', 'n'], None, 'kwargs', []) + + snippet = 'def f(a, b, *, m=1, n=2, **kwargs): pass' + containing_co = self.compiler.compile(snippet, '', 'single', 0) + co = find_func(containing_co) + sig = cpython_code_signature(co) + assert sig == Signature(['a', 'b'], None, 'kwargs', ['m', 'n']) + class AppTestCompiler: diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -301,6 +301,39 @@ raise StopIteration assert tuple(f()) == (1,) + def test_yield_return(self): + """ + def f(): + yield 1 + return 2 + g = f() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' + """ + + def 
test_yield_from_return(self): + """ + def f1(): + result = yield from f2() + return result + def f2(): + yield 1 + return 2 + g = f1() + assert next(g) == 1 + try: + next(g) + except StopIteration as e: + assert e.value == 2 + else: + assert False, 'Expected StopIteration' + """ + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -68,6 +68,11 @@ m = type(_pypy_interact).__new__(type(_pypy_interact)) assert repr(m).startswith(">>\n' + output + '\n<<<' + assert ' 1 (7)' in output + assert ' 3 (None)' in output + assert ' 16 RETURN_VALUE ' in output diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -779,6 +779,7 @@ __new__ = interp2app(Module.descr_module__new__.im_func), __init__ = interp2app(Module.descr_module__init__), __repr__ = interp2app(Module.descr_module__repr__), + __dir__ = interp2app(Module.descr_module__dir__), __reduce__ = interp2app(Module.descr__reduce__), __dict__ = GetSetProperty(descr_get_dict, cls=Module), # module dictionaries are readonly attributes __doc__ = 'module(name[, doc])\n\nCreate a module object.\nThe name must be a string; the optional doc argument can have any type.' 
@@ -912,6 +913,7 @@ PyTraceback.typedef = TypeDef("traceback", __reduce__ = interp2app(PyTraceback.descr__reduce__), __setstate__ = interp2app(PyTraceback.descr__setstate__), + __dir__ = interp2app(PyTraceback.descr__dir__), tb_frame = interp_attrproperty('frame', cls=PyTraceback), tb_lasti = interp_attrproperty('lasti', cls=PyTraceback), tb_lineno = GetSetProperty(PyTraceback.descr_tb_lineno), diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. If @@ -18,11 +18,10 @@ return _caller_locals() elif len(obj) != 1: raise TypeError("vars() takes at most 1 argument.") - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError("vars() argument must have __dict__ attribute") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,76 +37,16 @@ attributes of its class's base classes. 
""" if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: - local_names = list(_caller_locals().keys()) # 2 stackframes away - local_names.sort() - return local_names - - import types + return sorted(_caller_locals().keys()) # 2 stackframes away obj = args[0] - - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): - result = list(result) # Will throw TypeError if not iterable - result.sort() - return result - elif isinstance(obj, types.ModuleType): - try: - result = list(obj.__dict__) - result.sort() - return result - except AttributeError: - return [] - - elif isinstance(obj, type): - #Don't look at __class__, as metaclass methods would be confusing. - result = list(_classdir(obj).keys()) - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass - result = list(Dict.keys()) - result.sort() - return result - -def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. - - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. - """ - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. 
- bases = klass.__bases__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + # obscure: lookup_special won't bind None.__dir__! + result = dir_meth(obj) if obj is None else dir_meth() + # Will throw TypeError if not iterable + return sorted(result) + # we should never reach here since object.__dir__ exists + return [] diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -670,8 +670,8 @@ def descr_reduce(self, space): w_map = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('map')) - args = [self.w_fun] + self.iterators_w - return space.newtuple([w_map, space.newtuple(args)]) + args_w = [self.w_fun] + self.iterators_w + return space.newtuple([w_map, space.newtuple(args_w)]) def W_Map___new__(space, w_subtype, w_fun, args_w): @@ -721,9 +721,9 @@ def descr_reduce(self, space): w_filter = space.getattr(space.getbuiltinmodule('builtins'), space.wrap('filter')) - args = [space.w_None if self.no_predicate else self.w_predicate, - self.iterable] - return space.newtuple([w_filter, space.newtuple(args)]) + args_w = [space.w_None if self.no_predicate else self.w_predicate, + self.iterable] + return space.newtuple([w_filter, space.newtuple(args_w)]) def W_Filter___new__(space, w_subtype, w_predicate, w_iterable): diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/test_compile.py --- a/pypy/module/__builtin__/test/test_compile.py +++ b/pypy/module/__builtin__/test/test_compile.py @@ -1,8 +1,7 @@ class AppTestCompile: - # TODO: This test still fails for now because the docstrings are not - # removed with optimize=2. 
- def untest_compile(self): + def test_compile(self): + """Clone of the part of the original test that was failing.""" import ast codestr = '''def f(): @@ -37,7 +36,7 @@ assert rv == (debugval, docstring) def test_assert_remove(self): - """Test just removal of the asserts with optimize=1.""" + """Test removal of the asserts with optimize=1.""" import ast code = """def f(): @@ -50,9 +49,73 @@ exec(compiled, ns) ns['f']() + def test_docstring_remove(self): + """Test removal of docstrings with optimize=2.""" + import ast + import marshal -# TODO: Remove docstrings with optimize=2. + code = """ +'module_doc' + +def f(): + 'func_doc' + +class C: + 'class_doc' +""" + tree = ast.parse(code) + for to_compile in [code, tree]: + compiled = compile(to_compile, "", "exec", optimize=2) + + ns = {} + exec(compiled, ns) + assert '__doc__' not in ns + assert ns['f'].__doc__ is None + assert ns['C'].__doc__ is None + + # Check that the docstrings are gone from the bytecode and not just + # inaccessible. 
+ marshalled = str(marshal.dumps(compiled)) + assert 'module_doc' not in marshalled + assert 'func_doc' not in marshalled + assert 'class_doc' not in marshalled + + +class TestOptimizeO: + """Test interaction of -O flag and optimize parameter of compile.""" + + def setup_method(self, method): + space = self.space + self._sys_debug = space.sys.debug + # imitate -O + space.sys.debug = False + + def teardown_method(self, method): + self.space.sys.debug = self._sys_debug + + def test_O_optmize_0(self): + """Test that assert is not ignored if -O flag is set but optimize=0.""" + space = self.space + w_res = space.appexec([], """(): + assert False # check that our -O imitation hack works + try: + exec(compile('assert False', '', 'exec', optimize=0)) + except AssertionError: + return True + else: + return False + """) + assert space.unwrap(w_res) + + def test_O_optimize__1(self): + """Test that assert is ignored with -O and optimize=-1.""" + space = self.space + space.appexec([], """(): + exec(compile('assert False', '', 'exec', optimize=-1)) + """) + + # TODO: Check the value of __debug__ inside of the compiled block! # According to the documentation, it should follow the optimize flag. -# TODO: It would also be good to test that with the assert is not removed and -# is executed when -O flag is set but optimize=0. +# However, cpython3.5.0a0 behaves the same way as PyPy (__debug__ follows +# -O, -OO flags of the interpreter). 
diff --git a/pypy/module/__builtin__/test/test_dir.py b/pypy/module/__builtin__/test/test_dir.py --- a/pypy/module/__builtin__/test/test_dir.py +++ b/pypy/module/__builtin__/test/test_dir.py @@ -24,3 +24,82 @@ def __dir__(self): return 42 raises(TypeError, dir, Foo()) + + def test_dir_traceback(self): + """Test dir() of traceback.""" + try: + raise IndexError + except Exception as e: + tb_dir = dir(e.__traceback__) + assert tb_dir == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next'] + + def test_dir_object_inheritance(self): + """Dir should behave the same regardless of inheriting from object.""" + class A: + pass + + class B(object): + pass + assert dir(A) == dir(B) + + def test_dir_sanity(self): + """Test that dir returns reasonable items.""" + class A(object): + a = 1 + + class B(A): + y = 2 + + b = B() + b.z = 1 + + names = dir(b) + for name in 'ayz': + assert name in names + + assert '__doc__' in names + assert '__module__' in names + assert '__dict__' in names + assert '__dir__' in names + assert '__weakref__' in names + assert '__class__' in names + assert '__format__' in names + # Not an exhaustive list, but will be enough if dir is very broken. + + def test_dir_module(self): + import sys + assert dir(sys) == list(sorted(sys.__dict__)) + + def test_dir_list(self): + """Check that dir([]) has methods from list and from object.""" + names = dir([]) + + dct = {} + dct.update(list.__dict__) + dct.update(object.__dict__) + + assert names == sorted(dct) + + def test_dir_builtins(self): + """Test that builtin objects have sane __dir__().""" + import sys + + for builtin in [sys, object(), [], {}, {1}, "", 1, (), sys, + map(ord, "abc"), filter(None, "abc"), zip([1, 2], [3, 4]), + compile('1', '', 'exec')]: + assert sorted(builtin.__dir__()) == dir(builtin) + + def test_dir_type(self): + """Test .__dir__() and dir(...) behavior on types. + + * t.__dir__() throws a TypeError, + * dir(t) == sorted(t().__dir__()) + + This is the behavior that I observe with cpython3.3.2. 
+ """ + for t in [int, list, tuple, set, str]: + raises(TypeError, t.__dir__) + assert dir(t) == sorted(t().__dir__()) + + def test_dir_none(self): + assert dir(None) == sorted(None.__dir__()) diff --git a/pypy/module/__builtin__/test/test_format.py b/pypy/module/__builtin__/test/test_format.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_format.py @@ -0,0 +1,38 @@ +class AppTestFormat: + + def test_format(self): + """Test deprecation warnings from format(object(), 'nonempty')""" + + import warnings + + def test_deprecated(obj, fmt_str, should_raise_warning): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", DeprecationWarning) + format(obj, fmt_str) + if should_raise_warning: + assert len(w) == 1 + assert isinstance(w[0].message, DeprecationWarning) + assert 'object.__format__ with a non-empty format string '\ + in str(w[0].message) + else: + assert len(w) == 0 + + fmt_strs = ['', 's'] + + class A: + def __format__(self, fmt_str): + return format('', fmt_str) + + for fmt_str in fmt_strs: + test_deprecated(A(), fmt_str, False) + + class B: + pass + + class C(object): + pass + + for cls in [object, B, C]: + for fmt_str in fmt_strs: + print(cls, fmt_str) + test_deprecated(cls(), fmt_str, len(fmt_str) != 0) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -287,7 +287,8 @@ try: w_locale = space.call_method(space.builtin, '__import__', space.wrap('locale')) - w_encoding = space.call_method(w_locale, 'getpreferredencoding') + w_encoding = space.call_method(w_locale, 'getpreferredencoding', + space.w_False) except OperationError as e: # getpreferredencoding() may also raise ImportError if not e.match(space, space.w_ImportError): diff --git a/pypy/module/_lzma/__init__.py b/pypy/module/_lzma/__init__.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/__init__.py @@ -0,0 +1,20 @@ +from 
pypy.interpreter.mixedmodule import MixedModule + +class Module(MixedModule): + # The private part of the lzma module. + + applevel_name = '_lzma' + + interpleveldefs = { + 'LZMACompressor': 'interp_lzma.W_LZMACompressor', + 'LZMADecompressor': 'interp_lzma.W_LZMADecompressor', + '_encode_filter_properties': 'interp_lzma.encode_filter_properties', + '_decode_filter_properties': 'interp_lzma.decode_filter_properties', + 'FORMAT_AUTO': 'space.wrap(interp_lzma.FORMAT_AUTO)', + 'FORMAT_XZ': 'space.wrap(interp_lzma.FORMAT_XZ)', + 'FORMAT_ALONE': 'space.wrap(interp_lzma.FORMAT_ALONE)', + 'FORMAT_RAW': 'space.wrap(interp_lzma.FORMAT_RAW)', + } + + appleveldefs = { + } diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py new file mode 100644 --- /dev/null +++ b/pypy/module/_lzma/interp_lzma.py @@ -0,0 +1,360 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import ( + TypeDef, interp_attrproperty_bytes, interp_attrproperty) +from pypy.interpreter.error import oefmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.module.thread.os_lock import Lock +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import LONGLONG_MASK, r_ulonglong +from rpython.rtyper.tool import rffi_platform as platform +from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype +from rpython.translator.tool.cbuild import ExternalCompilationInfo + + +FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4) +R_LONGLONG_MASK = r_ulonglong(LONGLONG_MASK) + + +eci = ExternalCompilationInfo( + includes = ['lzma.h'], + libraries = ['lzma'], + ) +eci = platform.configure_external_library( + 'lzma', eci, + [dict(prefix='lzma-')]) +if not eci: + raise ImportError("Could not find lzma library") + + +class CConfig: + _compilation_info_ = eci + calling_conv = 'c' + + BUFSIZ = platform.ConstantInteger("BUFSIZ") + + lzma_stream = platform.Struct( + 
'lzma_stream', + [('next_in', rffi.CCHARP), + ('avail_in', rffi.UINT), + ('total_in', rffi.UINT), + ('next_out', rffi.CCHARP), + ('avail_out', rffi.UINT), + ('total_out', rffi.UINT), + ]) + + lzma_options_lzma = platform.Struct( + 'lzma_options_lzma', + []) + +constant_names = ''' + LZMA_RUN LZMA_FINISH + LZMA_OK LZMA_GET_CHECK LZMA_NO_CHECK LZMA_STREAM_END + LZMA_PRESET_DEFAULT + LZMA_CHECK_ID_MAX + LZMA_TELL_ANY_CHECK LZMA_TELL_NO_CHECK + '''.split() +for name in constant_names: + setattr(CConfig, name, platform.ConstantInteger(name)) + +class cConfig(object): + pass +for k, v in platform.configure(CConfig).items(): + setattr(cConfig, k, v) + +for name in constant_names: + globals()[name] = getattr(cConfig, name) +lzma_stream = lltype.Ptr(cConfig.lzma_stream) +lzma_options_lzma = lltype.Ptr(cConfig.lzma_options_lzma) +BUFSIZ = cConfig.BUFSIZ +LZMA_CHECK_UNKNOWN = LZMA_CHECK_ID_MAX + 1 + +def external(name, args, result, **kwds): + return rffi.llexternal(name, args, result, compilation_info= + CConfig._compilation_info_, **kwds) + +lzma_ret = rffi.INT +lzma_action = rffi.INT +lzma_bool = rffi.INT + +lzma_lzma_preset = external('lzma_lzma_preset', [lzma_options_lzma, rffi.UINT], lzma_bool) +lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret) +lzma_end = external('lzma_end', [lzma_stream], lltype.Void, releasegil=False) + +lzma_auto_decoder = external('lzma_auto_decoder', [lzma_stream, rffi.LONG, rffi.INT], lzma_ret) +lzma_get_check = external('lzma_get_check', [lzma_stream], rffi.INT) + +lzma_code = external('lzma_code', [lzma_stream, lzma_action], rffi.INT) + + + at specialize.arg(1) +def raise_error(space, fmt, *args): + raise oefmt(space.w_RuntimeError, fmt, *args) + + +def _catch_lzma_error(space, lzret): + if (lzret == LZMA_OK or lzret == LZMA_GET_CHECK or + lzret == LZMA_NO_CHECK or lzret == LZMA_STREAM_END): + return + raise raise_error(space, "Unrecognized error from liblzma: %d", lzret) + + +if BUFSIZ < 8192: + 
SMALLCHUNK = 8192 +else: + SMALLCHUNK = BUFSIZ +if rffi.sizeof(rffi.INT) > 4: + BIGCHUNK = 512 * 32 +else: + BIGCHUNK = 512 * 1024 + + +def _new_buffer_size(current_size): + # keep doubling until we reach BIGCHUNK; then the buffer size is no + # longer increased + if current_size < BIGCHUNK: + return current_size + current_size + return current_size + + +class OutBuffer(object): + """Handler for the output buffer. A bit custom code trying to + encapsulate the logic of setting up the fields of 'lzs' and + allocating raw memory as needed. + """ + def __init__(self, lzs, initial_size=SMALLCHUNK): + # when the constructor is called, allocate a piece of memory + # of length 'piece_size' and make lzs ready to dump there. + self.temp = [] + self.lzs = lzs + self._allocate_chunk(initial_size) + + def _allocate_chunk(self, size): + self.raw_buf, self.gc_buf = rffi.alloc_buffer(size) + self.current_size = size + self.lzs.c_next_out = self.raw_buf + rffi.setintfield(self.lzs, 'c_avail_out', size) + + def _get_chunk(self, chunksize): + assert 0 <= chunksize <= self.current_size + raw_buf = self.raw_buf + gc_buf = self.gc_buf + s = rffi.str_from_buffer(raw_buf, gc_buf, self.current_size, chunksize) + rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) + self.current_size = 0 + return s + + def prepare_next_chunk(self): + size = self.current_size + self.temp.append(self._get_chunk(size)) + self._allocate_chunk(_new_buffer_size(size)) + + def make_result_string(self): + count_unoccupied = rffi.getintfield(self.lzs, 'c_avail_out') + s = self._get_chunk(self.current_size - count_unoccupied) + if self.temp: + self.temp.append(s) + return ''.join(self.temp) + else: + return s + + def free(self): + if self.current_size > 0: + rffi.keep_buffer_alive_until_here(self.raw_buf, self.gc_buf) + + def __enter__(self): + return self + def __exit__(self, *args): + self.free() + + +class W_LZMACompressor(W_Root): + def __init__(self, space, format): + self.format = format + self.lock = 
Lock(space) + self.flushed = False + self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True) + + def __del__(self): + lzma_end(self.lzs) + lltype.free(self.lzs, flavor='raw') + + def _init_alone(self, space, preset, w_filters): + if space.is_none(w_filters): + with lltype.scoped_alloc(lzma_options_lzma.TO) as options: + if lzma_lzma_preset(options, preset): + raise_error(space, "Invalid compression preset: %d", preset) + lzret = lzma_alone_encoder(self.lzs, options) + else: + raise NotImplementedError + _catch_lzma_error(space, lzret) + + @staticmethod + @unwrap_spec(format=int, + w_check=WrappedDefault(None), From noreply at buildbot.pypy.org Fri Aug 8 02:20:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 8 Aug 2014 02:20:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k-qualname: oops, bump the actual magic number that now lives in importlib! Message-ID: <20140808002055.0714C1C332E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k-qualname Changeset: r72716:9f0e2a2c6084 Date: 2014-08-07 17:20 -0700 http://bitbucket.org/pypy/pypy/changeset/9f0e2a2c6084/ Log: oops, bump the actual magic number that now lives in importlib! diff --git a/lib-python/3/importlib/_bootstrap.py b/lib-python/3/importlib/_bootstrap.py --- a/lib-python/3/importlib/_bootstrap.py +++ b/lib-python/3/importlib/_bootstrap.py @@ -405,7 +405,7 @@ due to the addition of new opcodes). """ -_RAW_MAGIC_NUMBER = 3230 | ord('\r') << 16 | ord('\n') << 24 +_RAW_MAGIC_NUMBER = 64 | ord('\r') << 16 | ord('\n') << 24 _MAGIC_BYTES = bytes(_RAW_MAGIC_NUMBER >> n & 0xff for n in range(0, 25, 8)) _PYCACHE = '__pycache__' diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -36,6 +36,7 @@ # different value for the highest 16 bits. Bump pypy_incremental_magic every # time you make pyc files incompatible +# XXX: the canonical magic is now in importlib._bootstrap! 
pypy_incremental_magic = 64 # bump it by 16 assert pypy_incremental_magic % 16 == 0 assert pypy_incremental_magic < 3000 # the magic number of Python 3. There are From noreply at buildbot.pypy.org Fri Aug 8 08:38:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Aug 2014 08:38:02 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140808063802.123C91D24F4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r518:816a5cc2d940 Date: 2014-08-08 08:38 +0200 http://bitbucket.org/pypy/pypy.org/changeset/816a5cc2d940/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $51969 of $105000 (49.5%) + $52117 of $105000 (49.6%)
diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $48322 of $60000 (80.5%) + $48360 of $60000 (80.6%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $12959 of $80000 (16.2%) + $13393 of $80000 (16.7%)
From noreply at buildbot.pypy.org Fri Aug 8 09:22:39 2014 From: noreply at buildbot.pypy.org (waedt) Date: Fri, 8 Aug 2014 09:22:39 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Try to fix unicode.join([]). It passed locally but failed on the buildbot Message-ID: <20140808072239.E760A1C0157@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72717:e941f1e74248 Date: 2014-08-05 14:11 -0500 http://bitbucket.org/pypy/pypy/changeset/e941f1e74248/ Log: Try to fix unicode.join([]). It passed locally but failed on the buildbot diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py --- a/rpython/rtyper/rstr.py +++ b/rpython/rtyper/rstr.py @@ -235,7 +235,8 @@ else: if r_lst.item_repr == rstr.repr: llfn = self.ll.ll_join - elif r_lst.item_repr == char_repr: + elif (r_lst.item_repr == char_repr or + r_lst.item_repr == unichar_repr): llfn = self.ll.ll_join_chars_with_str else: raise TyperError("sep.join() of non-string list: %r" % r_lst) From noreply at buildbot.pypy.org Fri Aug 8 09:22:41 2014 From: noreply at buildbot.pypy.org (waedt) Date: Fri, 8 Aug 2014 09:22:41 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Some non-ascii Utf8Str's were being marked as ascii Message-ID: <20140808072241.2A0C41C0157@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72718:f324ba27ece1 Date: 2014-08-06 21:10 -0500 http://bitbucket.org/pypy/pypy/changeset/f324ba27ece1/ Log: Some non-ascii Utf8Str's were being marked as ascii diff --git a/pypy/interpreter/test/test_utf8_codecs.py b/pypy/interpreter/test/test_utf8_codecs.py --- a/pypy/interpreter/test/test_utf8_codecs.py +++ b/pypy/interpreter/test/test_utf8_codecs.py @@ -746,6 +746,12 @@ u = Utf8Str.from_unicode(unicode(s, 'raw-unicode-escape')) assert decoder(s, len(s), 'strict')[0] == u + def test_decode_unicode_escape(self): + decoder = self.getdecoder('unicode-escape') + s = '\\\xff' + u = Utf8Str.from_unicode(unicode(s, 'unicode-escape')) + assert 
decoder(s, len(s), 'strict')[0] == u + class TestTranslation(object): def test_utf8(self): diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -478,7 +478,7 @@ for s in other: if not s._is_ascii: is_ascii = False - break + break return Utf8Str(self.bytes.join([s.bytes for s in other]), is_ascii) else: assert isinstance(other[0], str) @@ -678,6 +678,8 @@ elif isinstance(s, Utf8Str): self._builder.append_slice(s.bytes, s.index_of_char(start), s.index_of_char(end)) + if not s._is_ascii: + self._is_ascii = False else: raise TypeError("Invalid type '%s' for Utf8Str.append_slice" % type(s)) diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -134,7 +134,7 @@ builder.append(res) else: builder.append('\\') - builder.append(ch) + builder.append(ord(ch)) return builder.build(), pos From noreply at buildbot.pypy.org Fri Aug 8 09:22:42 2014 From: noreply at buildbot.pypy.org (waedt) Date: Fri, 8 Aug 2014 09:22:42 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Refactor Utf8Builder API some; don't allow .append() Message-ID: <20140808072242.6D6A21C0157@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72719:b5e27ed82427 Date: 2014-08-08 02:12 -0500 http://bitbucket.org/pypy/pypy/changeset/b5e27ed82427/ Log: Refactor Utf8Builder API some; don't allow .append() diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -9,10 +9,10 @@ def build_utf8str(): builder = Utf8Builder() - builder.append('A') #0x41 - builder.append(0x10F) #0xC4 0x8F - builder.append(0x20AC) #0xE2 0x82 0xAC - builder.append(0x1F63D) #0xF0 0x9F 0x98 0xBD + builder.append_ascii('A') #0x41 + builder.append_codepoint(0x10F) #0xC4 0x8F + builder.append_codepoint(0x20AC) #0xE2 0x82 
0xAC + builder.append_codepoint(0x1F63D) #0xF0 0x9F 0x98 0xBD return builder.build() def test_builder(): @@ -88,7 +88,7 @@ def test_unicode_literal_comparison(): builder = Utf8Builder() - builder.append(0x10F) + builder.append_codepoint(0x10F) s = builder.build() assert s == u'\u010F' assert s[0] == u'\u010F' diff --git a/pypy/interpreter/test/test_utf8_codecs.py b/pypy/interpreter/test/test_utf8_codecs.py --- a/pypy/interpreter/test/test_utf8_codecs.py +++ b/pypy/interpreter/test/test_utf8_codecs.py @@ -58,7 +58,7 @@ assert t is s assert start == startingpos assert stop == endingpos - return "42424242", None, stop + return Utf8Str("42424242"), None, stop encoder = self.getencoder(encoding) result = encoder(s, len(s), "foo!", errorhandler) assert called[0] @@ -85,8 +85,8 @@ assert stop == endingpos if msg is not None: assert errmsg == msg - return "42424242", stop - return "", endingpos + return Utf8Str("42424242"), stop + return Utf8Str(""), endingpos decoder = self.getdecoder(encoding) if addstuff: s += "some rest in ascii" diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -21,7 +21,7 @@ # Like unichr, but returns a Utf8Str object # TODO: Do this without the builder so its faster b = Utf8Builder() - b.append(value) + b.append_codepoint(value) return b.build() def utf8ord_bytes(bytes, start): @@ -130,6 +130,13 @@ else: self._len = len(data) + if not we_are_translated(): + self.bytes.decode('utf8') + + if self._is_ascii: + for i in self.bytes: + assert ord(i) < 128 + def _calc_length(self): pos = 0 length = 0 @@ -559,15 +566,15 @@ i += 1 c2 = intmask(array[i]) if c2 == 0: - builder.append(c) + builder.append_codepoint(c) break elif not (0xDC00 <= c2 <= 0xDFFF): - builder.append(c) + builder.append_codepoint(c) c = c2 else: c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; - builder.append(c) + builder.append_codepoint(c) i += 1 return builder.build() @@ -587,15 +594,15 @@ i += 1 c2 = 
intmask(array[i]) if c2 == 0: - builder.append(c) + builder.append_codepoint(c) break elif not (0xDC00 <= c2 <= 0xDFFF): - builder.append(c) + builder.append_codepoint(c) c = c2 else: c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; - builder.append(c) + builder.append_codepoint(c) i += 1 return builder.build() @@ -613,12 +620,12 @@ i += 1 c2 = intmask(array[i]) if not (0xDC00 <= c2 <= 0xDFFF): - builder.append(c) + builder.append_codepoint(c) c = c2 else: c = (((c & 0x3FF)<<10) | (c2 & 0x3FF)) + 0x10000; - builder.append(c) + builder.append_codepoint(c) i += 1 return builder.build() @@ -634,42 +641,54 @@ self._length = 0 + def append_codepoint(self, c): + if c < 0x80: + self._builder.append(chr(c)) + elif c < 0x800: + self._builder.append(chr(0xC0 | (c >> 6))) + self._builder.append(chr(0x80 | (c & 0x3F))) + self._is_ascii = False + elif c < 0x10000: + self._builder.append(chr(0xE0 | (c >> 12))) + self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) + self._builder.append(chr(0x80 | (c & 0x3F))) + self._is_ascii = False + elif c <= 0x10FFFF: + self._builder.append(chr(0xF0 | (c >> 18))) + self._builder.append(chr(0x80 | (c >> 12 & 0x3F))) + self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) + self._builder.append(chr(0x80 | (c & 0x3F))) + self._is_ascii = False + else: + raise ValueError("Invalid unicode codepoint > 0x10FFFF.") + self._length += 1 + + def append_ascii(self, str): + if not we_are_translated(): + # XXX For testing purposes, make sure this is actually ascii + for i in str: + assert ord(i) < 128 + + self._builder.append(str) + self._length += len(str) + + def append_utf8(self, ustr): + self._builder.append(ustr.bytes) + if not ustr._is_ascii: + self._is_ascii = False + self._length += len(ustr) + + def _append_bytes(self, bytes, is_ascii=False): + # XXX This breaks getlength() + self._builder.append(bytes) + self._is_ascii = self._is_ascii and is_ascii + @specialize.argtype(1) def append(self, c): if isinstance(c, Utf8Str): - 
self._builder.append(c.bytes) - if not c._is_ascii: - self._is_ascii = False - self._length += len(c) - - elif isinstance(c, int): - if c < 0x80: - self._builder.append(chr(c)) - elif c < 0x800: - self._builder.append(chr(0xC0 | (c >> 6))) - self._builder.append(chr(0x80 | (c & 0x3F))) - self._is_ascii = False - elif c < 0x10000: - self._builder.append(chr(0xE0 | (c >> 12))) - self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) - self._builder.append(chr(0x80 | (c & 0x3F))) - self._is_ascii = False - elif c <= 0x10FFFF: - self._builder.append(chr(0xF0 | (c >> 18))) - self._builder.append(chr(0x80 | (c >> 12 & 0x3F))) - self._builder.append(chr(0x80 | (c >> 6 & 0x3F))) - self._builder.append(chr(0x80 | (c & 0x3F))) - self._is_ascii = False - else: - raise ValueError("Invalid unicode codepoint > 0x10FFFF.") - self._length += 1 + self.append_utf8(c) else: - assert isinstance(c, str) - self._builder.append(c) - - # XXX The assumption here is that the bytes being appended are - # ASCII, ie 1:1 byte:char - self._length += len(c) + self.append_ascii(c) @specialize.argtype(1) def append_slice(self, s, start, end): @@ -685,18 +704,12 @@ type(s)) self._length += end - start - @specialize.argtype(1) def append_multiple_char(self, c, count): # TODO: What do I do when I have an int? Is it fine to just loop over # .append(c) then? Should (can) I force a resize first? 
- if isinstance(c, int): - self._builder.append_multiple_char(chr(c), count) - return - - if isinstance(c, str): - self._builder.append_multiple_char(c, count) - else: - self._builder.append_multiple_char(c.bytes, count) + if ord(c) > 127: + raise ValueError("an ascii char is required") + self._builder.append_multiple_char(c, count) self._length += count def getlength(self): @@ -705,6 +718,7 @@ def build(self): return Utf8Str(self._builder.build(), self._is_ascii) + class WCharContextManager(object): def __init__(self, str): self.str = str diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -34,7 +34,7 @@ # Non-escape characters are interpreted as Unicode ordinals if ch != '\\': - builder.append(ord(ch)) + builder.append_codepoint(ord(ch)) pos += 1 continue @@ -44,23 +44,23 @@ message = "\\ at end of string" res, pos = errorhandler(errors, "unicodeescape", message, s, pos-1, size) - builder.append(res) + builder.append_utf8(res) continue ch = s[pos] pos += 1 # \x escapes if ch == '\n': pass - elif ch == '\\': builder.append('\\') - elif ch == '\'': builder.append('\'') - elif ch == '\"': builder.append('\"') - elif ch == 'b' : builder.append('\b') - elif ch == 'f' : builder.append('\f') - elif ch == 't' : builder.append('\t') - elif ch == 'n' : builder.append('\n') - elif ch == 'r' : builder.append('\r') - elif ch == 'v' : builder.append('\v') - elif ch == 'a' : builder.append('\a') + elif ch == '\\': builder.append_ascii('\\') + elif ch == '\'': builder.append_ascii('\'') + elif ch == '\"': builder.append_ascii('\"') + elif ch == 'b' : builder.append_ascii('\b') + elif ch == 'f' : builder.append_ascii('\f') + elif ch == 't' : builder.append_ascii('\t') + elif ch == 'n' : builder.append_ascii('\n') + elif ch == 'r' : builder.append_ascii('\r') + elif ch == 'v' : builder.append_ascii('\v') + elif ch == 'a' : builder.append_ascii('\a') elif '0' <= ch <= '7': x = 
ord(ch) - ord('0') if pos < size: @@ -73,7 +73,7 @@ if '0' <= ch <= '7': pos += 1 x = (x<<3) + ord(ch) - ord('0') - builder.append(x) + builder.append_codepoint(x) # hex escapes # \xXX elif ch == 'x': @@ -105,7 +105,7 @@ "(can't load unicodedata module)") res, pos = errorhandler(errors, "unicodeescape", message, s, pos-1, size) - builder.append(res) + builder.append_utf8(res) continue if look < size and s[look] == '{': @@ -120,21 +120,21 @@ if code < 0: res, pos = errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) - builder.append(res) + builder.append_utf8(res) continue pos = look + 1 - builder.append(code) + builder.append_codepoint(code) else: res, pos = errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) - builder.append(res) + builder.append_utf8(res) else: res, pos = errorhandler(errors, "unicodeescape", message, s, pos-1, look+1) - builder.append(res) + builder.append_utf8(res) else: - builder.append('\\') - builder.append(ord(ch)) + builder.append_ascii('\\') + builder.append_codepoint(ord(ch)) return builder.build(), pos @@ -149,7 +149,7 @@ endinpos += 1 res, pos = errorhandler(errors, encoding, message, s, pos-2, endinpos) - builder.append(res) + builder.append_utf8(res) else: try: chr = r_uint(int(s[pos:pos+digits], 16)) @@ -159,18 +159,18 @@ endinpos += 1 res, pos = errorhandler(errors, encoding, message, s, pos-2, endinpos) - builder.append(res) + builder.append_utf8(res) else: # when we get here, chr is a 32-bit unicode character if chr <= MAXUNICODE: - builder.append(intmask(chr)) + builder.append_codepoint(intmask(chr)) pos += digits else: message = "illegal Unicode character" res, pos = errorhandler(errors, encoding, message, s, pos-2, pos+digits) - builder.append(res) + builder.append_utf8(res) return pos def make_unicode_escape_function(pass_printable=False, unicode_output=False, @@ -288,7 +288,7 @@ # Non-escape characters are interpreted as Unicode ordinals if ch != '\\': - result.append(ord(ch)) + 
result.append_codepoint(ord(ch)) pos += 1 continue @@ -299,18 +299,18 @@ pos += 1 if pos == size or s[pos] != '\\': break - result.append('\\') + result.append_ascii('\\') # we have a backslash at the end of the string, stop here if pos >= size: - result.append('\\') + result.append_ascii('\\') break if ((pos - bs) & 1 == 0 or pos >= size or (s[pos] != 'u' and s[pos] != 'U')): - result.append('\\') - result.append(ord(s[pos])) + result.append_ascii('\\') + result.append_codepoint(ord(s[pos])) pos += 1 continue @@ -350,7 +350,7 @@ pos = 0 result = Utf8Builder(size) while pos < size: - result.append(ord(s[pos])) + result.append_codepoint(ord(s[pos])) pos += 1 return result.build(), pos @@ -370,12 +370,12 @@ while pos < size: c = s[pos] if ord(c) < 128: - result.append(c) + result.append_ascii(c) pos += 1 else: r, pos = errorhandler(errors, "ascii", "ordinal not in range(128)", s, pos, pos + 1) - result.append(r) + result.append_utf8(r) return result.build(), pos @@ -416,8 +416,9 @@ # py3k only result.append(rs) continue + for ch in ru: - cd = utf8.ORD(ch, 0) + cd = utf8ord(ch, 0) if cd < limit: result.append(chr(cd)) else: @@ -470,9 +471,10 @@ if (iter.pos != len(s) and oc <= 0xDBFF and 0xDC00 <= iter.peek_next() <= 0xDFFF): oc2 = iter.next() - result.append(((oc - 0xD800) << 10 | (oc2 - 0xDC00)) + 0x10000) + result.append_codepoint( + ((oc - 0xD800) << 10 | (oc2 - 0xDC00)) + 0x10000) elif allow_surrogates: - result.append(oc) + result.append_codepoint(oc) else: ru, rs, pos = errorhandler(errors, 'utf8', 'surrogates not allowed', s, @@ -480,17 +482,17 @@ iter.move(pos - iter.pos) if rs is not None: # py3k only - result.append(rs) + result.append_utf8(rs) continue for ch in ru: if ord(ch) < 0x80: - result.append(ch) + result.append_ascii(ch) else: errorhandler('strict', 'utf8', 'surrogates not allowed', s, pos-1, pos) else: - result.append(oc) + result.append_codepoint(oc) return result.build().bytes @@ -516,7 +518,7 @@ # fast path for ASCII # XXX maybe use a while 
loop here if ordch1 < 0x80: - result.append(ordch1) + result.append_codepoint(ordch1) pos += 1 continue @@ -532,7 +534,7 @@ r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+1) - result.append(r) + result.append_utf8(r) break ordch2 = ord(s[pos+1]) if n == 3: @@ -544,14 +546,14 @@ r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) - result.append(r) + result.append_utf8(r) continue else: # second byte valid, but third byte missing r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+2) - result.append(r) + result.append_utf8(r) break elif n == 4: # 4-bytes seq with 1 or 2 continuation bytes @@ -562,28 +564,28 @@ r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) - result.append(r) + result.append_utf8(r) continue elif charsleft == 2 and ord(s[pos+2])>>6 != 0x2: # 0b10 # third byte invalid, take the first two and continue r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) - result.append(r) + result.append_utf8(r) continue else: # there's only 1 or 2 valid cb, but the others are missing r, pos = errorhandler(errors, 'utf8', 'unexpected end of data', s, pos, pos+charsleft+1) - result.append(r) + result.append_utf8(r) break if n == 0: r, pos = errorhandler(errors, 'utf8', 'invalid start byte', s, pos, pos+1) - result.append(r) + result.append_utf8(r) elif n == 1: assert 0, "ascii should have gone through the fast path" @@ -594,11 +596,11 @@ r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) - result.append(r) + result.append_utf8(r) continue # 110yyyyy 10zzzzzz -> 00000000 00000yyy yyzzzzzz - result.append(((ordch1 & 0x1F) << 6) + # 0b00011111 - (ordch2 & 0x3F)) # 0b00111111 + result.append_codepoint(((ordch1 & 0x1F) << 6) + # 0b00011111 + (ordch2 & 0x3F)) # 0b00111111 pos += 2 elif n == 3: @@ -612,18 +614,18 @@ r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) - 
result.append(r) + result.append_utf8(r) continue elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) - result.append(r) + result.append_utf8(r) continue # 1110xxxx 10yyyyyy 10zzzzzz -> 00000000 xxxxyyyy yyzzzzzz - result.append((((ordch1 & 0x0F) << 12) + # 0b00001111 - ((ordch2 & 0x3F) << 6) + # 0b00111111 - (ordch3 & 0x3F))) # 0b00111111 + result.append_codepoint((((ordch1 & 0x0F) << 12) + # 0b00001111 + ((ordch2 & 0x3F) << 6) + # 0b00111111 + (ordch3 & 0x3F))) # 0b00111111 pos += 3 elif n == 4: @@ -636,19 +638,19 @@ r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+1) - result.append(r) + result.append_utf8(r) continue elif ordch3>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+2) - result.append(r) + result.append_utf8(r) continue elif ordch4>>6 != 0x2: # 0b10 r, pos = errorhandler(errors, 'utf8', 'invalid continuation byte', s, pos, pos+3) - result.append(r) + result.append_utf8(r) continue # 11110www 10xxxxxx 10yyyyyy 10zzzzzz -> 000wwwxx xxxxyyyy yyzzzzzz c = (((ordch1 & 0x07) << 18) + # 0b00000111 @@ -659,7 +661,7 @@ # TODO: Why doesn't this raise an error when c > MAXUNICODE? 
If I'm # converting utf8 -> utf8 is this necessary if c <= MAXUNICODE: - result.append(c) + result.append_codepoint(c) pos += 4 return pos @@ -748,13 +750,13 @@ break r, pos = errorhandler(errors, 'utf16', "truncated data", s, pos, len(s)) - result.append(r) + result.append_utf8(r) if len(s) - pos < 2: break ch = (ord(s[pos + ihi]) << 8) | ord(s[pos + ilo]) pos += 2 if ch < 0xD800 or ch > 0xDFFF: - result.append(ch) + result.append_codepoint(ch) continue # UTF-16 code pair: if len(s) - pos < 2: @@ -763,26 +765,26 @@ break errmsg = "unexpected end of data" r, pos = errorhandler(errors, 'utf16', errmsg, s, pos, len(s)) - result.append(r) + result.append_utf8(r) if len(s) - pos < 2: break elif 0xD800 <= ch <= 0xDBFF: ch2 = (ord(s[pos+ihi]) << 8) | ord(s[pos+ilo]) pos += 2 if 0xDC00 <= ch2 <= 0xDFFF: - result.append((((ch & 0x3FF)<<10) | - (ch2 & 0x3FF)) + 0x10000) + result.append_codepoint((((ch & 0x3FF)<<10) | + (ch2 & 0x3FF)) + 0x10000) continue else: r, pos = errorhandler(errors, 'utf16', "illegal UTF-16 surrogate", s, pos - 4, pos - 2) - result.append(r) + result.append_utf8(r) else: r, pos = errorhandler(errors, 'utf16', "illegal encoding", s, pos - 2, pos) - result.append(r) + result.append_utf8(r) return result.build(), pos, bo def create_surrogate_pair(val): @@ -930,7 +932,7 @@ break r, pos = errorhandler(errors, encodingname, "truncated data", s, pos, len(s)) - result.append(r) + result.append_utf8(r) if len(s) - pos < 4: break continue @@ -940,10 +942,10 @@ r, pos = errorhandler(errors, encodingname, "codepoint not in range(0x110000)", s, pos, len(s)) - result.append(r) + result.append_utf8(r) continue - result.append(ch) + result.append_codepoint(ch) pos += 4 return result.build(), pos, bo @@ -1131,19 +1133,19 @@ if surrogate: # expecting a second surrogate if outCh >= 0xDC00 and outCh <= 0xDFFFF: - result.append((((surrogate & 0x3FF)<<10) | - (outCh & 0x3FF)) + 0x10000) + result.append_codepoint((((surrogate & 0x3FF)<<10) | + (outCh & 0x3FF)) + 0x10000) 
surrogate = 0 continue else: - result.append(surrogate) + result.append_codepoint(surrogate) surrogate = 0 # Not done with outCh: falls back to next line if outCh >= 0xD800 and outCh <= 0xDBFF: # first surrogate surrogate = outCh else: - result.append(outCh) + result.append_codepoint(outCh) else: # now leaving a base-64 section @@ -1151,7 +1153,7 @@ pos += 1 if surrogate: - result.append(surrogate) + result.append_codepoint(surrogate) surrogate = 0 if base64bits > 0: # left-over bits @@ -1160,7 +1162,7 @@ msg = "partial character in shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) - result.append(res) + result.append_utf8(res) continue else: # Some bits remain; they should be zero @@ -1168,7 +1170,7 @@ msg = "non-zero padding bits in shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) - result.append(res) + result.append_utf8(res) continue if ch == '-': @@ -1178,13 +1180,13 @@ base64buffer = 0 surrogate = 0 else: - result.append(ch) + result.append_codepoint(oc) elif ch == '+': pos += 1 # consume '+' if pos < size and s[pos] == '-': # '+-' encodes '+' pos += 1 - result.append('+') + result.append_ascii('+') else: # begin base64-encoded section inShift = 1 shiftOutStartPos = pos - 1 @@ -1192,13 +1194,13 @@ base64buffer = 0 elif _utf7_DECODE_DIRECT(oc): # character decodes at itself - result.append(chr(oc)) + result.append_codepoint(oc) pos += 1 else: pos += 1 msg = "unexpected special character" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) - result.append(res) + result.append_utf8(res) # end of string @@ -1209,7 +1211,7 @@ (base64bits > 0 and base64buffer != 0)): msg = "unterminated shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, shiftOutStartPos, pos) - result.append(res) + result.append_utf8(res) elif inShift: pos = shiftOutStartPos # back off output @@ -1298,9 +1300,9 @@ r, pos = errorhandler(errors, "charmap", "character maps to ", s, pos, pos + 1) - result.append(r) + 
result.append_utf8(r) continue - result.append(c) + result.append_utf8(c) pos += 1 return result.build(), pos diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -231,9 +231,9 @@ pos = start while pos < end: code = utf8ord(obj, pos) - builder.append("&#") - builder.append(str(code)) - builder.append(";") + builder.append_ascii("&#") + builder.append_ascii(str(code)) + builder.append_ascii(";") pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) else: @@ -254,13 +254,13 @@ oc = utf8ord(obj, pos) num = hex(oc) if (oc >= 0x10000): - builder.append("\\U") + builder.append_ascii("\\U") zeros = 8 elif (oc >= 0x100): - builder.append("\\u") + builder.append_ascii("\\u") zeros = 4 else: - builder.append("\\x") + builder.append_ascii("\\x") zeros = 2 lnum = len(num) nb = zeros + 2 - lnum # num starts with '0x' diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -141,9 +141,9 @@ i += 1 else: seennl |= SEEN_CR - builder.append('\n') + builder.append_ascii('\n') continue - builder.append(c) + builder.append_codepoint(c) output = builder.build() self.seennl |= seennl @@ -614,7 +614,7 @@ # Keep reading chunks until we have n characters to return while True: data = self._get_decoded_chars(remaining) - builder.append(data) + builder.append_utf8(data) remaining -= len(data) if remaining <= 0: # Done diff --git a/pypy/objspace/std/formatting.py b/pypy/objspace/std/formatting.py --- a/pypy/objspace/std/formatting.py +++ b/pypy/objspace/std/formatting.py @@ -418,8 +418,8 @@ result.append_multiple_char(' ', padding) # pad with spaces on the left if sign: - # TODO: Why r[0]? 
result.append(r[0]) # the sign + # prefix is is only every '' or '0x', ie always ascii result.append(prefix) # the prefix if padnumber == '0': result.append_multiple_char('0', padding) diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -7,7 +7,7 @@ from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, ORD, utf8chr from pypy.interpreter.utf8_codecs import ( unicode_encode_latin_1, unicode_encode_ascii, str_decode_ascii) -from rpython.rlib import rstring, runicode, rlocale, rfloat, jit +from rpython.rlib import rstring, rlocale, rfloat, jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rfloat import copysign, formatd @@ -698,6 +698,7 @@ need_separator = False done = False previous = 0 + while True: group = ord(grouping[grouping_state]) if group > 0: @@ -750,9 +751,7 @@ if spec.n_sign: if self.is_unicode: - # TODO: A better way to do this might be to check if - # spec.sign < 127 ... 
- sign = str_decode_ascii(chr(spec.sign), 1, 'strict')[0] + sign = str_decode_ascii(chr(spec.sign), 1, 'strict')[0] else: sign = chr(spec.sign) out.append(sign) From noreply at buildbot.pypy.org Fri Aug 8 17:54:35 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 8 Aug 2014 17:54:35 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk emscripten: more changes for emscripten Message-ID: <20140808155435.2239F1C0157@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: emscripten Changeset: r1032:2a78caaf3e0f Date: 2014-08-08 11:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/2a78caaf3e0f/ Log: more changes for emscripten diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -47,7 +47,7 @@ assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 RSDL.WM_SetCaption(title, "RSqueakVM") RSDL.EnableUNICODE(1) - SDLCursor.has_display = True + # SDLCursor.has_display = True self.has_surface = False self.mouse_position = [0, 0] self.interrupt_key = 15 << 8 # pushing all four meta keys, of which we support three... 
@@ -270,45 +270,45 @@ self.interrupt_key = encoded_key -class SDLCursorClass(object): - _attrs_ = ["cursor", "has_cursor", "has_display"] +# class SDLCursorClass(object): +# _attrs_ = ["cursor", "has_cursor", "has_display"] - instance = None +# instance = None - def __init__(self): - self.cursor = lltype.nullptr(RSDL.CursorPtr.TO) - self.has_cursor = False - self.has_display = False +# def __init__(self): +# self.cursor = lltype.nullptr(RSDL.CursorPtr.TO) +# self.has_cursor = False +# self.has_display = False - def set(self, data_words, w, h, x, y, mask_words=None): - if not self.has_display: - return - if self.has_cursor: - RSDL.FreeCursor(self.cursor) - data = self.words_to_bytes(len(data_words) * 4, data_words) - try: - mask = self.words_to_bytes(len(data_words) * 4, mask_words) - try: - self.cursor = RSDL.CreateCursor(data, mask, w * 2, h, x, y) - self.has_cursor = True - RSDL.SetCursor(self.cursor) - finally: - lltype.free(mask, flavor="raw") - finally: - lltype.free(data, flavor="raw") +# def set(self, data_words, w, h, x, y, mask_words=None): +# if not self.has_display: +# return +# if self.has_cursor: +# RSDL.FreeCursor(self.cursor) +# data = self.words_to_bytes(len(data_words) * 4, data_words) +# try: +# mask = self.words_to_bytes(len(data_words) * 4, mask_words) +# try: +# self.cursor = RSDL.CreateCursor(data, mask, w * 2, h, x, y) +# self.has_cursor = True +# RSDL.SetCursor(self.cursor) +# finally: +# lltype.free(mask, flavor="raw") +# finally: +# lltype.free(data, flavor="raw") - def words_to_bytes(self, bytenum, words): - bytes = lltype.malloc(RSDL.Uint8P.TO, bytenum, flavor="raw") - if words: - for pos in range(bytenum / 4): - word = words[pos] - bytes[pos * 4] = rffi.r_uchar((word >> 24) & 0xff) - bytes[pos * 4 + 1] = rffi.r_uchar((word >> 16) & 0xff) - bytes[pos * 4 + 2] = rffi.r_uchar((word >> 8) & 0xff) - bytes[pos * 4 + 3] = rffi.r_uchar(word & 0xff) - else: - for idx in range(bytenum): - bytes[idx] = rffi.r_uchar(0) - return bytes +# def 
words_to_bytes(self, bytenum, words): +# bytes = lltype.malloc(RSDL.Uint8P.TO, bytenum, flavor="raw") +# if words: +# for pos in range(bytenum / 4): +# word = words[pos] +# bytes[pos * 4] = rffi.r_uchar((word >> 24) & 0xff) +# bytes[pos * 4 + 1] = rffi.r_uchar((word >> 16) & 0xff) +# bytes[pos * 4 + 2] = rffi.r_uchar((word >> 8) & 0xff) +# bytes[pos * 4 + 3] = rffi.r_uchar(word & 0xff) +# else: +# for idx in range(bytenum): +# bytes[idx] = rffi.r_uchar(0) +# return bytes -SDLCursor = SDLCursorClass() +# SDLCursor = SDLCursorClass() diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -3,12 +3,14 @@ import os from rpython.rlib.streamio import open_file_as_stream -from rpython.rlib import jit, rpath, objectmodel +from rpython.rlib import jit, rpath, objectmodel, rgc +from rpython.rlib.entrypoint import entrypoint +from rpython.rtyper.lltypesystem import rffi, llmemory, lltype from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ error, shadow, storage_logger, constants from spyvm.tool.analyseimage import create_image -from spyvm.interpreter_proxy import VirtualMachine +# from spyvm.interpreter_proxy import VirtualMachine def _usage(argv): print """ @@ -200,6 +202,61 @@ storage_logger.print_aggregated_log() return 0 + + at entrypoint('main', [rffi.INT], c_name='load_image') +def load_image(bitflags): + trace = (bitflags & 0b1) != 0 + trace_important = (bitflags & 0b10) != 0 + safe_trace = (bitflags & 0b100) != 0 + + space = prebuilt_space + if safe_trace: + space.omit_printing_raw_bytes.activate() + path = rpath.rabspath("/Squeak.image") + try: + f = open_file_as_stream(path, mode="rb", buffering=0) + try: + imagedata = f.readall() + finally: + f.close() + except OSError as e: + print_error("%s -- %s (LoadError)" % (os.strerror(e.errno), path)) + return 1 + # Load & prepare image and environment + image_reader = squeakimage.reader_for_image(space, 
squeakimage.Stream(data=imagedata)) + image = create_image(space, image_reader) + interp = interpreter.Interpreter(space, image, + trace=trace, trace_important=trace_important, + evented=False, is_async=True) + space.runtime_setup("./rsqueak.vm.js", path) + print_error("") # Line break after image-loading characters + return rffi.cast(rffi.INT, interp) + + at entrypoint('main', [rffi.INT], c_name='load_active_context') +def load_active_context(i_interp): + gcref = rffi.cast(llmemory.GCREF, i_interp) + interp = rgc.try_cast_gcref_to_instance(interpreter.Interpreter, gcref) + space = interp.space + context = active_context(space) + return rffi.cast(rffi.INT, context.w_self()) + + at entrypoint('main', [rffi.INT, rffi.INT], c_name='execute_async') +def execute_async(i_interp, i_w_frame): + i_gcref = rffi.cast(llmemory.GCREF, i_interp) + interp = rgc.try_cast_gcref_to_instance(interpreter.Interpreter, i_gcref) + f_gcref = rffi.cast(llmemory.GCREF, i_w_frame) + w_frame = rgc.try_cast_gcref_to_instance(model.W_PointersObject, f_gcref) + try: + w_context = interp.loop(w_frame) + print result_string(w_context) + # print w_context.as_context_get_shadow(interp.space).print_stack() + return rffi.cast(rffi.INT, w_context) + except interpreter.ReturnFromTopLevel, e: + print "returned from toplevel" + print result_string(e.object) + return 0 + + def result_string(w_result): # This will also print contents of strings/symbols/numbers if not w_result: @@ -279,7 +336,17 @@ if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True + driver.exe_name = "rsqueak" + + from rpython.translator import platform + if hasattr(platform, "emscripten_platform"): + # platform.emscripten_platform.EmscriptenPlatform.exe_ext = "js" + platform.emscripten_platform.EmscriptenPlatform.link_flags += [ + "--embed-file", "/home/tim/Dev/lang-smalltalk/images/minibluebookdebug.image@/Squeak.image" + ] + driver.exe_name = "rsqueak.js" + return 
safe_entry_point, None def jitpolicy(self): From noreply at buildbot.pypy.org Fri Aug 8 17:54:32 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 8 Aug 2014 17:54:32 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk emscripten: new branch Message-ID: <20140808155432.D3F501C0157@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: emscripten Changeset: r1030:a1c54c159f54 Date: 2014-08-08 17:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/a1c54c159f54/ Log: new branch From noreply at buildbot.pypy.org Fri Aug 8 17:54:34 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 8 Aug 2014 17:54:34 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk emscripten: changes for emscripten compile Message-ID: <20140808155434.1CF091C0157@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: emscripten Changeset: r1031:e0767d248b23 Date: 2014-08-08 11:45 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e0767d248b23/ Log: changes for emscripten compile - no references to cursor stuff - async flag in interpreter diff --git a/spyvm/interpreter.py b/spyvm/interpreter.py --- a/spyvm/interpreter.py +++ b/spyvm/interpreter.py @@ -61,7 +61,7 @@ class Interpreter(object): _immutable_fields_ = ["space", "image", "interrupt_counter_size", "trace_important", - "startup_time", "evented", "interrupts"] + "startup_time", "evented", "interrupts", "is_async"] jit_driver = jit.JitDriver( greens=['pc', 'self', 'method'], @@ -71,10 +71,11 @@ ) def __init__(self, space, image=None, trace_important=False, - trace=False, evented=True, interrupts=True): + trace=False, evented=True, interrupts=True, is_async=False): # === Initialize immutable variables self.space = space self.image = image + self.is_async = is_async if image: self.startup_time = image.startup_time else: @@ -105,6 +106,8 @@ except ContextSwitchException, e: if self.is_tracing(): e.print_trace() + if self.is_async: + return e.s_new_context.w_self() s_context = e.s_new_context except 
Return, ret: target = s_sender if ret.arrived_at_target else ret.s_target_context diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -987,16 +987,16 @@ def convert_to_c_layout(self): if self.words is None: return self.c_words - else: - from spyvm.interpreter_proxy import sqIntArrayPtr - size = self.size() - old_words = self.words - c_words = self.c_words = lltype.malloc(sqIntArrayPtr.TO, size, flavor='raw') - for i in range(size): - c_words[i] = intmask(old_words[i]) - self.words = None - return c_words - + # else: + # from spyvm.interpreter_proxy import sqIntArrayPtr + # size = self.size() + # old_words = self.words + # c_words = self.c_words = lltype.malloc(sqIntArrayPtr.TO, size, flavor='raw') + # for i in range(size): + # c_words[i] = intmask(old_words[i]) + # self.words = None + # return c_words + def _become(self, w_other): assert isinstance(w_other, W_WordsObject) self.words, w_other.words = w_other.words, self.words diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -712,14 +712,15 @@ depth = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) hotpt = wrapper.PointWrapper(interp.space, w_rcvr.fetch(interp.space, 4)) if not interp.image.is_modern: - display.SDLCursor.set( - w_bitmap.words, - width, - height, - hotpt.x(), - hotpt.y(), - mask_words=mask_words - ) + # display.SDLCursor.set( + # w_bitmap.words, + # width, + # height, + # hotpt.x(), + # hotpt.y(), + # mask_words=mask_words + # ) + pass else: # TODO: Implement pass @@ -890,18 +891,18 @@ if signature[0] == 'BitBltPlugin': from spyvm.plugins.bitblt import BitBltPlugin return BitBltPlugin.call(signature[1], interp, s_frame, argcount, w_method) - elif signature[0] == "SocketPlugin": - from spyvm.plugins.socket import SocketPlugin - return SocketPlugin.call(signature[1], interp, s_frame, argcount, w_method) + # elif signature[0] == "SocketPlugin": + # from spyvm.plugins.socket import SocketPlugin + # 
return SocketPlugin.call(signature[1], interp, s_frame, argcount, w_method) elif signature[0] == "FilePlugin": from spyvm.plugins.fileplugin import FilePlugin return FilePlugin.call(signature[1], interp, s_frame, argcount, w_method) elif signature[0] == "VMDebugging": from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, w_method) - else: - from spyvm.interpreter_proxy import IProxy - return IProxy.call(signature, interp, s_frame, argcount, w_method) + # else: + # from spyvm.interpreter_proxy import IProxy + # return IProxy.call(signature, interp, s_frame, argcount, w_method) raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) @@ -1497,6 +1498,7 @@ @expose_primitive(IDLE_FOR_MICROSECONDS, unwrap_spec=[object, int], no_result=True, clean_stack=False) def func(interp, s_frame, w_rcvr, time_mu_s): import time + from spyvm.interpreter import ProcessSwitch s_frame.pop() time_s = time_mu_s / 1000000.0 interp.interrupt_check_counter = 0 @@ -1504,6 +1506,8 @@ time.sleep(time_s) interp.interrupt_check_counter = 0 interp.quick_check_for_interrupt(s_frame, dec=0) + if interp.is_async: + raise ProcessSwitch(s_frame) @expose_primitive(FORCE_DISPLAY_UPDATE, unwrap_spec=[object]) def func(interp, s_frame, w_rcvr): diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -298,5 +298,5 @@ extend_y = make_int_getter(2) depth = make_int_getter(3) -class CursorWrapper(MaskWrapper): - offset = make_getter(4) +# class CursorWrapper(MaskWrapper): +# offset = make_getter(4) From noreply at buildbot.pypy.org Fri Aug 8 21:42:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 8 Aug 2014 21:42:08 +0200 (CEST) Subject: [pypy-commit] pypy default: ensure the cffi.egg-info version is up to date Message-ID: <20140808194208.F0CE01C30CF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72720:6e76708b4faf Date: 
2014-08-08 12:35 -0700 http://bitbucket.org/pypy/pypy/changeset/6e76708b4faf/ Log: ensure the cffi.egg-info version is up to date diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.8 +Version: 0.8.6 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py @@ -0,0 +1,12 @@ +from email.parser import Parser + +import py + +import cffi +import pypy + +egg_info = py.path.local(pypy.__file__) / '../../lib_pypy/cffi.egg-info' + +def test_egg_version(): + info = Parser().parsestr(egg_info.read()) + assert info['version'] == cffi.__version__ From noreply at buildbot.pypy.org Fri Aug 8 22:10:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 8 Aug 2014 22:10:22 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140808201022.C2ADA1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r519:c5acb1ff57b7 Date: 2014-08-08 22:10 +0200 http://bitbucket.org/pypy/pypy.org/changeset/c5acb1ff57b7/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $52117 of $105000 (49.6%) + $52126 of $105000 (49.6%)
diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $48360 of $60000 (80.6%) + $48374 of $60000 (80.6%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $13393 of $80000 (16.7%) + $13463 of $80000 (16.8%)
From noreply at buildbot.pypy.org Sat Aug 9 01:02:16 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Aug 2014 01:02:16 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Fixed a few typos Message-ID: <20140808230216.04EB81C3352@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r520:ec3445c33f07 Date: 2014-08-08 16:02 -0700 http://bitbucket.org/pypy/pypy.org/changeset/ec3445c33f07/ Log: Fixed a few typos diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -126,7 +126,7 @@

About estimates and costs

For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From -this number, the money is calculated considering a hourly rate of $60, and a +this number, the money is calculated considering an hourly rate of $60, and a 5% general donation which goes to the Software Freedom Conservancy itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status.

@@ -158,7 +158,7 @@ for tight loops

As with all speed improvements, it's relatively hard to predict exactly -how it'll cope, however we expect the results to be withing an order +how it'll cope, however we expect the results to be within an order of magnitude of handwritten C equivalent.

Estimated costs: USD$30,000. Estimated duration: 3 months.

diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -64,7 +64,7 @@ continue using Python 2 while others work with Python 3, making it harder for everyone.

The PyPy project is in a unique position in that it could support -Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of of code base and fully +Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of the code base and fully reusing its unique translation and JIT-Compiler technologies. However, it requires a lot of work, and it will take a long time before we can complete a Python 3 port if we only wait for volunteer @@ -115,7 +115,7 @@

About estimates and costs

For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From -this number, the money is calculated considering a hourly rate of $60, and a +this number, the money is calculated considering an hourly rate of $60, and a 5% general donation which goes to the Software Freedom Conservancy itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments.

@@ -255,7 +255,7 @@ some attention towards implementing Python 3. This will not hinder other directions in which PyPy is going like improving performance. The goal of the PyPy community is to support both Python 2 and Python 3 for the -forseeable future.

+foreseeable future.

PyPy's developers make all PyPy software available to the public without charge, under PyPy's Open Source copyright license, the permissive MIT License. PyPy's license assures that PyPy is equally available to diff --git a/source/numpydonate.txt b/source/numpydonate.txt --- a/source/numpydonate.txt +++ b/source/numpydonate.txt @@ -104,7 +104,7 @@ For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From -this number, the money is calculated considering a hourly rate of $60, and a +this number, the money is calculated considering an hourly rate of $60, and a 5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit organization of which the PyPy project is a member and which manages all the issues related to donations, payments, and tax-exempt status. @@ -145,7 +145,7 @@ for tight loops As with all speed improvements, it's relatively hard to predict exactly - how it'll cope, however we expect the results to be withing an order + how it'll cope, however we expect the results to be within an order of magnitude of handwritten C equivalent. Estimated costs: USD$30,000. Estimated duration: 3 months. diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -25,7 +25,7 @@ harder for everyone. The PyPy project is in a unique position in that it could support -Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of of code base and fully +Python 3 without having to discontinue supporting Python 2, with the possibility of reusing a large part of the code base and fully reusing its unique translation and JIT-Compiler technologies. 
However, it requires a lot of work, and it will take a long time before we can complete a Python 3 port if we only wait for volunteer @@ -89,7 +89,7 @@ For each step, we estimated the time that it would take to complete for an experienced developer who is already familiar with the PyPy codebase. From -this number, the money is calculated considering a hourly rate of $60, and a +this number, the money is calculated considering an hourly rate of $60, and a 5% general donation which goes to the `Software Freedom Conservancy`_ itself, the non-profit association of which the PyPy project is a member and which manages all the issues related to donations, taxes and payments. @@ -271,7 +271,7 @@ some attention towards implementing Python 3. This will not hinder other directions in which PyPy is going like improving performance. The goal of the PyPy community is to support both Python 2 and Python 3 for the -forseeable future. +foreseeable future. PyPy's developers make all PyPy software available to the public without charge, under PyPy's Open Source copyright license, the permissive MIT From noreply at buildbot.pypy.org Sat Aug 9 01:02:17 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Aug 2014 01:02:17 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: merged upstream Message-ID: <20140808230217.557851C3352@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: extradoc Changeset: r521:08ddef577737 Date: 2014-08-08 16:02 -0700 http://bitbucket.org/pypy/pypy.org/changeset/08ddef577737/ Log: merged upstream diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $51969 of $105000 (49.5%) + $52126 of $105000 (49.6%)

diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $48322 of $60000 (80.5%) + $48374 of $60000 (80.6%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $2959 of $80000 (3.7%) + $13463 of $80000 (16.8%)
diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -90,7 +90,7 @@ portable Linux binaries.

  • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
  • -
  • Linux x86 binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS) (see [1] below)
  • +
  • Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
  • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
  • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
  • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
  • @@ -111,8 +111,8 @@ them unless you're ready to hack your system by adding symlinks to the libraries it tries to open.

      -
    • Linux binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
    • -
    • Linux binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS) (see [1] below)
    • +
    • Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS) (see [1] below)
    • +
    • Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04) (see [1] below)
    • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian) (see [1] below)
    • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
    • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
    • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -73,7 +73,7 @@ .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) -* `Linux x86 binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS)`__ (see ``[1]`` below) +* `Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) @@ -107,8 +107,8 @@ them** unless you're ready to hack your system by adding symlinks to the libraries it tries to open. -* `Linux binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) -* `Linux binary (64bit, tar.bz2 built on Ubuntu 12.04.2 LTS)`__ (see ``[1]`` below) +* `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 10.04.4 LTS)`__ (see ``[1]`` below) +* `Linux x86-64 binary (64bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Raspbian)`__ (see ``[1]`` below) * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) diff --git a/talk/ep2014-status.html b/talk/ep2014-status.html new file mode 100644 --- /dev/null +++ b/talk/ep2014-status.html @@ -0,0 +1,373 @@ + + + + + + +PyPy status talk (a.k.a.: no no, PyPy is not dead) + + + +
      +

      PyPy status talk (a.k.a.: no no, PyPy is not dead)

      + +
      +

      Abstract

      +

      The current status of PyPy, with a particular focus on what happened in +the last two years, since the last EuroPython PyPy talk. We will give a +brief overview of the current speed and the on-going development efforts +on the JIT, the GC, NumPy, Python 3 compatibility, CFFI, STM...

      +
      +
      +

      Description

      +

      In this talk we will present the current status of PyPy, with a +particular focus on what happened in the last two years, since the last +EuroPython PyPy talk. We will give an overview of the current speed and +the on-going development efforts, including but not limited to:

      +
        +
      • the status of the Just-in-Time Compiler (JIT) and PyPy performance in +general;
      • +
      • the improvements on the Garbage Collector (GC);
      • +
      • the status of the NumPy and Python 3 compatibility subprojects;
      • +
      • CFFI, which aims to be a general C interface mechanism for both +CPython and PyPy;
      • +
      • a quick overview of the STM (Software Transactional Memory) research +project, which aims to solve the GIL problem.
      • +
      +

      This is the "general PyPy status talk" that we give every year at +EuroPython (except last year; hence the "no no, PyPy is not dead" part +of the title of this talk).

      +
      +
      + + diff --git a/talk/ep2014-stm.html b/talk/ep2014-stm.html new file mode 100644 --- /dev/null +++ b/talk/ep2014-stm.html @@ -0,0 +1,379 @@ + + + + + + +Using All These Cores: Transactional Memory in PyPy + + + +
      +

      Using All These Cores: Transactional Memory in PyPy

      + +
      +

      Abstract

      +

      PyPy, the Python implementation written in Python, experimentally +supports Transactional Memory (TM). The strength of TM is to enable a +novel use of multithreading, inheritently safe, and not limited to +special use cases like other approaches. This talk will focus on how it +works under the hood.

      +
      +
      +

      Description

      +

      PyPy is a fast alternative Python implementation. Software +Transactional Memory (STM) is a current academic research topic. Put +the two together --brew for a couple of years-- and we get a version of +PyPy that runs on multiple cores, without the infamous Global +Interpreter Lock (GIL).

      +

      The current research is based on a recent new insight that promises to +give really good performance. The speed of STM is generally measured by +two factors: the ability to scale with the number of CPUs, and the +amount of overhead when compared with other approaches in a single CPU +(in this case, with the regular PyPy with the GIL). Scaling is not +really a problem here, but single-CPU performance is --or used to be. +This new approach gives a single-threaded overhead that should be very +low, maybe 20%, which would definitely be news for STM systems. Right +now (February 2014) we are still implementing it, so we cannot give +final numbers yet, but early results on a small interpreter for a custom +language are around 15%. This looks like a deal-changer for STM.

      +

      In the talk, I will describe our progress, hopefully along with real +numbers and demos. I will then dive under the hood of PyPy to give an +idea about how it works. I will conclude with a picture of how the +future of multi-threaded programming might looks like, for high-level +languages like Python. I will also mention CPython: how hard (or not) +it would be to change the CPython source code to use the same approach.

      +
      +
      + + From noreply at buildbot.pypy.org Sat Aug 9 05:42:44 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 05:42:44 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Explictly check for negative codepoints Message-ID: <20140809034244.767FD1C3233@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72721:3a4bfe6c37ee Date: 2014-08-08 22:00 -0500 http://bitbucket.org/pypy/pypy/changeset/3a4bfe6c37ee/ Log: Explictly check for negative codepoints diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -642,7 +642,9 @@ def append_codepoint(self, c): - if c < 0x80: + if c < 0: + raise ValueError("Invalid unicode codepoint < 0.") + elif c < 0x80: self._builder.append(chr(c)) elif c < 0x800: self._builder.append(chr(0xC0 | (c >> 6))) From noreply at buildbot.pypy.org Sat Aug 9 05:42:45 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 05:42:45 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix unicode keys in (app-level) dictionaries Message-ID: <20140809034245.A78A61C3233@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72722:a97578376264 Date: 2014-08-08 22:42 -0500 http://bitbucket.org/pypy/pypy/changeset/a97578376264/ Log: Fix unicode keys in (app-level) dictionaries diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -1,6 +1,6 @@ from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import ( - we_are_translated, specialize, import_from_mixin) + we_are_translated, specialize, import_from_mixin, compute_hash) from rpython.rlib.runicode import utf8_code_length from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rlib.rarithmetic import r_uint, intmask, base_int @@ -230,7 +230,7 @@ return self._len def __hash__(self): - return hash(self.bytes) + return 
compute_hash(self.bytes) def __eq__(self, other): if isinstance(other, Utf8Str): diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -9,6 +9,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, applevel, interp2app, unwrap_spec) +from pypy.interpreter.utf8 import Utf8Str from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.signature import Signature from pypy.objspace.std.stdtypedef import StdTypeDef @@ -1035,7 +1036,6 @@ create_iterator_classes(BytesDictStrategy) - class UnicodeDictStrategy(AbstractTypedStrategy, DictStrategy): erase, unerase = rerased.new_erasing_pair("unicode") erase = staticmethod(erase) @@ -1052,9 +1052,9 @@ return space.is_w(space.type(w_obj), space.w_unicode) def get_empty_storage(self): - res = {} - mark_dict_non_null(res) - return self.erase(res) + new_dict = r_dict(Utf8Str.__eq__, Utf8Str.__hash__, + force_non_null=True) + return self.erase(new_dict) def _never_equal_to(self, w_lookup_type): return _never_equal_to_string(self.space, w_lookup_type) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.signature import Signature from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.utf8 import Utf8Str from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.stdtypedef import StdTypeDef @@ -1250,10 +1251,14 @@ name='set(unicode).intersect') def get_empty_storage(self): - return self.erase({}) + new_dict = r_dict(Utf8Str.__eq__, Utf8Str.__hash__, + force_non_null=True) + return self.erase(new_dict) def get_empty_dict(self): - return {} + new_dict = 
r_dict(Utf8Str.__eq__, Utf8Str.__hash__, + force_non_null=True) + return new_dict def listview_unicode(self, w_set): return self.unerase(w_set.sstorage).keys() diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -109,9 +109,9 @@ w_set = W_SetObject(self.space) _initialize_set(self.space, w_set, w_list) assert w_set.strategy is self.space.fromcache(UnicodeSetStrategy) - assert w_set.strategy.unerase(w_set.sstorage) == {Utf8Str("1") :None, - Utf8Str("2") :None, - Utf8Str("3") :None} + assert dict(w_set.strategy.unerase(w_set.sstorage)) == {Utf8Str("1") :None, + Utf8Str("2") :None, + Utf8Str("3") :None} w_list = W_ListObject(self.space, [w("1"), w(2), w("3")]) w_set = W_SetObject(self.space) From noreply at buildbot.pypy.org Sat Aug 9 08:44:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 08:44:37 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in pjenvey/cffi (pull request #44) Message-ID: <20140809064437.67F1D1D36EC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1556:303042482cf7 Date: 2014-08-09 08:44 +0200 http://bitbucket.org/cffi/cffi/changeset/303042482cf7/ Log: Merged in pjenvey/cffi (pull request #44) avoid deprecated imp.get_suffixes on >= 3.3 diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -1,7 +1,17 @@ -import sys, os, binascii, imp, shutil +import sys, os, binascii, shutil from . import __version__ from . 
import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): @@ -222,11 +232,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': From noreply at buildbot.pypy.org Sat Aug 9 08:44:39 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 9 Aug 2014 08:44:39 +0200 (CEST) Subject: [pypy-commit] cffi default: avoid deprecated imp.get_suffixes on >= 3.3 Message-ID: <20140809064439.80D641D36ED@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r1554:7560687dfd75 Date: 2014-07-27 16:20 -0700 http://bitbucket.org/cffi/cffi/changeset/7560687dfd75/ Log: avoid deprecated imp.get_suffixes on >= 3.3 diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -1,7 +1,17 @@ -import sys, os, binascii, imp, shutil +import sys, os, binascii, shutil from . import __version__ from . import ffiplatform +if sys.version_info >= (3, 3): + import importlib.machinery + def extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + class Verifier(object): @@ -222,11 +232,7 @@ pass def _get_so_suffixes(): - suffixes = [] - for suffix, mode, type in imp.get_suffixes(): - if type == imp.C_EXTENSION: - suffixes.append(suffix) - + suffixes = extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext if sys.platform == 'win32': From noreply at buildbot.pypy.org Sat Aug 9 08:44:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 9 Aug 2014 08:44:40 +0200 (CEST) Subject: [pypy-commit] cffi default: make this not public Message-ID: <20140809064440.8CB5C1D36ED@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r1555:dfdc10d75879 Date: 2014-07-28 09:51 -0700 http://bitbucket.org/cffi/cffi/changeset/dfdc10d75879/ Log: make this not public diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -4,11 +4,11 @@ if sys.version_info >= (3, 3): import importlib.machinery - def extension_suffixes(): + def _extension_suffixes(): return importlib.machinery.EXTENSION_SUFFIXES[:] else: import imp - def extension_suffixes(): + def _extension_suffixes(): return [suffix for suffix, _, type in imp.get_suffixes() if type == imp.C_EXTENSION] @@ -232,7 +232,7 @@ pass def _get_so_suffixes(): - suffixes = extension_suffixes() + suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext if sys.platform == 'win32': From noreply at buildbot.pypy.org Sat Aug 9 08:46:58 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 08:46:58 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix mismatch between translated and untranslated behavior Message-ID: <20140809064658.509121D3703@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72723:d124ea9a7f3a Date: 2014-08-09 00:37 -0500 http://bitbucket.org/pypy/pypy/changeset/d124ea9a7f3a/ Log: Fix mismatch between translated and untranslated behavior diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -229,6 +229,10 @@ assert self._len >= 0 return self._len + def __bool__(self): + # XXX Make the untranslated behavior the same as the translated behavior + raise True + def __hash__(self): return compute_hash(self.bytes) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -372,9 +372,10 @@ newline = None else: newline = space.unicode_w(w_newline) - if newline and not (utf8.EQ(newline, Utf8Str('\n')) or - utf8.EQ(newline, Utf8Str('\r\n')) or - utf8.EQ(newline, Utf8Str('\r'))): + if (newline is not None and len(newline) > 0 and + not (utf8.EQ(newline, Utf8Str('\n')) or + utf8.EQ(newline, Utf8Str('\r\n')) or + utf8.EQ(newline, Utf8Str('\r')))): r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( "illegal newline value: %s" % (r,))) From noreply at buildbot.pypy.org Sat Aug 9 08:46:59 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 08:46:59 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Attempt to fix some translated test failures Message-ID: <20140809064659.9B2C71D3703@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72724:d2ab58ed2eed Date: 
2014-08-09 01:45 -0500 http://bitbucket.org/pypy/pypy/changeset/d2ab58ed2eed/ Log: Attempt to fix some translated test failures diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -8,13 +8,13 @@ from rpython.tool.sourcetools import func_with_new_name -wchar_rint = rffi.r_uint -WCHAR_INTP = rffi.UINTP -WCHAR_INT = rffi.UINT +wchar_rint = rffi.r_int +WCHAR_INTP = rffi.INTP +WCHAR_INT = rffi.INT if rffi.sizeof(rffi.WCHAR_T) == 2: - wchar_rint = rffi.r_ushort - WCHAR_INTP = rffi.USHORTP - WCHAR_INT = rffi.USHORT + wchar_rint = rffi.r_short + WCHAR_INTP = rffi.SHORTP + WCHAR_INT = rffi.SHORT def utf8chr(value): @@ -27,6 +27,8 @@ def utf8ord_bytes(bytes, start): codepoint_length = utf8_code_length[ord(bytes[start])] + assert codepoint_length != 0, "byte index isn't the start of a character" + if codepoint_length == 1: res = ord(bytes[start]) @@ -168,6 +170,9 @@ return pos def __getitem__(self, char_pos): + if not isinstance(char_pos, int): + raise TyperError("string index must be an integer, not %r" % + type(char_pos)) # This if statement is needed for [-1:0] to slice correctly if char_pos >= self._len: raise IndexError() @@ -222,9 +227,16 @@ return Utf8Str(self.bytes + other.bytes, self._is_ascii and other._is_ascii) + def __radd__(self, other): + return Utf8Str(other.bytes + self.bytes, + self._is_ascii and other._is_ascii) + def __mul__(self, count): return Utf8Str(self.bytes * count, self._is_ascii) + def __rmul__(self, count): + return Utf8Str(count * self.bytes, self._is_ascii) + def __len__(self): assert self._len >= 0 return self._len From noreply at buildbot.pypy.org Sat Aug 9 08:47:00 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 08:47:00 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix error handling in unicode_internal codec Message-ID: <20140809064700.D20F61D3703@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: 
r72725:36fc05030a2a Date: 2014-08-09 01:45 -0500 http://bitbucket.org/pypy/pypy/changeset/36fc05030a2a/ Log: Fix error handling in unicode_internal codec diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -1351,12 +1351,8 @@ def str_decode_unicode_internal(s, size, errors, final=False, errorhandler=None): - if BYTEORDER == 'little': - result, length, byteorder = str_decode_utf_32_helper( - s, size, errors, final, errorhandler, "little", "unicode_internal") - else: - result, length, byteorder = str_decode_utf_32_helper( - s, size, errors, final, errorhandler, "internal", "unicode_internal") + result, length = str_decode_unicode_internal_helper( + s, size, errors, final, errorhandler) return result, length def unicode_encode_unicode_internal(s, size, errors, errorhandler=None): @@ -1365,6 +1361,46 @@ else: return unicode_encode_utf_32_be(s, size, errors, errorhandler) +def str_decode_unicode_internal_helper(s, size, errors, final=True, + errorhandler=None): + if errorhandler is None: + errorhandler = default_unicode_error_decode + + if BYTEORDER == 'little': + iorder = [0, 1, 2, 3] + else: + iorder = [3, 2, 1, 0] + + if size == 0: + return Utf8Str(''), 0 + + pos = 0 + result = Utf8Builder(size // 4) + + while pos < size: + # remaining bytes at the end? 
(size should be divisible by 4) + if len(s) - pos < 4: + if not final: + break + r, pos = errorhandler(errors, "unicode_internal", "truncated data", + s, pos, len(s)) + result.append_utf8(r) + if len(s) - pos < 4: + break + continue + ch = ((ord(s[pos + iorder[3]]) << 24) | (ord(s[pos + iorder[2]]) << 16) | + (ord(s[pos + iorder[1]]) << 8) | ord(s[pos + iorder[0]])) + if ch >= 0x110000: + r, pos = errorhandler(errors, "unicode_internal", + "codepoint not in range(0x110000)", + s, pos, pos + 4) + result.append_utf8(r) + continue + + result.append_codepoint(ch) + pos += 4 + return result.build(), pos + # }}} # ____________________________________________________________ diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -727,3 +727,12 @@ _codecs.register_error("test.test_codecs_not_a_string", f) raises(TypeError, u'\u1234'.encode, 'ascii', 'test.test_codecs_not_a_string') + + def test_decode_callback(self): + import codecs + codecs.register_error("UnicodeInternalTest", codecs.ignore_errors) + decoder = codecs.getdecoder("unicode_internal") + ab = u"ab".encode("unicode_internal") + ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]), + "UnicodeInternalTest") + assert (u"ab", 12) == ignored From noreply at buildbot.pypy.org Sat Aug 9 11:05:07 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 11:05:07 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: str._formatter_parser is apparently part of the public api Message-ID: <20140809090507.C726F1C02AF@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72726:7f527aa7b8f1 Date: 2014-08-09 02:12 -0500 http://bitbucket.org/pypy/pypy/changeset/7f527aa7b8f1/ Log: str._formatter_parser is apparently part of the public api diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- 
a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -340,11 +340,19 @@ space = self.space startm1 = start - 1 assert startm1 >= self.last_end + + w_conv = space.w_None + if conversion != -1: + if self.is_unicode: + w_conv = space.wrap(utf8chr((conversion))) + else: + w_conv = space.wrap(chr((conversion))) + w_entry = space.newtuple([ space.wrap(self.template[self.last_end:startm1]), space.wrap(name), space.wrap(spec), - space.wrap(conversion)]) + w_conv]) self.parser_list_w.append(w_entry) self.last_end = end + 1 return self.empty diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -382,29 +382,29 @@ assert l == [('abcd', None, None, None)] # l = list('ab{0}cd'._formatter_parser()) - assert l == [('ab', '0', '', -1), ('cd', None, None, None)] + assert l == [('ab', '0', '', None), ('cd', None, None, None)] # l = list('{0}cd'._formatter_parser()) - assert l == [('', '0', '', -1), ('cd', None, None, None)] + assert l == [('', '0', '', None), ('cd', None, None, None)] # l = list('ab{0}'._formatter_parser()) - assert l == [('ab', '0', '', -1)] + assert l == [('ab', '0', '', None)] # l = list(''._formatter_parser()) assert l == [] # l = list('{0:123}'._formatter_parser()) - assert l == [('', '0', '123', -1)] + assert l == [('', '0', '123', None)] # l = list('{0!x:123}'._formatter_parser()) - assert l == [('', '0', '123', ord('x'))] + assert l == [('', '0', '123', 'x')] # l = list('{0!x:12{sdd}3}'._formatter_parser()) - assert l == [('', '0', '12{sdd}3', ord('x'))] + assert l == [('', '0', '12{sdd}3', 'x')] def test_u_formatter_parser(self): l = list(u'{0!x:12{sdd}3}'._formatter_parser()) - assert l == [(u'', u'0', u'12{sdd}3', ord(u'x'))] + assert l == [(u'', u'0', u'12{sdd}3', u'x')] for x in l[0][:-1]: assert isinstance(x, unicode) From noreply at buildbot.pypy.org Sat Aug 9 11:05:09 2014 From: noreply at 
buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 11:05:09 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix the unicode strategy of list.find Message-ID: <20140809090509.20EE81C02AF@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72727:c80352ad6379 Date: 2014-08-09 02:42 -0500 http://bitbucket.org/pypy/pypy/changeset/c80352ad6379/ Log: Fix the unicode strategy of list.find diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1718,6 +1718,14 @@ def list_is_correct_type(self, w_list): return w_list.strategy is self.space.fromcache(UnicodeListStrategy) + def _safe_find(self, w_list, obj, start, stop): + l = self.unerase(w_list.lstorage) + for i in range(start, min(stop, len(l))): + val = l[i] + if utf8.EQ(val, obj): + return i + raise ValueError + def sort(self, w_list, reverse): l = self.unerase(w_list.lstorage) sorter = UnicodeSort(l, len(l)) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -973,6 +973,10 @@ assert c.index(0) == 0.0 raises(ValueError, c.index, 3) + c = [u'1', u'2'] + assert c.index(u'2') == 1 + raises(ValueError, c.index, u'3') + def test_index_cpython_bug(self): if self.on_cpython: skip("cpython has a bug here") From noreply at buildbot.pypy.org Sat Aug 9 11:05:10 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 11:05:10 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Typo Message-ID: <20140809090510.464D31C02AF@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72728:da24f2c93b5b Date: 2014-08-09 02:43 -0500 http://bitbucket.org/pypy/pypy/changeset/da24f2c93b5b/ Log: Typo diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py 
@@ -171,7 +171,7 @@ def __getitem__(self, char_pos): if not isinstance(char_pos, int): - raise TyperError("string index must be an integer, not %r" % + raise TypeError("string index must be an integer, not %r" % type(char_pos)) # This if statement is needed for [-1:0] to slice correctly if char_pos >= self._len: From noreply at buildbot.pypy.org Sat Aug 9 11:05:11 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sat, 9 Aug 2014 11:05:11 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix crash when using an incomplete format specific (ie u'%(foo' % {}) Message-ID: <20140809090511.7C2681C02AF@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72729:31e5deb73708 Date: 2014-08-09 04:04 -0500 http://bitbucket.org/pypy/pypy/changeset/31e5deb73708/ Log: Fix crash when using an incomplete format specific (ie u'%(foo' % {}) diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -50,6 +50,9 @@ return res def utf8ord(ustr, start=0): + if start >= len(ustr): + raise IndexError() + start = ustr.index_of_char(start) return utf8ord_bytes(ustr.bytes, start) From noreply at buildbot.pypy.org Sat Aug 9 11:29:21 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 9 Aug 2014 11:29:21 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix typo Message-ID: <20140809092921.AC08D1C02AF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72730:17a069157969 Date: 2014-08-03 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/17a069157969/ Log: fix typo diff --git a/rpython/doc/windows.rst b/rpython/doc/windows.rst --- a/rpython/doc/windows.rst +++ b/rpython/doc/windows.rst @@ -306,7 +306,7 @@ it CPython64/64. It is probably not too much work if the goal is only to get a translated -PyPy executable, and to run all tests before transaction. But you need +PyPy executable, and to run all tests before translation. 
But you need to start somewhere, and you should start with some tests in rpython/translator/c/test/, like ``test_standalone.py`` and ``test_newgc.py``: try to have them pass on top of CPython64/64. From noreply at buildbot.pypy.org Sat Aug 9 11:29:22 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 9 Aug 2014 11:29:22 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix typo Message-ID: <20140809092922.D3FFA1C02AF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72731:c6f8542c472b Date: 2014-08-03 18:21 +0200 http://bitbucket.org/pypy/pypy/changeset/c6f8542c472b/ Log: fix typo diff --git a/rpython/doc/windows.rst b/rpython/doc/windows.rst --- a/rpython/doc/windows.rst +++ b/rpython/doc/windows.rst @@ -314,7 +314,7 @@ Keep in mind that this runs small translations, and some details may go wrong. The most obvious one is to check that it produces C files that use the integer type ``Signed`` --- but what is ``Signed`` defined to? -It should be equal to ``long`` on every other platforms, but on Win64 it +It should be equal to ``long`` on every other platform, but on Win64 it should be something like ``long long``. What is more generally needed is to review all the C files in From noreply at buildbot.pypy.org Sat Aug 9 11:29:24 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 9 Aug 2014 11:29:24 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: fix some typos Message-ID: <20140809092924.016B61C02AF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72732:7f2b23211785 Date: 2014-08-03 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/7f2b23211785/ Log: fix some typos diff --git a/rpython/doc/windows.rst b/rpython/doc/windows.rst --- a/rpython/doc/windows.rst +++ b/rpython/doc/windows.rst @@ -325,11 +325,11 @@ Then, these two C types have corresponding RPython types: ``rffi.LONG`` and ``lltype.Signed`` respectively. 
The first should really correspond -to the C ``long``. Add tests that check that integers casted to one +to the C ``long``. Add tests that check that integers cast to one type or the other really have 32 and 64 bits respectively, on Win64. Once these basic tests work, you need to review ``rpython/rlib/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +uses of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as always run on top of CPython64/64. Note that there was some early work done in ``rpython/rlib/rarithmetic`` with the goal of running all the @@ -340,13 +340,13 @@ with a minimal set of modules, starting with ``--no-allworkingmodules``; you need to use CPython64/64 to run this translation too. Check carefully the warnings of the C compiler at the end. I think that MSVC -is "nice" in the sense that by default a lot of mismatches of integer -sizes are reported as warnings. +is lenient in that by default a lot of mismatches of integer sizes are +reported as warnings. Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` issues. At some time during this review, we get a working translated PyPy on Windows 64 that includes all ``--translationmodules``, i.e. -everything needed to run translations. When we are there, the hacked +everything needed to run translations. Once we have that, the hacked CPython64/64 becomes much less important, because we can run future translations on top of this translated PyPy. As soon as we get there, please *distribute* the translated PyPy. 
It's an essential component From noreply at buildbot.pypy.org Sat Aug 9 11:29:25 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 9 Aug 2014 11:29:25 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs-fixes: use a different wording Message-ID: <20140809092925.1D9F61C02AF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs-fixes Changeset: r72733:af71f724922c Date: 2014-08-04 06:10 +0200 http://bitbucket.org/pypy/pypy/changeset/af71f724922c/ Log: use a different wording diff --git a/rpython/doc/windows.rst b/rpython/doc/windows.rst --- a/rpython/doc/windows.rst +++ b/rpython/doc/windows.rst @@ -339,9 +339,9 @@ The major intermediate goal is to get a translation of PyPy with ``-O2`` with a minimal set of modules, starting with ``--no-allworkingmodules``; you need to use CPython64/64 to run this translation too. Check -carefully the warnings of the C compiler at the end. I think that MSVC -is lenient in that by default a lot of mismatches of integer sizes are -reported as warnings. +carefully the warnings of the C compiler at the end. By default, MSVC +reports a lot of mismatches of integer sizes as warnings instead of +errors. Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` issues. 
At some time during this review, we get a working translated From noreply at buildbot.pypy.org Sat Aug 9 11:29:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 11:29:26 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Merged in numerodix/pypy/improve-docs-fixes (pull request #265) Message-ID: <20140809092926.3CCBF1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: improve-docs Changeset: r72734:03113237ccb6 Date: 2014-08-09 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/03113237ccb6/ Log: Merged in numerodix/pypy/improve-docs-fixes (pull request #265) more simple doc fixes diff --git a/rpython/doc/windows.rst b/rpython/doc/windows.rst --- a/rpython/doc/windows.rst +++ b/rpython/doc/windows.rst @@ -306,7 +306,7 @@ it CPython64/64. It is probably not too much work if the goal is only to get a translated -PyPy executable, and to run all tests before transaction. But you need +PyPy executable, and to run all tests before translation. But you need to start somewhere, and you should start with some tests in rpython/translator/c/test/, like ``test_standalone.py`` and ``test_newgc.py``: try to have them pass on top of CPython64/64. @@ -314,7 +314,7 @@ Keep in mind that this runs small translations, and some details may go wrong. The most obvious one is to check that it produces C files that use the integer type ``Signed`` --- but what is ``Signed`` defined to? -It should be equal to ``long`` on every other platforms, but on Win64 it +It should be equal to ``long`` on every other platform, but on Win64 it should be something like ``long long``. What is more generally needed is to review all the C files in @@ -325,11 +325,11 @@ Then, these two C types have corresponding RPython types: ``rffi.LONG`` and ``lltype.Signed`` respectively. The first should really correspond -to the C ``long``. Add tests that check that integers casted to one +to the C ``long``. 
Add tests that check that integers cast to one type or the other really have 32 and 64 bits respectively, on Win64. Once these basic tests work, you need to review ``rpython/rlib/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +uses of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as always run on top of CPython64/64. Note that there was some early work done in ``rpython/rlib/rarithmetic`` with the goal of running all the @@ -339,14 +339,14 @@ The major intermediate goal is to get a translation of PyPy with ``-O2`` with a minimal set of modules, starting with ``--no-allworkingmodules``; you need to use CPython64/64 to run this translation too. Check -carefully the warnings of the C compiler at the end. I think that MSVC -is "nice" in the sense that by default a lot of mismatches of integer -sizes are reported as warnings. +carefully the warnings of the C compiler at the end. By default, MSVC +reports a lot of mismatches of integer sizes as warnings instead of +errors. Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` issues. At some time during this review, we get a working translated PyPy on Windows 64 that includes all ``--translationmodules``, i.e. -everything needed to run translations. When we are there, the hacked +everything needed to run translations. Once we have that, the hacked CPython64/64 becomes much less important, because we can run future translations on top of this translated PyPy. As soon as we get there, please *distribute* the translated PyPy. 
It's an essential component From noreply at buildbot.pypy.org Sat Aug 9 11:32:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 11:32:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Manual transplant of 03113237ccb6 Message-ID: <20140809093209.1339E1C02AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72735:b7a27fbe3da4 Date: 2014-08-09 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b7a27fbe3da4/ Log: Manual transplant of 03113237ccb6 diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -296,7 +296,7 @@ it CPython64/64. It is probably not too much work if the goal is only to get a translated -PyPy executable, and to run all tests before transaction. But you need +PyPy executable, and to run all tests before translation. But you need to start somewhere, and you should start with some tests in rpython/translator/c/test/, like ``test_standalone.py`` and ``test_newgc.py``: try to have them pass on top of CPython64/64. @@ -304,7 +304,7 @@ Keep in mind that this runs small translations, and some details may go wrong. The most obvious one is to check that it produces C files that use the integer type ``Signed`` --- but what is ``Signed`` defined to? -It should be equal to ``long`` on every other platforms, but on Win64 it +It should be equal to ``long`` on every other platform, but on Win64 it should be something like ``long long``. What is more generally needed is to review all the C files in @@ -315,11 +315,11 @@ Then, these two C types have corresponding RPython types: ``rffi.LONG`` and ``lltype.Signed`` respectively. The first should really correspond -to the C ``long``. Add tests that check that integers casted to one +to the C ``long``. Add tests that check that integers cast to one type or the other really have 32 and 64 bits respectively, on Win64. 
Once these basic tests work, you need to review ``rpython/rlib/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +uses of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as always run on top of CPython64/64. Note that there was some early work done in ``rpython/rlib/rarithmetic`` with the goal of running all the @@ -329,14 +329,14 @@ The major intermediate goal is to get a translation of PyPy with ``-O2`` with a minimal set of modules, starting with ``--no-allworkingmodules``; you need to use CPython64/64 to run this translation too. Check -carefully the warnings of the C compiler at the end. I think that MSVC -is "nice" in the sense that by default a lot of mismatches of integer -sizes are reported as warnings. +carefully the warnings of the C compiler at the end. By default, MSVC +reports a lot of mismatches of integer sizes as warnings instead of +errors. Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` issues. At some time during this review, we get a working translated PyPy on Windows 64 that includes all ``--translationmodules``, i.e. -everything needed to run translations. When we are there, the hacked +everything needed to run translations. Once we have that, the hacked CPython64/64 becomes much less important, because we can run future translations on top of this translated PyPy. As soon as we get there, please *distribute* the translated PyPy. It's an essential component From noreply at buildbot.pypy.org Sat Aug 9 14:27:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 14:27:10 +0200 (CEST) Subject: [pypy-commit] stmgc default: Merge the branch 'card-marking'. 
Adds card-marking support similar Message-ID: <20140809122710.6C3D21D366B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1274:34c8006bc1e2 Date: 2014-08-09 14:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/34c8006bc1e2/ Log: Merge the branch 'card-marking'. Adds card-marking support similar to PyPy's, i.e. it's optional for large enough arrays, whereas we still have the regular per-object flag for all other objects. It only have an effect for the GC: for STM, we always mark the whole array as read or written, never some random slice of it. diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -1,8 +1,6 @@ - use small uniform gcpages -- write barrier for big arrays - - finalizers - the highest_overflow_number can overflow after 2**30 non-collect-time @@ -16,3 +14,13 @@ the unused pages away --- or maybe use consecutive addresses from the lowest ones from segment N, instead of the page corresponding to the page number in segment 0 (possibly a bit messy) + +- possibly messy too, but think about not using N+1 segments but only N + +- use a call/cc-style variant of setjmp/longjmp to avoid inevitable + transactions when we need to return + +- kill "atomic" and use regular lock elision + +- increase the memory limit, currently 2.5GB; this requires, apparently, + more fighting against LLVM bugs diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -43,7 +43,16 @@ n = (struct node_s*)obj; visit((object_t **)&n->next); } - +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) +{ + abort(); +} +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + abort(); +} void stmcb_commit_soon() {} static void expand_marker(char *base, uintptr_t odd_number, @@ -294,7 +303,7 @@ unregister_thread_local(); - stm_teardown(); + //stm_teardown(); return 0; } diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- 
a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -194,7 +194,7 @@ /* tell the other to commit ASAP, since it causes aborts */ signal_other_to_commit_soon(contmgr.other_pseg); - dprintf(("abort in contention\n")); + dprintf(("abort in contention: kind %d\n", kind)); STM_SEGMENT->nursery_end = abort_category; marker_contention(kind, false, other_segment_num, obj); abort_with_mutex(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -40,26 +40,67 @@ #endif } -void _stm_write_slowpath(object_t *obj) +__attribute__((always_inline)) +static void write_slowpath_overflow_obj(object_t *obj, bool mark_card) +{ + /* An overflow object is an object from the same transaction, but + outside the nursery. More precisely, it is no longer young, + i.e. it comes from before the most recent minor collection. + */ + assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + if (!mark_card) { + /* The basic case, with no card marking. We append the object + into 'objects_pointing_to_nursery', and remove the flag so + that the write_slowpath will not be called again until the + next minor collection. */ + if (obj->stm_flags & GCFLAG_CARDS_SET) { + /* if we clear this flag, we also need to clear the cards */ + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_CLEAR, false); + } + obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + } + else { + /* Card marking. Don't remove GCFLAG_WRITE_BARRIER because we + need to come back to _stm_write_slowpath_card() for every + card to mark. Add GCFLAG_CARDS_SET. 
*/ + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + obj->stm_flags |= GCFLAG_CARDS_SET; + assert(STM_PSEGMENT->old_objects_with_cards); + LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); + } +} + +__attribute__((always_inline)) +static void write_slowpath_common(object_t *obj, bool mark_card) { assert(_seems_to_be_running_transaction()); assert(!_is_young(obj)); assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - /* is this an object from the same transaction, outside the nursery? */ - if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == - STM_PSEGMENT->overflow_number) { + uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); - dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + if (IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)) { + assert(write_locks[base_lock_idx] == 0); + write_slowpath_overflow_obj(obj, mark_card); return; } + /* Else, it's an old object and we need to privatise it. + Do a read-barrier now. Note that this must occur before the + safepoints that may be issued in write_write_contention_management(). + */ + stm_read(obj); - /* do a read-barrier now. Note that this must occur before the - safepoints that may be issued in write_write_contention_management(). */ - stm_read(obj); + /* Take the segment's own lock number */ + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + + /* If CARDS_SET, we entered here at least once already, so we + already own the write_lock */ + assert(IMPLY(obj->stm_flags & GCFLAG_CARDS_SET, + write_locks[base_lock_idx] == lock_num)); /* XXX XXX XXX make the logic of write-locking objects optional! */ @@ -68,16 +109,14 @@ 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. 
*/ - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - uint8_t lock_num = STM_PSEGMENT->write_lock_num; - assert(lock_idx < sizeof(write_locks)); + retry: - if (write_locks[lock_idx] == 0) { + if (write_locks[base_lock_idx] == 0) { /* A lock to prevent reading garbage from lookup_other_thread_recorded_marker() */ acquire_marker_lock(STM_SEGMENT->segment_base); - if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[base_lock_idx], 0, lock_num))) { release_marker_lock(STM_SEGMENT->segment_base); goto retry; @@ -119,16 +158,15 @@ realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - /* that's the page *following* the last page with the object */ - end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + /* get the last page containing data from the object */ + end_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - for (i = first_page; i < end_page; i++) { + for (i = first_page; i <= end_page; i++) { page_privatize(i); } } } - else if (write_locks[lock_idx] == lock_num) { - OPT_ASSERT(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + else if (write_locks[base_lock_idx] == lock_num) { #ifdef STM_TESTS bool found = false; LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, @@ -139,17 +177,10 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - write_write_contention_management(lock_idx, obj); + write_write_contention_management(base_lock_idx, obj); goto retry; } - /* A common case for write_locks[] that was either 0 or lock_num: - we need to add the object to 'objects_pointing_to_nursery' - if there is such a list. 
*/ - if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { - dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); - } /* check that we really have a private page */ assert(is_private_page(STM_SEGMENT->segment_num, @@ -158,16 +189,121 @@ /* check that so far all copies of the object have the flag */ check_flag_write_barrier(obj); - /* remove GCFLAG_WRITE_BARRIER, but only if we succeeded in - getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + if (!mark_card) { + /* A common case for write_locks[] that was either 0 or lock_num: + we need to add the object to the appropriate list if there is one. + */ + if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { + dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + } + + if (obj->stm_flags & GCFLAG_CARDS_SET) { + /* if we clear this flag, we have to tell sync_old_objs that + everything needs to be synced */ + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_MARKED_OLD, true); /* mark all */ + } + + /* remove GCFLAG_WRITE_BARRIER if we succeeded in getting the base + write-lock (not for card marking). 
*/ + obj->stm_flags &= ~(GCFLAG_WRITE_BARRIER | GCFLAG_CARDS_SET); + } + else { + /* don't remove WRITE_BARRIER, but add CARDS_SET */ + obj->stm_flags |= GCFLAG_CARDS_SET; + assert(STM_PSEGMENT->old_objects_with_cards); + LIST_APPEND(STM_PSEGMENT->old_objects_with_cards, obj); + } /* for sanity, check again that all other segment copies of this object still have the flag (so privatization worked) */ check_flag_write_barrier(obj); } +void _stm_write_slowpath(object_t *obj) +{ + write_slowpath_common(obj, /*mark_card=*/false); +} + +static bool obj_should_use_cards(object_t *obj) +{ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + return (size >= _STM_MIN_CARD_OBJ_SIZE); +} + +char _stm_write_slowpath_card_extra(object_t *obj) +{ + /* the PyPy JIT calls this function directly if it finds that an + array doesn't have the GCFLAG_CARDS_SET */ + bool mark_card = obj_should_use_cards(obj); + write_slowpath_common(obj, mark_card); + return mark_card; +} + +long _stm_write_slowpath_card_extra_base(void) +{ + /* for the PyPy JIT: _stm_write_slowpath_card_extra_base[obj >> 4] + is the byte that must be set to CARD_MARKED. The logic below + does the same, but more explicitly. */ + return (((long)write_locks) - WRITELOCK_START + 1) + + 0x4000000000000000L; // <- workaround for a clang bug :-( +} + +void _stm_write_slowpath_card(object_t *obj, uintptr_t index) +{ + /* If CARDS_SET is not set so far, issue a normal write barrier. + If the object is large enough, ask it to set up the object for + card marking instead. + */ + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { + char mark_card = _stm_write_slowpath_card_extra(obj); + if (!mark_card) + return; + } + + dprintf_test(("write_slowpath_card %p -> index:%lu\n", + obj, index)); + + /* We reach this point if we have to mark the card. 
+ */ + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + assert(obj->stm_flags & GCFLAG_CARDS_SET); + assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested */ + +#ifndef NDEBUG + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + /* we need at least one lock in addition to the STM-reserved object + write-lock */ + assert(size >= 32); + /* the 'index' must be in range(length-of-obj), but we don't have + a direct way to know the length. We know that it is smaller + than the size in bytes. */ + assert(index < size); +#endif + + /* Write into the card's lock. This is used by the next minor + collection to know what parts of the big object may have changed. + We already own the object here or it is an overflow obj. */ + uintptr_t base_lock_idx = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_lock_idx = base_lock_idx + get_index_to_card_index(index); + write_locks[card_lock_idx] = CARD_MARKED; + + /* More debug checks */ + dprintf(("mark %p index %lu, card:%lu with %d\n", + obj, index, get_index_to_card_index(index), CARD_MARKED)); + assert(IMPLY(IS_OVERFLOW_OBJ(STM_PSEGMENT, obj), + write_locks[base_lock_idx] == 0)); + assert(IMPLY(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj), + write_locks[base_lock_idx] == STM_PSEGMENT->write_lock_num)); +} + static void reset_transaction_read_version(void) { /* force-reset all read markers to 0 */ @@ -284,6 +420,8 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ + dprintf(("write-read conflict on %p, our seg: %d, other: %ld\n", + item, STM_SEGMENT->segment_num, i)); if (write_read_contention_management(i, item)) { /* If we reach this point, we didn't abort, but we had to wait for the other thread to commit. 
If we @@ -355,7 +493,214 @@ } } -static void synchronize_object_now(object_t *obj) +static void _page_wise_synchronize_object_now(object_t *obj) +{ + uintptr_t start = (uintptr_t)obj; + uintptr_t first_page = start / 4096UL; + + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + assert(obj_size >= 16); + uintptr_t end = start + obj_size; + uintptr_t last_page = (end - 1) / 4096UL; + long i, myself = STM_SEGMENT->segment_num; + + for (; first_page <= last_page; first_page++) { + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the + page's end */ + copy_size = 4096 - (start & 4095); + } + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + /* First copy the object into the shared page, if needed */ + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + if (is_private_page(myself, first_page)) { + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(memcmp(dst, src, copy_size) == 0); /* same page */ + } + + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + + /* src = REAL_ADDRESS(stm_object_pages, start); */ + dst = REAL_ADDRESS(get_segment_base(i), start); + if (is_private_page(i, first_page)) { + /* The page is a private page. We need to diffuse this + fragment of object from the shared page to this private + page. 
*/ + if (copy_size == 4096) + pagecopy(dst, src); + else + memcpy(dst, src, copy_size); + } + else { + assert(!memcmp(dst, src, copy_size)); /* same page */ + } + } + + start = (start + 4096) & ~4095; + } +} + +static inline bool _has_private_page_in_range( + long seg_num, uintptr_t start, uintptr_t size) +{ + uintptr_t first_page = start / 4096UL; + uintptr_t last_page = (start + size) / 4096UL; + for (; first_page <= last_page; first_page++) + if (is_private_page(seg_num, first_page)) + return true; + return false; +} + +static void _card_wise_synchronize_object_now(object_t *obj) +{ + assert(obj_should_use_cards(obj)); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj)); + + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t obj_size = stmcb_size_rounded_up(realobj); + assert(obj_size >= 32); + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_index_to_card_index(obj_size - 1); /* max valid index */ + long i, myself = STM_SEGMENT->segment_num; + + /* simple heuristic to check if probably the whole object is + marked anyway so we should do page-wise synchronize */ + if (write_locks[first_card_index + 1] == CARD_MARKED_OLD + && write_locks[first_card_index + last_card_index] == CARD_MARKED_OLD + && write_locks[first_card_index + (last_card_index >> 1) + 1] == CARD_MARKED_OLD) { + + dprintf(("card_wise_sync assumes %p,size:%lu is fully marked\n", obj, obj_size)); + _reset_object_cards(get_priv_segment(STM_SEGMENT->segment_num), + obj, CARD_CLEAR, false); + _page_wise_synchronize_object_now(obj); + return; + } + + dprintf(("card_wise_sync syncs %p,size:%lu card-wise\n", obj, obj_size)); + + /* Combine multiple marked cards and do a memcpy for them. 
We don't + try yet to use page_copy() or otherwise take into account privatization + of pages (except _has_private_page_in_range) */ + uintptr_t offset_itemsize[2]; + bool all_cards_were_cleared = true; + + uintptr_t start_card_index = -1; + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + uint8_t card_value = write_locks[card_lock_idx]; + + if (card_value == CARD_MARKED_OLD) { + write_locks[card_lock_idx] = CARD_CLEAR; + + if (start_card_index == -1) { /* first marked card */ + start_card_index = card_index; + /* start = (uintptr_t)obj + stmcb_index_to_byte_offset( */ + /* realobj, get_card_index_to_index(card_index)); */ + if (all_cards_were_cleared) { + all_cards_were_cleared = false; + stmcb_get_card_base_itemsize(realobj, offset_itemsize); + } + } + } + else { + OPT_ASSERT(card_value == CARD_CLEAR); + } + + if (start_card_index != -1 /* something to copy */ + && (card_value != CARD_MARKED_OLD /* found non-marked card */ + || card_index == last_card_index)) { /* this is the last card */ + /* do the copying: */ + uintptr_t start, copy_size; + uintptr_t next_card_offset; + uintptr_t start_card_offset; + uintptr_t next_card_index = card_index; + + if (card_value == CARD_MARKED_OLD) { + /* card_index is the last card of the object, but we need + to go one further to get the right offset */ + next_card_index++; + } + + start_card_offset = offset_itemsize[0] + + get_card_index_to_index(start_card_index) * offset_itemsize[1]; + + next_card_offset = offset_itemsize[0] + + get_card_index_to_index(next_card_index) * offset_itemsize[1]; + + if (next_card_offset > obj_size) + next_card_offset = obj_size; + + start = (uintptr_t)obj + start_card_offset; + copy_size = next_card_offset - start_card_offset; + OPT_ASSERT(copy_size > 0); + + /* dprintf(("copy %lu bytes\n", copy_size)); */ + + /* since we have marked cards, at least one page here must be private */ + assert(_has_private_page_in_range(myself, start, copy_size)); + 
+ /* copy to shared segment: */ + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + char *dst = REAL_ADDRESS(stm_object_pages, start); + memcpy(dst, src, copy_size); + + /* copy to other segments */ + for (i = 1; i <= NB_SEGMENTS; i++) { + if (i == myself) + continue; + if (!_has_private_page_in_range(i, start, copy_size)) + continue; + /* src = REAL_ADDRESS(stm_object_pages, start); */ + dst = REAL_ADDRESS(get_segment_base(i), start); + memcpy(dst, src, copy_size); + } + + start_card_index = -1; + } + + card_index++; + } + + if (all_cards_were_cleared) { + /* well, seems like we never called stm_write_card() on it, so actually + we need to fall back to synchronize the whole object */ + _page_wise_synchronize_object_now(obj); + return; + } + +#ifndef NDEBUG + char *src = REAL_ADDRESS(stm_object_pages, (uintptr_t)obj); + char *dst; + for (i = 1; i <= NB_SEGMENTS; i++) { + dst = REAL_ADDRESS(get_segment_base(i), (uintptr_t)obj); + assert(memcmp(dst, src, obj_size) == 0); + } +#endif +} + + +static void synchronize_object_now(object_t *obj, bool ignore_cards) { /* Copy around the version of 'obj' that lives in our own segment. 
It is first copied into the shared pages, and then into other @@ -367,72 +712,16 @@ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); assert(STM_PSEGMENT->privatization_lock == 1); - uintptr_t start = (uintptr_t)obj; - uintptr_t first_page = start / 4096UL; + if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + abort();//XXX WRITE THE FAST CASE + } else if (ignore_cards || !obj_should_use_cards(obj)) { + _page_wise_synchronize_object_now(obj); + } else { + _card_wise_synchronize_object_now(obj); + } - if (obj->stm_flags & GCFLAG_SMALL_UNIFORM) { - abort();//XXX WRITE THE FAST CASE - } - else { - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); - assert(obj_size >= 16); - uintptr_t end = start + obj_size; - uintptr_t last_page = (end - 1) / 4096UL; - long i, myself = STM_SEGMENT->segment_num; - - for (; first_page <= last_page; first_page++) { - - uintptr_t copy_size; - if (first_page == last_page) { - /* this is the final fragment */ - copy_size = end - start; - } - else { - /* this is a non-final fragment, going up to the - page's end */ - copy_size = 4096 - (start & 4095); - } - /* double-check that the result fits in one page */ - assert(copy_size > 0); - assert(copy_size + (start & 4095) <= 4096); - - /* First copy the object into the shared page, if needed */ - char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); - char *dst = REAL_ADDRESS(stm_object_pages, start); - if (is_private_page(myself, first_page)) { - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(memcmp(dst, src, copy_size) == 0); /* same page */ - } - - for (i = 1; i <= NB_SEGMENTS; i++) { - if (i == myself) - continue; - - src = REAL_ADDRESS(stm_object_pages, start); - dst = REAL_ADDRESS(get_segment_base(i), start); - if (is_private_page(i, first_page)) { - /* The page is a private page. 
We need to diffuse this - fragment of object from the shared page to this private - page. */ - if (copy_size == 4096) - pagecopy(dst, src); - else - memcpy(dst, src, copy_size); - } - else { - assert(!memcmp(dst, src, copy_size)); /* same page */ - } - } - - start = (start + 4096) & ~4095; - } - } + _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), obj); } static void push_overflow_objects_from_privatized_pages(void) @@ -442,7 +731,7 @@ acquire_privatization_lock(); LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, - synchronize_object_now(item)); + synchronize_object_now(item, true /*ignore_cards*/)); release_privatization_lock(); } @@ -466,7 +755,7 @@ /* copy the object to the shared page, and to the other private pages as needed */ - synchronize_object_now(item); + synchronize_object_now(item, false); /* don't ignore_cards */ })); release_privatization_lock(); @@ -483,7 +772,9 @@ STM_PSEGMENT->marker_inev[1] = 0; /* reset these lists to NULL for the next transaction */ + _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); + list_clear(STM_PSEGMENT->old_objects_with_cards); LIST_FREE(STM_PSEGMENT->large_overflow_objects); timing_end_transaction(attribute_to); @@ -534,6 +825,7 @@ /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); + _verify_cards_cleared_in_all_lists(get_priv_segment(STM_SEGMENT->segment_num)); /* update 'overflow_number' if needed */ if (STM_PSEGMENT->overflow_number_has_been_used) { @@ -593,6 +885,9 @@ ssize_t size = stmcb_size_rounded_up((struct object_s *)src); memcpy(dst, src, size); + if (obj_should_use_cards(item)) + _reset_object_cards(pseg, item, CARD_CLEAR, false); + /* objects in 'modified_old_objects' usually have the WRITE_BARRIER flag, unless they have been modified recently. 
Ignore the old flag; after copying from the @@ -621,6 +916,10 @@ static void abort_data_structures_from_segment_num(int segment_num) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT /* This function clears the content of the given segment undergoing an abort. It is called from abort_with_mutex(), but also sometimes from other threads that figure out that this segment should abort. @@ -648,8 +947,27 @@ /* throw away the content of the nursery */ long bytes_in_nursery = throw_away_nursery(pseg); + /* modified_old_objects' cards get cleared in + reset_modified_from_other_segments. Objs in old_objs_with_cards but not + in modified_old_objs are overflow objects and handled here: */ + if (pseg->large_overflow_objects != NULL) { + /* some overflow objects may have cards when aborting, clear them too */ + LIST_FOREACH_R(pseg->large_overflow_objects, object_t * /*item*/, + { + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + /* CARDS_SET is enough since other HAS_CARDS objs + are already cleared */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + }); + } + /* reset all the modified objects (incl. 
re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(segment_num); + _verify_cards_cleared_in_all_lists(pseg); /* reset the tl->shadowstack and thread_local_obj to their original value before the transaction start */ @@ -662,8 +980,11 @@ /* reset these lists to NULL too on abort */ LIST_FREE(pseg->objects_pointing_to_nursery); + list_clear(pseg->old_objects_with_cards); LIST_FREE(pseg->large_overflow_objects); list_clear(pseg->young_weakrefs); +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } static void abort_with_mutex(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -14,7 +14,7 @@ #endif -#define NB_PAGES (1500*256) // 1500MB +#define NB_PAGES (2500*256) // 2500MB #define NB_SEGMENTS STM_NB_SEGMENTS #define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) @@ -35,6 +35,8 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END +#define CARD_SIZE _STM_CARD_SIZE + enum /* stm_flags */ { /* This flag is set on non-nursery objects. It forces stm_write() to call _stm_write_slowpath(). @@ -54,6 +56,12 @@ after the object. */ GCFLAG_HAS_SHADOW = 0x04, + /* Set on objects that are large enough (_STM_MIN_CARD_OBJ_SIZE) + to have multiple cards (at least _STM_MIN_CARD_COUNT), and that + have at least one card marked. This flag implies + GCFLAG_WRITE_BARRIER. */ + GCFLAG_CARDS_SET = _STM_GCFLAG_CARDS_SET, + /* All remaining bits of the 32-bit 'stm_flags' field are taken by the "overflow number". This is a number that identifies the "overflow objects" from the current transaction among all old @@ -61,7 +69,7 @@ current transaction that have been flushed out of the nursery, which occurs if the same transaction allocates too many objects. 
*/ - GCFLAG_OVERFLOW_NUMBER_bit0 = 0x8 /* must be last */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x10 /* must be last */ }; @@ -96,6 +104,10 @@ understood as meaning implicitly "this is the same as 'modified_old_objects'". */ struct list_s *objects_pointing_to_nursery; + /* Like objects_pointing_to_nursery it holds the old objects that + we did a stm_write_card() on. Objects can be in both lists. + It is NULL iff objects_pointing_to_nursery is NULL. */ + struct list_s *old_objects_with_cards; /* List of all large, overflowed objects. Only non-NULL after the current transaction spanned a minor collection. */ @@ -212,9 +224,34 @@ static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; +enum /* card values for write_locks */ { + CARD_CLEAR = 0, /* card not used at all */ + CARD_MARKED = _STM_CARD_MARKED, /* card marked for tracing in the next gc */ + CARD_MARKED_OLD = 101, /* card was marked before, but cleared + in a GC */ +}; + #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) +#define IS_OVERFLOW_OBJ(pseg, obj) (((obj)->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) \ + == (pseg)->overflow_number) + +static inline uintptr_t get_index_to_card_index(uintptr_t index) { + return (index / CARD_SIZE) + 1; +} + +static inline uintptr_t get_card_index_to_index(uintptr_t card_index) { + return (card_index - 1) * CARD_SIZE; +} + + +static inline uintptr_t get_write_lock_idx(uintptr_t obj) { + uintptr_t res = (obj >> 4) - WRITELOCK_START; + assert(res < sizeof(write_locks)); + return res; +} + static inline char *get_segment_base(long segment_num) { return stm_object_pages + segment_num * (NB_PAGES * 4096UL); } @@ -257,7 +294,7 @@ } static void copy_object_to_shared(object_t *obj, int source_segment_num); -static void synchronize_object_now(object_t *obj); +static void synchronize_object_now(object_t *obj, bool ignore_cards); static inline void acquire_privatization_lock(void) { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ 
b/c7/stm/gcpage.c @@ -166,7 +166,7 @@ static inline uintptr_t mark_loc(object_t *obj) { - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + uintptr_t lock_idx = get_write_lock_idx((uintptr_t)obj); assert(lock_idx < sizeof(write_locks)); return lock_idx; } @@ -440,6 +440,11 @@ static void clean_up_segment_lists(void) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + long i; for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pseg = get_priv_segment(i); @@ -450,21 +455,54 @@ written to but don't actually point to the nursery. Clear it up and set GCFLAG_WRITE_BARRIER again on the objects. This is the case for transactions where - MINOR_NOTHING_TO_DO() == false + MINOR_NOTHING_TO_DO() == true but they still did write-barriers on objects */ lst = pseg->objects_pointing_to_nursery; if (lst != NULL) { - LIST_FOREACH_R(lst, uintptr_t /*item*/, + LIST_FOREACH_R(lst, object_t* /*item*/, ({ struct object_s *realobj = (struct object_s *) - REAL_ADDRESS(pseg->pub.segment_base, item); + REAL_ADDRESS(pseg->pub.segment_base, (uintptr_t)item); + assert(!(realobj->stm_flags & GCFLAG_WRITE_BARRIER)); + OPT_ASSERT(!(realobj->stm_flags & GCFLAG_CARDS_SET)); + realobj->stm_flags |= GCFLAG_WRITE_BARRIER; + + if (realobj->stm_flags & GCFLAG_CARDS_SET) { + /* we called a normal WB on this object, so all cards + need to be marked OLD */ + if (!IS_OVERFLOW_OBJ(pseg, realobj)) { + _reset_object_cards(pseg, item, CARD_MARKED_OLD, true); /* mark all */ + } else { + /* simply clear overflow */ + _reset_object_cards(pseg, item, CARD_CLEAR, false); + } + } })); list_clear(lst); + } else { + /* if here MINOR_NOTHING_TO_DO() was true before, it's like + we "didn't do a collection" at all. So nothing to do on + modified_old_objs. 
*/ } + lst = pseg->old_objects_with_cards; + LIST_FOREACH_R(lst, object_t* /*item*/, + ({ + struct object_s *realobj = (struct object_s *) + REAL_ADDRESS(pseg->pub.segment_base, item); + OPT_ASSERT(realobj->stm_flags & GCFLAG_CARDS_SET); + OPT_ASSERT(realobj->stm_flags & GCFLAG_WRITE_BARRIER); + + /* clear cards if overflow, or mark marked cards as old otherwise */ + uint8_t mark_value = IS_OVERFLOW_OBJ(pseg, realobj) ? + CARD_CLEAR : CARD_MARKED_OLD; + _reset_object_cards(pseg, item, mark_value, false); + })); + list_clear(lst); + /* Remove from 'large_overflow_objects' all objects that die */ lst = pseg->large_overflow_objects; if (lst != NULL) { @@ -477,6 +515,8 @@ } } } +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } static inline bool largemalloc_keep_object_at(char *data) @@ -503,6 +543,20 @@ _stm_largemalloc_sweep(); } +static void assert_cleared_locks(size_t n) +{ +#ifndef NDEBUG + size_t i; + uint8_t *s = write_locks; +# ifndef STM_TESTS + if (n > 5000) n = 5000; +# endif + for (i = 0; i < n; i++) + assert(s[i] == CARD_CLEAR || s[i] == CARD_MARKED + || s[i] == CARD_MARKED_OLD); +#endif +} + static void clean_write_locks(void) { /* the write_locks array, containing the visit marker during @@ -512,7 +566,7 @@ object_t *loc2 = (object_t *)(uninitialized_page_stop - stm_object_pages); uintptr_t lock2_idx = mark_loc(loc2 - 1) + 1; - assert_memset_zero(write_locks, lock2_idx); + assert_cleared_locks(lock2_idx); memset(write_locks + lock2_idx, 0, sizeof(write_locks) - lock2_idx); } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -40,6 +40,12 @@ return (obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) == 0; } + +bool _stm_was_written_card(object_t *obj) +{ + return obj->stm_flags & _STM_GCFLAG_CARDS_SET; +} + #ifdef STM_TESTS uintptr_t _stm_get_private_page(uintptr_t pagenum) { @@ -61,6 +67,13 @@ return list_count(STM_PSEGMENT->objects_pointing_to_nursery); } +long _stm_count_old_objects_with_cards(void) +{ + 
if (STM_PSEGMENT->old_objects_with_cards == NULL) + return -1; + return list_count(STM_PSEGMENT->old_objects_with_cards); +} + object_t *_stm_enum_modified_old_objects(long index) { return (object_t *)list_item( @@ -73,6 +86,12 @@ STM_PSEGMENT->objects_pointing_to_nursery, index); } +object_t *_stm_enum_old_objects_with_cards(long index) +{ + return (object_t *)list_item( + STM_PSEGMENT->old_objects_with_cards, index); +} + uint64_t _stm_total_allocated(void) { return increment_total_allocated(0); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -65,6 +65,8 @@ object_t *obj = *pobj; object_t *nobj; uintptr_t nobj_sync_now; + char *realobj; + size_t size; if (obj == NULL) return; @@ -75,8 +77,6 @@ to GCWORD_MOVED. In that case, the forwarding location, i.e. where the object moved to, is stored in the second word in 'obj'. */ object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; - char *realobj; - size_t size; if (obj->stm_flags & GCFLAG_HAS_SHADOW) { /* ^^ the single check above detects both already-moved objects @@ -149,6 +149,7 @@ /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); + _cards_cleared_in_object(get_priv_segment(STM_SEGMENT->segment_num), nobj); } static void collect_roots_in_nursery(void) @@ -183,23 +184,192 @@ minor_trace_if_young(&tl->thread_local_obj); } +static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj) +{ +#ifndef NDEBUG + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(pseg->pub.segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + if (size < _STM_MIN_CARD_OBJ_SIZE) + return; /* too small for cards */ + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS_MAX + || 
write_locks[first_card_index] == 255); /* see gcpage.c */ + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + assert(write_locks[card_lock_idx] == CARD_CLEAR); + card_index++; + } + + assert(!(realobj->stm_flags & GCFLAG_CARDS_SET)); +#endif +} + +static void _verify_cards_cleared_in_all_lists(struct stm_priv_segment_info_s *pseg) +{ +#ifndef NDEBUG + LIST_FOREACH_R( + pseg->modified_old_objects, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + + if (pseg->large_overflow_objects) { + LIST_FOREACH_R( + pseg->large_overflow_objects, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + } + if (pseg->objects_pointing_to_nursery) { + LIST_FOREACH_R( + pseg->objects_pointing_to_nursery, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); + } + LIST_FOREACH_R( + pseg->old_objects_with_cards, object_t * /*item*/, + _cards_cleared_in_object(pseg, item)); +#endif +} + +static void _reset_object_cards(struct stm_priv_segment_info_s *pseg, + object_t *obj, uint8_t mark_value, + bool mark_all) +{ +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(pseg->pub.segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + OPT_ASSERT(size >= _STM_MIN_CARD_OBJ_SIZE); + assert(IMPLY(mark_value == CARD_CLEAR, !mark_all)); /* not necessary */ + assert(IMPLY(mark_all, mark_value == CARD_MARKED_OLD)); /* set *all* to OLD */ + assert(IMPLY(IS_OVERFLOW_OBJ(pseg, realobj), + mark_value == CARD_CLEAR)); /* overflows are always CLEARed */ + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS + || write_locks[first_card_index] == 255); /* see gcpage.c */ + + dprintf(("mark cards of %p, size %lu 
with %d, all: %d\n", + obj, size, mark_value, mark_all)); + dprintf(("obj has %lu cards\n", last_card_index)); + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + + if (mark_all || write_locks[card_lock_idx] != CARD_CLEAR) { + /* dprintf(("mark card %lu,wl:%lu of %p with %d\n", */ + /* card_index, card_lock_idx, obj, mark_value)); */ + write_locks[card_lock_idx] = mark_value; + } + card_index++; + } + + realobj->stm_flags &= ~GCFLAG_CARDS_SET; + +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") +} + + +static void _trace_card_object(object_t *obj) +{ + assert(!_is_in_nursery(obj)); + assert(obj->stm_flags & GCFLAG_CARDS_SET); + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + + dprintf(("_trace_card_object(%p)\n", obj)); + bool obj_is_overflow = IS_OVERFLOW_OBJ(STM_PSEGMENT, obj); + uint8_t mark_value = obj_is_overflow ? CARD_CLEAR : CARD_MARKED_OLD; + + struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up(realobj); + + uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); + uintptr_t card_index = 1; + uintptr_t last_card_index = get_index_to_card_index(size - 1); /* max valid index */ + + OPT_ASSERT(write_locks[first_card_index] <= NB_SEGMENTS_MAX + || write_locks[first_card_index] == 255); /* see gcpage.c */ + + /* XXX: merge ranges */ + while (card_index <= last_card_index) { + uintptr_t card_lock_idx = first_card_index + card_index; + if (write_locks[card_lock_idx] == CARD_MARKED) { + /* clear or set to old: */ + write_locks[card_lock_idx] = mark_value; + + uintptr_t start = get_card_index_to_index(card_index); + uintptr_t stop = get_card_index_to_index(card_index + 1); + + dprintf(("trace_cards on %p with start:%lu stop:%lu\n", + obj, start, stop)); + stmcb_trace_cards(realobj, &minor_trace_if_young, + start, stop); + + } + + /* all cards should be cleared on overflow objs */ + 
assert(IMPLY(obj_is_overflow, + write_locks[card_lock_idx] == CARD_CLEAR)); + + card_index++; + } + obj->stm_flags &= ~GCFLAG_CARDS_SET; +} + + + static inline void _collect_now(object_t *obj) { assert(!_is_young(obj)); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); - /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ - assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); - obj->stm_flags |= GCFLAG_WRITE_BARRIER; + dprintf(("_collect_now: %p\n", obj)); - /* Trace the 'obj' to replace pointers to nursery with pointers - outside the nursery, possibly forcing nursery objects out and - adding them to 'objects_pointing_to_nursery' as well. */ - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + if (!(obj->stm_flags & GCFLAG_WRITE_BARRIER)) { + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. 
*/ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + + obj->stm_flags |= GCFLAG_WRITE_BARRIER; + } + /* else traced in collect_cardrefs_to_nursery if necessary */ +} + + +static void collect_cardrefs_to_nursery(void) +{ + dprintf(("collect_cardrefs_to_nursery\n")); + struct list_s *lst = STM_PSEGMENT->old_objects_with_cards; + + while (!list_is_empty(lst)) { + object_t *obj = (object_t*)list_pop_item(lst); + + assert(!_is_young(obj)); + + if (!(obj->stm_flags & GCFLAG_CARDS_SET)) { + /* sometimes we remove the CARDS_SET in the WB slowpath, see core.c */ + continue; + } + + /* traces cards, clears marked cards or marks them old if necessary */ + _trace_card_object(obj); + + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); + } } static void collect_oldrefs_to_nursery(void) { + dprintf(("collect_oldrefs_to_nursery\n")); struct list_s *lst = STM_PSEGMENT->objects_pointing_to_nursery; while (!list_is_empty(lst)) { @@ -207,6 +377,7 @@ object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); _collect_now(obj); + assert(!(obj->stm_flags & GCFLAG_CARDS_SET)); if (obj_sync_now & FLAG_SYNC_LARGE) { /* this was a large object. We must either synchronize the @@ -214,13 +385,15 @@ WRITE_BARRIER flag and traced into it to fix its content); or add the object to 'large_overflow_objects'. */ + struct stm_priv_segment_info_s *pseg = get_priv_segment(STM_SEGMENT->segment_num); if (STM_PSEGMENT->minor_collect_will_commit_now) { acquire_privatization_lock(); - synchronize_object_now(obj); + synchronize_object_now(obj, true); /* ignore cards! 
*/ release_privatization_lock(); } else { LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } + _cards_cleared_in_object(pseg, obj); } /* the list could have moved while appending */ @@ -230,12 +403,15 @@ static void collect_modified_old_objects(void) { - LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t * /*item*/, - _collect_now(item)); + dprintf(("collect_modified_old_objects\n")); + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, object_t * /*item*/, + _collect_now(item)); } static void collect_roots_from_markers(uintptr_t num_old) { + dprintf(("collect_roots_from_markers\n")); /* visit the marker objects */ struct list_s *mlst = STM_PSEGMENT->modified_old_objects_markers; STM_PSEGMENT->modified_old_objects_markers_num_old = list_count(mlst); @@ -254,6 +430,11 @@ static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg) { +#pragma push_macro("STM_PSEGMENT") +#pragma push_macro("STM_SEGMENT") +#undef STM_PSEGMENT +#undef STM_SEGMENT + dprintf(("throw_away_nursery\n")); /* reset the nursery by zeroing it */ size_t nursery_used; char *realnursery; @@ -279,11 +460,13 @@ wlog_t *item; TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { - assert(!_is_in_nursery((object_t *)item->addr)); + object_t *obj = (object_t*)item->addr; + assert(!_is_in_nursery(obj)); + /* mark slot as unread (it can only have the read marker in this segment) */ ((struct stm_read_marker_s *) - (pseg->pub.segment_base + (item->addr >> 4)))->rm = 0; + (pseg->pub.segment_base + (((uintptr_t)obj) >> 4)))->rm = 0; _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; @@ -292,7 +475,10 @@ } tree_clear(pseg->nursery_objects_shadows); + return nursery_used; +#pragma pop_macro("STM_SEGMENT") +#pragma pop_macro("STM_PSEGMENT") } #define MINOR_NOTHING_TO_DO(pseg) \ @@ -331,6 +517,7 @@ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); + /* All the objects we move out of the nursery become 
"overflow" objects. We use the list 'objects_pointing_to_nursery' to hold the ones we didn't trace so far. */ @@ -338,6 +525,11 @@ if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + /* collect objs with cards, adds to objects_pointing_to_nursery + and makes sure there are no objs with cards left in + modified_old_objs */ + collect_cardrefs_to_nursery(); + /* See the doc of 'objects_pointing_to_nursery': if it is NULL, then it is implicitly understood to be equal to 'modified_old_objects'. We could copy modified_old_objects @@ -347,6 +539,7 @@ num_old = 0; } else { + collect_cardrefs_to_nursery(); num_old = STM_PSEGMENT->modified_old_objects_markers_num_old; } @@ -355,6 +548,7 @@ collect_roots_in_nursery(); collect_oldrefs_to_nursery(); + assert(list_is_empty(STM_PSEGMENT->old_objects_with_cards)); /* now all surviving nursery objects have been moved out */ stm_move_young_weakrefs(); @@ -428,6 +622,7 @@ char *result = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)(result - stm_object_pages); + tree_insert(STM_PSEGMENT->young_outside_nursery, (uintptr_t)o, 0); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); @@ -529,6 +724,7 @@ memcpy(realnobj, realobj, size); obj->stm_flags |= GCFLAG_HAS_SHADOW; + tree_insert(STM_PSEGMENT->nursery_objects_shadows, (uintptr_t)obj, (uintptr_t)nobj); return nobj; diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -6,6 +6,10 @@ static uint32_t highest_overflow_number; +static void _cards_cleared_in_object(struct stm_priv_segment_info_s *pseg, object_t *obj); +static void _reset_object_cards(struct stm_priv_segment_info_s *pseg, + object_t *obj, uint8_t mark_value, + bool mark_all); static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); static size_t throw_away_nursery(struct stm_priv_segment_info_s *pseg); diff --git 
a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -83,6 +83,7 @@ { /* Check that some values are acceptable */ assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); + assert(CARD_SIZE >= 32 && CARD_SIZE % 16 == 0); assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); @@ -117,6 +118,7 @@ pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->objects_pointing_to_nursery = NULL; + pr->old_objects_with_cards = list_create(); pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); pr->modified_old_objects_markers = list_create(); @@ -156,6 +158,7 @@ for (i = 1; i <= NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->objects_pointing_to_nursery == NULL); + list_free(pr->old_objects_with_cards); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); list_free(pr->modified_old_objects_markers); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -107,6 +107,10 @@ /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ void _stm_write_slowpath(object_t *); +void _stm_write_slowpath_card(object_t *, uintptr_t); +char _stm_write_slowpath_card_extra(object_t *); +long _stm_write_slowpath_card_extra_base(void); +#define _STM_CARD_MARKED 100 object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); @@ -120,6 +124,7 @@ #include bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +bool _stm_was_written_card(object_t *obj); uintptr_t _stm_get_private_page(uintptr_t pagenum); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); @@ -137,12 +142,18 @@ void _stm_set_nursery_free_count(uint64_t free_count); long _stm_count_modified_old_objects(void); long 
_stm_count_objects_pointing_to_nursery(void); +long _stm_count_old_objects_with_cards(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); +object_t *_stm_enum_old_objects_with_cards(long index); uint64_t _stm_total_allocated(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define _STM_GCFLAG_CARDS_SET 0x08 +#define _STM_CARD_SIZE 32 /* must be >= 32 */ +#define _STM_MIN_CARD_COUNT 17 +#define _STM_MIN_CARD_OBJ_SIZE (_STM_CARD_SIZE * _STM_MIN_CARD_COUNT) #define _STM_NSE_SIGNAL_MAX _STM_TIME_N #define _STM_FAST_ALLOC (66*1024) @@ -213,6 +224,20 @@ _stm_write_slowpath(obj); } +/* The following is a GC-optimized barrier that works on the granularity + of CARD_SIZE. It can be used on any array object, but it is only + useful with those that were internally marked with GCFLAG_HAS_CARDS. + It has the same purpose as stm_write() for TM. + 'index' is the array-item-based position within the object, which + is measured in units returned by stmcb_get_card_base_itemsize(). +*/ +__attribute__((always_inline)) +static inline void stm_write_card(object_t *obj, uintptr_t index) +{ + if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) + _stm_write_slowpath_card(obj, index); +} + /* Must be provided by the user of this library. The "size rounded up" must be a multiple of 8 and at least 16. "Tracing" an object means enumerating all GC references in it, @@ -223,6 +248,16 @@ */ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +/* a special trace-callback that is only called for the marked + ranges of indices (using stm_write_card(o, index)) */ +extern void stmcb_trace_cards(struct object_s *, void (object_t **), + uintptr_t start, uintptr_t stop); +/* this function will be called on objects that support cards. 
+ It returns the base_offset (in bytes) inside the object from + where the indices start, and item_size (in bytes) for the size of + one item */ +extern void stmcb_get_card_base_itemsize(struct object_s *, + uintptr_t offset_itemsize[2]); extern void stmcb_commit_soon(void); @@ -248,6 +283,7 @@ return (object_t *)p; } + /* Allocate a weakref object. Weakref objects have a reference to an object at the byte-offset stmcb_size_rounded_up(obj) - sizeof(void*) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,6 +12,7 @@ #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... +#define _STM_CARD_SIZE ... #define STM_STACK_MARKER_NEW ... #define STM_STACK_MARKER_OLD ... @@ -41,6 +42,9 @@ object_t *stm_allocate_weakref(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); +/*void stm_write_card(); use _checked_stm_write_card() instead */ + + void stm_setup(void); void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); @@ -49,8 +53,10 @@ object_t *stm_setup_prebuilt_weakref(object_t *); bool _checked_stm_write(object_t *obj); +bool _checked_stm_write_card(object_t *obj, uintptr_t index); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +bool _stm_was_written_card(object_t *obj); char *_stm_real_address(object_t *obj); char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); @@ -92,8 +98,11 @@ long _stm_count_modified_old_objects(void); long _stm_count_objects_pointing_to_nursery(void); +long _stm_count_old_objects_with_cards(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); +object_t *_stm_enum_old_objects_with_cards(long index); + void stm_collect(long level); uint64_t _stm_total_allocated(void); @@ -181,6 +190,10 @@ CHECKED(stm_write(object)); } +bool _checked_stm_write_card(object_t *object, 
uintptr_t index) { + CHECKED(stm_write_card(object, index)); +} + bool _check_stop_safe_point(void) { CHECKED(_stm_stop_safe_point()); } @@ -287,6 +300,36 @@ } } +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + int i; + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 421420) { + /* basic case: no references */ + return; + } + + for (i=start; (i < myobj->type_id - 421420) && (i < stop); i++) { + object_t **ref = ((object_t **)(myobj + 1)) + i; + visit(ref); + } +} + +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) +{ + struct myobj_s *myobj = (struct myobj_s*)obj; + if (myobj->type_id < 421420) { + offset_itemsize[0] = SIZEOF_MYOBJ; + offset_itemsize[1] = 1; + } + else { + offset_itemsize[0] = sizeof(struct myobj_s); + offset_itemsize[1] = sizeof(object_t *); + } +} + void stm_push_marker(stm_thread_local_t *tl, uintptr_t onum, object_t *ob) { STM_PUSH_MARKER(*tl, onum, ob); @@ -323,6 +366,7 @@ HDR = lib.SIZEOF_MYOBJ assert HDR == 8 GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER +CARD_SIZE = lib._STM_CARD_SIZE # 16b at least NB_SEGMENTS = lib.STM_NB_SEGMENTS FAST_ALLOC = lib._STM_FAST_ALLOC @@ -371,22 +415,28 @@ lib._set_type_id(o, tid) return o -def stm_set_ref(obj, idx, ref): - stm_write(obj) +def stm_set_ref(obj, idx, ref, use_cards=False): + if use_cards: + stm_write_card(obj, idx) + else: + stm_write(obj) lib._set_ptr(obj, idx, ref) def stm_get_ref(obj, idx): stm_read(obj) return lib._get_ptr(obj, idx) -def stm_set_char(obj, c, offset=HDR): - stm_write(obj) +def stm_set_char(obj, c, offset=HDR, use_cards=False): assert HDR <= offset < stm_get_obj_size(obj) + if use_cards: + stm_write_card(obj, offset - HDR) + else: + stm_write(obj) stm_get_real_address(obj)[offset] = c def stm_get_char(obj, offset=HDR): + assert HDR <= offset < stm_get_obj_size(obj) stm_read(obj) - assert HDR <= offset < stm_get_obj_size(obj) return 
stm_get_real_address(obj)[offset] def stm_get_real_address(obj): @@ -395,16 +445,24 @@ def stm_read(o): lib.stm_read(o) + def stm_write(o): if lib._checked_stm_write(o): raise Conflict() +def stm_write_card(o, index): + if lib._checked_stm_write_card(o, index): + raise Conflict() + def stm_was_read(o): return lib._stm_was_read(o) def stm_was_written(o): return lib._stm_was_written(o) +def stm_was_written_card(o): + return lib._stm_was_written_card(o) + def stm_start_safe_point(): lib._stm_start_safe_point() @@ -445,6 +503,14 @@ return None return map(lib._stm_enum_objects_pointing_to_nursery, range(count)) +def old_objects_with_cards(): + count = lib._stm_count_old_objects_with_cards() + if count < 0: + return None + return map(lib._stm_enum_old_objects_with_cards, range(count)) + + + SHADOWSTACK_LENGTH = 1000 _keepalive = weakref.WeakKeyDictionary() diff --git a/c7/test/test_card_marking.py b/c7/test/test_card_marking.py new file mode 100644 --- /dev/null +++ b/c7/test/test_card_marking.py @@ -0,0 +1,223 @@ +from support import * +import py + + +class TestBasic(BaseTest): + + def _collect(self, kind): + if kind == 0: + stm_minor_collect() + elif kind == 1: + stm_major_collect() + elif kind == 2: + self.switch(1) + self.start_transaction() + stm_major_collect() + self.abort_transaction() + self.switch(0) + + def test_simple(self): + o = stm_allocate_old_refs(1024) + self.start_transaction() + stm_read(o) + stm_write(o) + self.commit_transaction() + + def test_simple2(self): + o = stm_allocate_old_refs(1024) + self.start_transaction() + stm_write_card(o, 5) + assert not stm_was_written(o) # don't remove GCFLAG_WRITE_BARRIER + assert stm_was_written_card(o) + self.commit_transaction() + + @py.test.mark.parametrize("k", range(3)) + def test_overflow(self, k): + self.start_transaction() + o = stm_allocate_refs(1024) + + self.push_root(o) + self._collect(k) + o = self.pop_root() + + stm_write_card(o, 5) + + assert o in old_objects_with_cards() + assert o not in 
modified_old_objects() # overflow object + assert o not in objects_pointing_to_nursery() + # don't remove GCFLAG_WB + assert not stm_was_written(o) + stm_write(o) + assert stm_was_written(o) + self.commit_transaction() + + def test_nursery(self): + o = stm_allocate_old_refs(200) + self.start_transaction() + p = stm_allocate(64) + stm_set_ref(o, 199, p, True) + + # without a write-barrier: + lib._set_ptr(o, 0, ffi.cast("object_t*", -1)) + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + lib._set_ptr(o, 0, ffi.NULL) + + pn = stm_get_ref(o, 199) + assert not is_in_nursery(pn) + assert pn != p + + assert not stm_was_written(o) + stm_write_card(o, 2) + assert stm_was_written_card(o) + + # card cleared after last collection, + # so no retrace of index 199: + + # without a write-barrier: + lib._set_ptr(o, 199, ffi.cast("object_t*", -1)) + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + def test_nursery2(self): + o = stm_allocate_old_refs(200) + self.start_transaction() + p = stm_allocate(64) + d = stm_allocate(64) + e = stm_allocate(64) + stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1, d, False) + lib._set_ptr(o, 100, e) # no barrier + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + # stm_write in stm_set_ref made it trace everything + assert not is_in_nursery(stm_get_ref(o, 199)) + assert not is_in_nursery(stm_get_ref(o, 1)) + assert not is_in_nursery(stm_get_ref(o, 100)) + + def test_nursery3(self): + o = stm_allocate_old_refs(2000) + self.start_transaction() + stm_minor_collect() + + p = stm_allocate(64) + d = stm_allocate(64) + stm_set_ref(o, 1999, p, True) + stm_set_ref(o, 1, d, True) + + lib._set_ptr(o, 1000, ffi.cast("object_t*", -1)) + + assert not stm_was_written(o) + assert stm_was_written_card(o) + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + assert not is_in_nursery(stm_get_ref(o, 1999)) + assert not is_in_nursery(stm_get_ref(o, 1)) + + + def test_abort_cleanup(self): + o = 
stm_allocate_old_refs(200) + self.start_transaction() + stm_minor_collect() + + p = stm_allocate_refs(64) + d = stm_allocate(64) + e = stm_allocate(64) + stm_set_ref(o, 199, p, True) + stm_set_ref(o, 1, d, True) + stm_set_ref(p, 1, e) + + self.abort_transaction() + + assert not modified_old_objects() + assert not objects_pointing_to_nursery() + assert not old_objects_with_cards() + + self.start_transaction() + d = stm_allocate(64) + e = stm_allocate(64) + lib._set_ptr(o, 199, d) # no barrier + stm_set_ref(o, 1, e, True) # card barrier + + self.push_root(o) + stm_minor_collect() + o = self.pop_root() + + assert not is_in_nursery(stm_get_ref(o, 1)) + assert is_in_nursery(stm_get_ref(o, 199)) # not traced + + @py.test.mark.parametrize("k", range(3)) + def test_major_gc(self, k): + o = stm_allocate_old_refs(200) + self.start_transaction() + p = stm_allocate(64) + stm_set_ref(o, 0, p, True) + + self.push_root(o) + stm_major_collect() + o = self.pop_root() + + stm_set_ref(o, 1, ffi.NULL, True) + p = stm_get_ref(o, 0) + assert stm_was_written_card(o) + + self.push_root(o) + self._collect(k) + o = self.pop_root() + + assert not stm_was_written_card(o) + assert stm_get_ref(o, 0) == p + self.commit_transaction() + + def test_synchronize_objs(self): + o = stm_allocate_old(1000+20*CARD_SIZE) + + self.start_transaction() + stm_set_char(o, 'a', 1000, False) + self.commit_transaction() + + self.switch(1) + + self.start_transaction() + stm_set_char(o, 'b', 1001, False) + assert stm_get_char(o, 1000) == 'a' + self.commit_transaction() + + self.switch(0) + + self.start_transaction() + assert stm_get_char(o, 1001) == 'b' + + stm_set_char(o, 'c', 1000, True) + stm_set_char(o, 'c', 1000+CARD_SIZE, True) + stm_set_char(o, 'c', 1000+CARD_SIZE*2, True) + stm_set_char(o, 'c', 1000+CARD_SIZE*3, True) + + stm_set_char(o, 'd', 1000+CARD_SIZE*10, True) + + stm_set_char(o, 'e', 1000+CARD_SIZE*12, True) + self.commit_transaction() + + self.switch(1) + + self.start_transaction() + assert 
stm_get_char(o, 1000) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE*2) == 'c' + assert stm_get_char(o, 1000+CARD_SIZE*3) == 'c' + + assert stm_get_char(o, 1000+CARD_SIZE*10) == 'd' + + assert stm_get_char(o, 1000+CARD_SIZE*12) == 'e' + + self.commit_transaction() diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -36,17 +36,22 @@ # we win but cannot wait in tests... raise WriteWriteConflictNotTestable - if our_trs.inevitable: + if our_trs.start_time >= other_trs.start_time: + abort_other = False + else: + abort_other = True + + if other_trs.check_must_abort(): + abort_other = True + elif our_trs.inevitable: + abort_other = True + elif other_trs.inevitable: + abort_other = False + + if not abort_other: + our_trs.set_must_abort(objs_in_conflict) + else: other_trs.set_must_abort(objs_in_conflict) - elif other_trs.start_time < our_trs.start_time: - pass - elif not other_trs.inevitable: - other_trs.set_must_abort(objs_in_conflict) - - if not other_trs.check_must_abort(): - our_trs.set_must_abort(objs_in_conflict) - elif wait: - assert not our_trs.inevitable class TransactionState(object): @@ -375,7 +380,7 @@ thread_state.register_root(r) def op_allocate_ref(ex, global_state, thread_state): - num = str(global_state.rnd.randrange(1, 100)) + num = str(global_state.rnd.randrange(1, 1000)) r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) @@ -410,6 +415,7 @@ r = thread_state.get_random_root() trs = thread_state.transaction_state is_ref = global_state.has_ref_type(r) + try_cards = global_state.rnd.randrange(1, 100) > 5# and False # # check for possible write-write conflict: was_written = False @@ -438,13 +444,13 @@ thread_state.abort_transaction() offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v)) + 
ex.do(raising_call(aborts, "stm_set_ref", r, offset, v, try_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v)) + ex.do(raising_call(False, "stm_set_ref", r, "0", v, try_cards)) else: - ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) + ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset, try_cards)) if not aborts: - ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR")) + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR", try_cards)) def op_read(ex, global_state, thread_state): r = thread_state.get_random_root() From noreply at buildbot.pypy.org Sat Aug 9 16:14:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 16:14:12 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: A branch in which we use a custom setjmp() that remains valid even Message-ID: <20140809141412.E11821C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1275:0fd9e73b7c70 Date: 2014-08-09 14:31 +0200 http://bitbucket.org/pypy/stmgc/changeset/0fd9e73b7c70/ Log: A branch in which we use a custom setjmp() that remains valid even after we returned from the frame that calls it From noreply at buildbot.pypy.org Sat Aug 9 16:14:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 16:14:14 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Adapt the API for rewind_jmpbuf. Message-ID: <20140809141414.069DB1C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1276:327596f0760e Date: 2014-08-09 16:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/327596f0760e/ Log: Adapt the API for rewind_jmpbuf. 
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c new file mode 100644 --- /dev/null +++ b/c7/stm/rewind_setjmp.c @@ -0,0 +1,109 @@ +#include "rewind_setjmp.h" +#include +#include +#include +#include + + +struct _rewind_jmp_moved_s { + struct _rewind_jmp_moved_s *next; + size_t size; +}; +#define RJM_HEADER sizeof(struct _rewind_jmp_moved_s) + +#ifndef RJBUF_CUSTOM_MALLOC +#define rj_malloc malloc +#define rj_free free +#else +void *rj_malloc(size_t); +void rj_free(void *); +#endif + + +static void copy_stack(rewind_jmp_thread *rjthread, char *base) +{ + char *stop = rjthread->head->frame_base; + assert(stop > base); + struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *) + rj_malloc(RJM_HEADER + (stop - base)); + assert(next != NULL); /* XXX out of memory */ + next->next = rjthread->moved_off; + next->size = stop - base; + memcpy(((char *)next) + RJM_HEADER, base, stop - base); + + rjthread->moved_off_base = stop; + rjthread->moved_off = next; +} + +__attribute__((noinline)) +int rewind_jmp_setjmp(rewind_jmp_thread *rjthread) +{ + if (rjthread->moved_off) { + _rewind_jmp_free_stack_slices(rjthread); + } + rewind_jmp_thread *volatile rjthread1 = rjthread; + int result; + if (__builtin_setjmp(rjthread->jmpbuf) == 0) { + rjthread = rjthread1; + rjthread->initial_head = rjthread->head; + result = 0; + } + else { + rjthread = rjthread1; + rjthread->head = rjthread->initial_head; + result = 1; + } + copy_stack(rjthread, (char *)&rjthread1); + return result; +} + +__attribute__((noinline)) +static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free) +{ + assert(rjthread->moved_off_base != NULL); + + while (rjthread->moved_off) { + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + char *target = rjthread->moved_off_base; + target -= p->size; + if (target < stack_free) { + /* need more stack space! 
*/ + do_longjmp(rjthread, alloca(stack_free - target)); + } + memcpy(target, ((char *)p) + RJM_HEADER, p->size); + rjthread->moved_off_base = target; + rjthread->moved_off = p->next; + rj_free(p); + } + __builtin_longjmp(rjthread->jmpbuf, 1); +} + +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) +{ + char _rewind_jmp_marker; + do_longjmp(rjthread, &_rewind_jmp_marker); +} + +__attribute__((noinline)) +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) +{ + if (rjthread->head == NULL) { + _rewind_jmp_free_stack_slices(rjthread); + return; + } + assert(rjthread->moved_off_base < (char *)rjthread->head); + copy_stack(rjthread, rjthread->moved_off_base); +} + +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) +{ + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + struct _rewind_jmp_moved_s *pnext; + while (p) { + pnext = p->next; + rj_free(p); + p = pnext; + } + rjthread->moved_off = NULL; + rjthread->moved_off_base = NULL; +} diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h new file mode 100644 --- /dev/null +++ b/c7/stm/rewind_setjmp.h @@ -0,0 +1,79 @@ +#ifndef _REWIND_SETJMP_H_ +#define _REWIND_SETJMP_H_ + +/************************************************************ + + : : ^^^^^ + |-------------------| older frames in the stack + | prev=0 | + ,---> | rewind_jmp_buf | + | |-------------------| + | | | + | : : + | : : + | | | + | |-------------------| + `---------prev | + ,----> | rewind_jmp_buf | + | +-------------------| + | | | + | : : + | | | + | |-------------------| + `----------prev | + ,---> | rewind_jmp_buf | <--------------- MOVED_OFF_BASE + | |---------------- +-------------+ + | | | STACK COPY | + | | : : + | : | size | + | | | next | <---- MOVED_OFF + | | +---|------ +-------------+ + | | | | | STACK COPY | + | |-------------------| | : (SEQUEL) : + `---------prev | | : : +HEAD-----> | rewind_jmp_buf | | | | + |-------------------| | | size | + `------> | next=0 | + +-------------+ + + 
+************************************************************/ + +typedef struct _rewind_jmp_buf { + char *frame_base; + struct _rewind_jmp_buf *prev; +} rewind_jmp_buf; + +typedef struct { + rewind_jmp_buf *head; + rewind_jmp_buf *initial_head; + char *moved_off_base; + struct _rewind_jmp_moved_s *moved_off; + void *jmpbuf[5]; +} rewind_jmp_thread; + + +#define rewind_jmp_enterframe(rjthread, rjbuf) do { \ + (rjbuf)->frame_base = __builtin_frame_address(0); \ + (rjbuf)->prev = (rjthread)->head; \ + (rjthread)->head = (rjbuf); \ +} while (0) + +#define rewind_jmp_leaveframe(rjthread, rjbuf) do { \ + (rjthread)->head = (rjbuf)->prev; \ + if ((rjbuf)->frame_base == (rjthread)->moved_off_base) \ + _rewind_jmp_copy_stack_slice(rjthread); \ +} while (0) + +int rewind_jmp_setjmp(rewind_jmp_thread *rjthread); +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread); + +#define rewind_jmp_forget(rjthread) do { \ + if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ + (rjthread)->moved_off_base = 0; \ +} while (0) + +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *); +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *); + +#endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -13,6 +13,8 @@ #include #include +#include "stm/rewind_setjmp.h" + #if LONG_MAX == 2147483647 # error "Requires a 64-bit environment" #endif @@ -25,7 +27,6 @@ typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX struct stm_creation_marker_s stm_creation_marker_t; typedef TLPREFIX char stm_char; -typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ struct stm_read_marker_s { /* In every segment, every object has a corresponding read marker. 
@@ -44,7 +45,6 @@ stm_char *nursery_current; uintptr_t nursery_end; struct stm_thread_local_s *running_thread; - stm_jmpbuf_t *jmpbuf_ptr; }; #define STM_SEGMENT ((stm_segment_info_t *)4352) @@ -77,6 +77,7 @@ #define _STM_MARKER_LEN 80 typedef struct stm_thread_local_s { + rewind_jmp_thread rjthread; /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; /* a generic optional thread-local object */ @@ -114,7 +115,6 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ @@ -326,39 +326,38 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); +/* At some key places, like the entry point of the thread and in the + function with the interpreter's dispatch loop, you need to declare + a local variable of type 'rewind_jmp_buf' and call these macros. */ +#define stm_rewind_jmp_enterframe(tl, rjbuf) \ + rewind_jmp_enterframe(&(tl)->rjthread, rjbuf) +#define stm_rewind_jmp_leaveframe(tl, rjbuf) \ + rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf) + /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. - Use the macro STM_START_TRANSACTION() to start a transaction that - can be restarted using the 'jmpbuf' (a local variable of type - stm_jmpbuf_t). */ -#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ - _stm_start_transaction(tl, &jmpbuf); \ -}) - -/* Start an inevitable transaction, if it's going to return from the - current function immediately. */ -static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - _stm_start_transaction(tl, NULL); -} - -/* Commit a transaction. 
*/ + The stm_start_transaction() call returns the number of times it + returned, starting at 0. If it is > 0, then the transaction was + aborted and restarted this number of times. */ +long stm_start_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); -/* Abort the currently running transaction. */ +/* Abort the currently running transaction. This function never + returns: it jumps back to the stm_start_transaction(). */ void stm_abort_transaction(void) __attribute__((noreturn)); -/* Turn the current transaction inevitable. The 'jmpbuf' passed to - STM_START_TRANSACTION() is not going to be used any more after - this call (but the stm_become_inevitable() itself may still abort). */ +/* Turn the current transaction inevitable. + The stm_become_inevitable() itself may still abort. */ static inline void stm_become_inevitable(stm_thread_local_t *tl, const char* msg) { + abort();/* XXX assert(STM_SEGMENT->running_thread == tl); if (STM_SEGMENT->jmpbuf_ptr != NULL) - _stm_become_inevitable(msg); + _stm_become_inevitable(msg);*/ } static inline int stm_is_inevitable(void) { - return (STM_SEGMENT->jmpbuf_ptr == NULL); + return 0; /* XXX + return (STM_SEGMENT->jmpbuf_ptr == NULL); */ } /* Forces a safe-point if needed. 
Normally not needed: this is From noreply at buildbot.pypy.org Sat Aug 9 16:14:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 16:14:15 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Import and adapt the test file Message-ID: <20140809141415.099921C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1277:6659f9eaeeb5 Date: 2014-08-09 16:14 +0200 http://bitbucket.org/pypy/stmgc/changeset/6659f9eaeeb5/ Log: Import and adapt the test file diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c new file mode 100644 --- /dev/null +++ b/c7/test/test_rewind.c @@ -0,0 +1,198 @@ +#include +#include +#include +#include +#include "rewind_setjmp.h" + + +rewind_jmp_thread gthread; +int gevents[1000]; +int num_gevents = 0; + +void gevent(int num) +{ + assert(num_gevents <= sizeof(gevents) / sizeof(int)); + gevents[num_gevents++] = num; +} + +void check_gevents(int expected[], int expected_size) +{ + int i; + int expected_count = expected_size / sizeof(int); + for (i = 0; i < expected_count && i < num_gevents; i++) { + assert(gevents[i] == expected[i]); + } + assert(num_gevents == expected_count); +} + +#define CHECK(expected) check_gevents(expected, sizeof(expected)) + +/************************************************************/ + +__attribute__((noinline)) +void f1(int x) +{ + gevent(1); + if (x < 10) { + rewind_jmp_longjmp(>hread); + } +} + +static int test1_x; + +void test1(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + + test1_x = 0; + rewind_jmp_setjmp(>hread); + + test1_x++; + f1(test1_x); + + assert(test1_x == 10); + int expected[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + CHECK(expected); + + rewind_jmp_leaveframe(>hread, &buf); +} + +/************************************************************/ + +static int test2_x; + +__attribute__((noinline)) +int f2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + test2_x = 0; + rewind_jmp_setjmp(>hread); + 
rewind_jmp_leaveframe(>hread, &buf); + return ++test2_x; +} + +void test2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f2(); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +__attribute__((noinline)) +int f3(int rec) +{ + if (rec > 0) + return f3(rec - 1); + else + return f2(); +} + +void test3(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f3(50); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +__attribute__((noinline)) +int f4(int rec) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int res; + if (rec > 0) + res = f4(rec - 1); + else + res = f2(); + rewind_jmp_leaveframe(>hread, &buf); + return res; +} + +void test4(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f4(5); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +void test5(void) +{ + struct { int a; rewind_jmp_buf buf; int b; } sbuf; + rewind_jmp_enterframe(>hread, &sbuf.buf); + sbuf.a = 42; + sbuf.b = -42; + test2_x = 0; + rewind_jmp_setjmp(>hread); + sbuf.a++; + sbuf.b--; + gevent(sbuf.a); + gevent(sbuf.b); + if (test2_x == 0) { + test2_x++; + rewind_jmp_longjmp(>hread); + } + int expected[] = {43, -43, 43, -43}; + CHECK(expected); + rewind_jmp_leaveframe(>hread, &sbuf.buf); +} + +/************************************************************/ + +int rj_malloc_count = 0; + +void *rj_malloc(size_t size) +{ + rj_malloc_count++; + 
void *ptr = malloc(size); + fprintf(stderr, "malloc(%ld) -> %p\n", (long)size, ptr); + return ptr; +} + +void rj_free(void *ptr) +{ + if (ptr) + rj_malloc_count--; + fprintf(stderr, "free(%p)\n", ptr); + free(ptr); +} + + +int main(int argc, char *argv[]) +{ + assert(argc > 1); + if (!strcmp(argv[1], "1")) test1(); + else if (!strcmp(argv[1], "2")) test2(); + else if (!strcmp(argv[1], "3")) test3(); + else if (!strcmp(argv[1], "4")) test4(); + else if (!strcmp(argv[1], "5")) test5(); + else + assert(!"bad argv[1]"); + assert(rj_malloc_count == 0); + return 0; +} diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py new file mode 100644 --- /dev/null +++ b/c7/test/test_rewind.py @@ -0,0 +1,19 @@ +import os + +def run_test(opt): + err = os.system("clang -g -O%d -Werror -DRJBUF_CUSTOM_MALLOC -I../stm" + " -o test_rewind_O%d test_rewind.c ../stm/rewind_setjmp.c" + % (opt, opt)) + if err != 0: + raise OSError("clang failed on test_rewind.c") + for testnum in [1, 2, 3, 4, 5]: + print '=== O%d: RUNNING TEST %d ===' % (opt, testnum) + err = os.system("./test_rewind_O%d %d" % (opt, testnum)) + if err != 0: + raise OSError("'test_rewind_O%d %d' failed" % (opt, testnum)) + os.unlink("./test_rewind_O%d" % (opt,)) + +def test_O0(): run_test(0) +def test_O1(): run_test(1) +def test_O2(): run_test(2) +def test_O3(): run_test(3) From noreply at buildbot.pypy.org Sat Aug 9 17:05:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:05:59 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: fixing tests, in-progress Message-ID: <20140809150559.0DE301D361D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1278:5aef14b3bb77 Date: 2014-08-09 17:06 +0200 http://bitbucket.org/pypy/stmgc/changeset/5aef14b3bb77/ Log: fixing tests, in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -324,14 +324,14 @@ STM_SEGMENT->transaction_read_version = 1; } -void 
_stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) { assert(!_stm_in_transaction(tl)); s_mutex_lock(); retry: - if (jmpbuf == NULL) { + if (inevitable) { wait_for_end_of_inevitable_transaction(tl); } @@ -346,11 +346,9 @@ STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->marker_inev[1] = 0; - if (jmpbuf == NULL) + if (inevitable) marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR - : TS_INEVITABLE); - STM_SEGMENT->jmpbuf_ptr = jmpbuf; + STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR); #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -390,6 +388,22 @@ check_nursery_at_transaction_start(); } +long stm_start_transaction(stm_thread_local_t *tl) +{ +#ifdef STM_NO_AUTOMATIC_SETJMP + long repeat_count = 0; /* test/support.py */ +#else + long repeat_count = rewind_jmp_setjmp(&tl->rjthread); +#endif + _stm_start_transaction(tl, false); + return repeat_count; +} + +void stm_start_inevitable_transaction(stm_thread_local_t *tl) +{ + _stm_start_transaction(tl, true); +} + /************************************************************/ @@ -814,7 +828,7 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); /* if a major collection is required, do it here */ if (is_major_collection_requested()) { @@ -987,6 +1001,10 @@ #pragma pop_macro("STM_PSEGMENT") } +#ifdef STM_NO_AUTOMATIC_SETJMP +void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn)); +#endif + static void abort_with_mutex(void) { assert(_has_mutex()); @@ -996,10 +1014,9 @@ abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); - stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + stm_thread_local_t *tl = STM_SEGMENT->running_thread; /* clear 
memory registered on the thread-local */ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); @@ -1035,9 +1052,11 @@ */ usleep(1); - assert(jmpbuf_ptr != NULL); - assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ - __builtin_longjmp(*jmpbuf_ptr, 1); +#ifdef STM_NO_AUTOMATIC_SETJMP + _test_run_abort(tl); +#else + rewind_jmp_longjmp(&tl->rjthread); +#endif } void _stm_become_inevitable(const char *msg) @@ -1051,12 +1070,11 @@ marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); clear_callbacks_on_abort(); } else { assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); - assert(STM_SEGMENT->jmpbuf_ptr == NULL); } s_mutex_unlock(); diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -3,6 +3,9 @@ #endif + +#if 0 + /* XXX this is currently not doing copy-on-write, but simply forces a copy of all pages as soon as fork() is called. 
*/ @@ -299,3 +302,7 @@ fork_support_ready = true; } } +#endif +static void setup_forksupport(void) { + if (0) _page_do_reshare(0, 0); +} diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -36,7 +36,7 @@ } __attribute__((noinline)) -int rewind_jmp_setjmp(rewind_jmp_thread *rjthread) +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread) { if (rjthread->moved_off) { _rewind_jmp_free_stack_slices(rjthread); @@ -51,13 +51,14 @@ else { rjthread = rjthread1; rjthread->head = rjthread->initial_head; - result = 1; + result = rjthread->repeat_count + 1; } + rjthread->repeat_count = result; copy_stack(rjthread, (char *)&rjthread1); return result; } -__attribute__((noinline)) +__attribute__((noinline, noreturn)) static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free) { assert(rjthread->moved_off_base != NULL); @@ -78,6 +79,7 @@ __builtin_longjmp(rjthread->jmpbuf, 1); } +__attribute__((noreturn)) void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) { char _rewind_jmp_marker; diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -50,6 +50,7 @@ char *moved_off_base; struct _rewind_jmp_moved_s *moved_off; void *jmpbuf[5]; + long repeat_count; } rewind_jmp_thread; @@ -65,8 +66,8 @@ _rewind_jmp_copy_stack_slice(rjthread); \ } while (0) -int rewind_jmp_setjmp(rewind_jmp_thread *rjthread); -void rewind_jmp_longjmp(rewind_jmp_thread *rjthread); +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread); +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); #define rewind_jmp_forget(rjthread) do { \ if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -36,3 +36,4 @@ #include "stm/weakref.c" #include "stm/timing.c" #include "stm/marker.c" +#include "stm/rewind_setjmp.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ 
b/c7/stmgc.h @@ -77,9 +77,10 @@ #define _STM_MARKER_LEN 80 typedef struct stm_thread_local_s { - rewind_jmp_thread rjthread; /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; + /* rewind_setjmp's interface */ + rewind_jmp_thread rjthread; /* a generic optional thread-local object */ object_t *thread_local_obj; /* in case this thread runs a transaction that aborts, @@ -340,6 +341,7 @@ returned, starting at 0. If it is > 0, then the transaction was aborted and restarted this number of times. */ long stm_start_transaction(stm_thread_local_t *tl); +void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); /* Abort the currently running transaction. This function never @@ -350,7 +352,7 @@ The stm_become_inevitable() itself may still abort. */ static inline void stm_become_inevitable(stm_thread_local_t *tl, const char* msg) { - abort();/* XXX + assert(0);/* XXX assert(STM_SEGMENT->running_thread == tl); if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg);*/ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -7,7 +7,6 @@ ffi = cffi.FFI() ffi.cdef(""" typedef ... object_t; -typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... 
@@ -64,7 +63,8 @@ uintptr_t _stm_get_private_page(uintptr_t pagenum); int _stm_get_flags(object_t *obj); -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); +void clear_jmpbuf(stm_thread_local_t *tl); +long stm_start_transaction(stm_thread_local_t *tl); bool _check_commit_transaction(void); bool _check_abort_transaction(void); bool _check_become_inevitable(stm_thread_local_t *tl); @@ -148,7 +148,7 @@ GC_N_SMALL_REQUESTS = 36 # from gcpage.c LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h -lib = ffi.verify(''' +lib = ffi.verify(r''' #include #include #include @@ -167,23 +167,26 @@ return obj->stm_flags; } +void clear_jmpbuf(stm_thread_local_t *tl) { + memset(&tl->rjthread, 0, sizeof(rewind_jmp_thread)); +} + +__attribute__((noreturn)) +void _test_run_abort(stm_thread_local_t *tl) { + void **jmpbuf = tl->rjthread.jmpbuf; + fprintf(stderr, "~~~~~ ABORT ~~~~~\n"); + __builtin_longjmp(jmpbuf, 1); +} + #define CHECKED(CALL) \ - stm_jmpbuf_t here; \ - stm_segment_info_t *segment = STM_SEGMENT; \ - if (__builtin_setjmp(here) == 0) { /* returned directly */ \ - if (segment->jmpbuf_ptr != NULL) { \ - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); \ - segment->jmpbuf_ptr = &here; \ - } \ + stm_thread_local_t *_tl = STM_SEGMENT->running_thread; \ + void **jmpbuf = _tl->rjthread.jmpbuf; \ + if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */\ CALL; \ - if (segment->jmpbuf_ptr != NULL) { \ - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ - } \ + clear_jmpbuf(_tl); \ return 0; \ } \ - if (segment->jmpbuf_ptr != NULL) { \ - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ - } \ + clear_jmpbuf(_tl); \ return 1 bool _checked_stm_write(object_t *object) { @@ -350,6 +353,7 @@ } ''', sources=source_files, define_macros=[('STM_TESTS', '1'), + ('STM_NO_AUTOMATIC_SETJMP', '1'), ('STM_LARGEMALLOC_TEST', '1'), ('STM_NO_COND_WAIT', '1'), ('STM_DEBUGPRINT', '1'), @@ -559,7 +563,9 @@ def start_transaction(self): tl = self.tls[self.current_thread] assert not 
lib._stm_in_transaction(tl) - lib._stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) + res = lib.stm_start_transaction(tl) + assert res == 0 + lib.clear_jmpbuf(tl) assert lib._stm_in_transaction(tl) # seen = set() From noreply at buildbot.pypy.org Sat Aug 9 17:18:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:18:41 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Progress Message-ID: <20140809151841.954A21C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1279:c872897a138f Date: 2014-08-09 17:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/c872897a138f/ Log: Progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -1003,6 +1003,14 @@ #ifdef STM_NO_AUTOMATIC_SETJMP void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn)); +int stm_is_inevitable(void) +{ + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: return 0; + case TS_INEVITABLE: return 1; + default: abort(); + } +} #endif static void abort_with_mutex(void) diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -77,4 +77,6 @@ void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *); void _rewind_jmp_free_stack_slices(rewind_jmp_thread *); +#define rewind_jmp_armed(rjthread) ((rjthread)->moved_off_base != 0) + #endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -350,16 +350,18 @@ /* Turn the current transaction inevitable. The stm_become_inevitable() itself may still abort. 
*/ +#ifdef STM_NO_AUTOMATIC_SETJMP +int stm_is_inevitable(void); +#else +static inline int stm_is_inevitable(void) { + return !rewind_jmp_armed(STM_SEGMENT->running_thread->rjthread); +} +#endif static inline void stm_become_inevitable(stm_thread_local_t *tl, const char* msg) { - assert(0);/* XXX assert(STM_SEGMENT->running_thread == tl); - if (STM_SEGMENT->jmpbuf_ptr != NULL) - _stm_become_inevitable(msg);*/ -} -static inline int stm_is_inevitable(void) { - return 0; /* XXX - return (STM_SEGMENT->jmpbuf_ptr == NULL); */ + if (!stm_is_inevitable()) + _stm_become_inevitable(msg); } /* Forces a safe-point if needed. Normally not needed: this is diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -369,6 +369,7 @@ def test_inevitable_transaction_has_priority(self): self.start_transaction() + assert lib.stm_is_inevitable() == 0 lp1 = stm_allocate(16) stm_set_char(lp1, 'a') self.push_root(lp1) From noreply at buildbot.pypy.org Sat Aug 9 17:26:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:26:37 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Pass the demo2 tests Message-ID: <20140809152637.B09BD1C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1280:60f41b3b754a Date: 2014-08-09 17:26 +0200 http://bitbucket.org/pypy/stmgc/changeset/60f41b3b754a/ Log: Pass the demo2 tests diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -71,9 +71,8 @@ { nodeptr_t r_n; long prev, sum; - stm_jmpbuf_t here; - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); stm_read((objptr_t)global_chained_list); r_n = global_chained_list; @@ -101,11 +100,9 @@ nodeptr_t swap_nodes(nodeptr_t initial) { - stm_jmpbuf_t here; - assert(initial != NULL); - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); if (stm_thread_local.longest_marker_state 
!= 0) { fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", @@ -202,7 +199,7 @@ stm_commit_transaction(); - stm_start_inevitable_transaction(&stm_thread_local); + stm_start_transaction(&stm_thread_local); STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ assert(global_chained_list->value == -1); STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ @@ -224,7 +221,9 @@ void *demo2(void *arg) { int status; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); char *org = (char *)stm_thread_local.shadowstack; STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ @@ -244,6 +243,7 @@ STM_POP_ROOT(stm_thread_local, global_chained_list); OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); unregister_thread_local(); status = sem_post(&done); assert(status == 0); return NULL; @@ -280,11 +280,13 @@ int main(void) { int status, i; + rewind_jmp_buf rjbuf; status = sem_init(&done, 0, 0); assert(status == 0); stm_setup(); stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); stmcb_expand_marker = expand_marker; @@ -302,6 +304,7 @@ final_check(); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); unregister_thread_local(); //stm_teardown(); diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -22,6 +22,7 @@ static void copy_stack(rewind_jmp_thread *rjthread, char *base) { + assert(rjthread->head != NULL); char *stop = rjthread->head->frame_base; assert(stop > base); struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -354,7 +354,7 @@ int stm_is_inevitable(void); #else static inline int stm_is_inevitable(void) { - return 
!rewind_jmp_armed(STM_SEGMENT->running_thread->rjthread); + return !rewind_jmp_armed(&STM_SEGMENT->running_thread->rjthread); } #endif static inline void stm_become_inevitable(stm_thread_local_t *tl, From noreply at buildbot.pypy.org Sat Aug 9 17:29:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:29:26 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Extra test here Message-ID: <20140809152926.795421C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1281:289aaa8b6fda Date: 2014-08-09 17:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/289aaa8b6fda/ Log: Extra test here diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -55,6 +55,10 @@ int expected[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; CHECK(expected); + assert(rewind_jmp_armed(>hread)); + rewind_jmp_forget(>hread); + assert(!rewind_jmp_armed(>hread)); + rewind_jmp_leaveframe(>hread, &buf); } From noreply at buildbot.pypy.org Sat Aug 9 17:40:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:40:16 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Adapt demo_random. Fails obscurely for now Message-ID: <20140809154016.49FCF1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1282:3378f3c9e327 Date: 2014-08-09 17:40 +0200 http://bitbucket.org/pypy/stmgc/changeset/3378f3c9e327/ Log: Adapt demo_random. 
Fails obscurely for now diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -81,6 +81,15 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + int get_rand(int max) { if (max == 0) @@ -323,15 +332,15 @@ void *demo_random(void *arg) { int status; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); setup_thread(); objptr_t p; - stm_jmpbuf_t here; - volatile int call_fork = (arg != NULL); - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); assert(td.num_roots >= td.num_roots_at_transaction_start); td.num_roots = td.num_roots_at_transaction_start; p = NULL; @@ -349,11 +358,12 @@ if (p == (objptr_t)-1) { push_roots(); + long call_fork = (arg != NULL && *(long *)arg); if (call_fork == 0) { /* common case */ stm_commit_transaction(); td.num_roots_at_transaction_start = td.num_roots; if (get_rand(100) < 98) { - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); } else { stm_start_inevitable_transaction(&stm_thread_local); } @@ -365,7 +375,7 @@ else { /* run a fork() inside the transaction */ printf("========== FORK =========\n"); - call_fork = 0; + *(long*)arg = 0; pid_t child = fork(); printf("=== in process %d thread %lx, fork() returned %d\n", (int)getpid(), (long)pthread_self(), (int)child); @@ -385,6 +395,7 @@ } stm_commit_transaction(); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); @@ -433,6 +444,7 @@ int main(void) { int i, status; + rewind_jmp_buf rjbuf; /* pick a random seed from the time in seconds. A bit pointless for now... 
because the interleaving of the @@ -446,6 +458,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); setup_globals(); @@ -463,7 +476,7 @@ long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); long _fork = (thread_starts % forkbase) == 0; thread_starts--; - newthread(demo_random, (void *)_fork); + newthread(demo_random, &_fork); } } @@ -483,6 +496,7 @@ printf("Test OK!\n"); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); stm_teardown(); From noreply at buildbot.pypy.org Sat Aug 9 17:43:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 17:43:51 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Fix demo_simple Message-ID: <20140809154351.29FC51C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1283:52742cabf3a2 Date: 2014-08-09 17:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/52742cabf3a2/ Log: Fix demo_simple diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -41,12 +41,20 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} static sem_t done; static __thread int tl_counter = 0; -static int gl_counter = 0; +//static int gl_counter = 0; void *demo2(void *arg) { From noreply at buildbot.pypy.org Sat Aug 9 18:08:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:08:02 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Re-enable and fix forksupport.c Message-ID: <20140809160802.CB83E1C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1284:37c19d87b177 Date: 2014-08-09 18:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/37c19d87b177/ Log: 
Re-enable and fix forksupport.c diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -3,9 +3,6 @@ #endif - -#if 0 - /* XXX this is currently not doing copy-on-write, but simply forces a copy of all pages as soon as fork() is called. */ @@ -179,14 +176,15 @@ static void fork_abort_thread(long i) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); + stm_thread_local_t *tl = pr->pub.running_thread; dprintf(("forksupport_child: abort in seg%ld\n", i)); - assert(pr->pub.running_thread->associated_segment_num == i); + assert(tl->associated_segment_num == i); assert(pr->transaction_state == TS_REGULAR); set_gs_register(get_segment_base(i)); - stm_jmpbuf_t jmpbuf; - if (__builtin_setjmp(jmpbuf) == 0) { - pr->pub.jmpbuf_ptr = &jmpbuf; + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + if (rewind_jmp_setjmp(&tl->rjthread) == 0) { #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif @@ -194,6 +192,7 @@ pr->shadowstack_at_start_of_transaction); stm_abort_transaction(); } + stm_rewind_jmp_leaveframe(tl, &rjbuf); } static void forksupport_child(void) @@ -302,7 +301,3 @@ fork_support_ready = true; } } -#endif -static void setup_forksupport(void) { - if (0) _page_do_reshare(0, 0); -} From noreply at buildbot.pypy.org Sat Aug 9 18:08:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:08:03 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix demo_random.c: there was indeed an issue with markers Message-ID: <20140809160803.EED921C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1285:e5fe5d362c60 Date: 2014-08-09 18:07 +0200 http://bitbucket.org/pypy/stmgc/changeset/e5fe5d362c60/ Log: Fix demo_random.c: there was indeed an issue with markers diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -81,6 +81,15 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s 
*obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + int get_rand(int max) { if (max == 0) diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -189,6 +189,7 @@ #endif pr->pub.running_thread->shadowstack = ( pr->shadowstack_at_start_of_transaction); + strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } } From noreply at buildbot.pypy.org Sat Aug 9 18:17:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:17:13 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: hg merge default Message-ID: <20140809161713.664731C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1286:2db7026f8f85 Date: 2014-08-09 18:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/2db7026f8f85/ Log: hg merge default diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -190,6 +190,7 @@ #endif pr->pub.running_thread->shadowstack = ( pr->shadowstack_at_start_of_transaction); + strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } stm_rewind_jmp_leaveframe(tl, &rjbuf); From noreply at buildbot.pypy.org Sat Aug 9 18:17:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:17:14 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Fix demo_random in this branch Message-ID: <20140809161714.7FF341C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1287:8b798ea89719 Date: 2014-08-09 18:17 +0200 http://bitbucket.org/pypy/stmgc/changeset/8b798ea89719/ Log: Fix demo_random in this branch diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -193,6 +193,7 @@ strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } + rewind_jmp_forget(&tl->rjthread); 
stm_rewind_jmp_leaveframe(tl, &rjbuf); } From noreply at buildbot.pypy.org Sat Aug 9 18:18:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:18:46 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Fix demo_largemalloc. Message-ID: <20140809161846.D837A1C0257@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1288:b011940794d7 Date: 2014-08-09 18:18 +0200 http://bitbucket.org/pypy/stmgc/changeset/b011940794d7/ Log: Fix demo_largemalloc. diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c --- a/c7/demo/demo_largemalloc.c +++ b/c7/demo/demo_largemalloc.c @@ -25,6 +25,15 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + /************************************************************/ #define ARENA_SIZE (1024*1024*1024) @@ -67,7 +76,7 @@ int i; arena_data = malloc(ARENA_SIZE); assert(arena_data != NULL); - _stm_mutex_pages_lock(); + //_stm_mutex_pages_lock(); for (i = 0; i < 25; i++) timing(i); return 0; From noreply at buildbot.pypy.org Sat Aug 9 18:24:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:24:33 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Adapt duhton, step 1 Message-ID: <20140809162433.A0E321C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1289:58a9c340002a Date: 2014-08-09 18:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/58a9c340002a/ Log: Adapt duhton, step 1 diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -162,8 +162,9 @@ void *run_thread(void *thread_id) { - stm_jmpbuf_t here; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); TLOBJ = NULL; @@ -176,7 
+177,7 @@ TLOBJ = cell; stm_commit_transaction(); /* inevitable */ - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); cell = TLOBJ; TLOBJ = NULL; @@ -187,6 +188,7 @@ } stm_flush_timing(&stm_thread_local, 1); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); return NULL; From noreply at buildbot.pypy.org Sat Aug 9 18:24:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:24:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix for duhton Message-ID: <20140809162434.A98D61C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1290:4b4a86384381 Date: 2014-08-09 18:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/4b4a86384381/ Log: Fix for duhton diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -34,8 +34,19 @@ if (trace) trace((struct DuObject_s *)obj, visit); } +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) +{ + abort(); +} +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + abort(); +} +void stmcb_commit_soon(void) { } -void stmcb_commit_soon(void) { } + DuObject *DuObject_New(DuType *tp) From noreply at buildbot.pypy.org Sat Aug 9 18:24:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 9 Aug 2014 18:24:35 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: hg merge default Message-ID: <20140809162435.B35C11C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1291:9ac9ce9d8d37 Date: 2014-08-09 18:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/9ac9ce9d8d37/ Log: hg merge default diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -34,8 +34,19 @@ if (trace) trace((struct DuObject_s *)obj, visit); } +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) 
+{ + abort(); +} +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + abort(); +} +void stmcb_commit_soon(void) { } -void stmcb_commit_soon(void) { } + DuObject *DuObject_New(DuType *tp) From noreply at buildbot.pypy.org Sat Aug 9 19:16:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Aug 2014 19:16:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to prefer the py3k friendly syntax Message-ID: <20140809171621.7F5551C0157@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r72736:5329e5dde957 Date: 2014-08-09 10:14 -0700 http://bitbucket.org/pypy/pypy/changeset/5329e5dde957/ Log: Try to prefer the py3k friendly syntax diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -129,7 +129,7 @@ info = py.test.raises(LLException, "interp.eval_graph(graph, values)") try: got = interp.find_exception(info.value) - except ValueError, message: + except ValueError as message: got = 'None %r' % message assert got is exc, "wrong exception type, expected %r got %r" % (exc, got) From noreply at buildbot.pypy.org Sat Aug 9 19:16:22 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 9 Aug 2014 19:16:22 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140809171622.A53E01C0157@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r72737:c714037175c4 Date: 2014-08-09 10:16 -0700 http://bitbucket.org/pypy/pypy/changeset/c714037175c4/ Log: merged upstream diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -296,7 +296,7 @@ it CPython64/64. It is probably not too much work if the goal is only to get a translated -PyPy executable, and to run all tests before transaction. But you need +PyPy executable, and to run all tests before translation. 
But you need to start somewhere, and you should start with some tests in rpython/translator/c/test/, like ``test_standalone.py`` and ``test_newgc.py``: try to have them pass on top of CPython64/64. @@ -304,7 +304,7 @@ Keep in mind that this runs small translations, and some details may go wrong. The most obvious one is to check that it produces C files that use the integer type ``Signed`` --- but what is ``Signed`` defined to? -It should be equal to ``long`` on every other platforms, but on Win64 it +It should be equal to ``long`` on every other platform, but on Win64 it should be something like ``long long``. What is more generally needed is to review all the C files in @@ -315,11 +315,11 @@ Then, these two C types have corresponding RPython types: ``rffi.LONG`` and ``lltype.Signed`` respectively. The first should really correspond -to the C ``long``. Add tests that check that integers casted to one +to the C ``long``. Add tests that check that integers cast to one type or the other really have 32 and 64 bits respectively, on Win64. Once these basic tests work, you need to review ``rpython/rlib/`` for -usages of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to +uses of ``rffi.LONG`` versus ``lltype.Signed``. The goal would be to fix some more ``LONG-versus-Signed`` issues, by fixing the tests --- as always run on top of CPython64/64. Note that there was some early work done in ``rpython/rlib/rarithmetic`` with the goal of running all the @@ -329,14 +329,14 @@ The major intermediate goal is to get a translation of PyPy with ``-O2`` with a minimal set of modules, starting with ``--no-allworkingmodules``; you need to use CPython64/64 to run this translation too. Check -carefully the warnings of the C compiler at the end. I think that MSVC -is "nice" in the sense that by default a lot of mismatches of integer -sizes are reported as warnings. +carefully the warnings of the C compiler at the end. 
By default, MSVC +reports a lot of mismatches of integer sizes as warnings instead of +errors. Then you need to review ``pypy/module/*/`` for ``LONG-versus-Signed`` issues. At some time during this review, we get a working translated PyPy on Windows 64 that includes all ``--translationmodules``, i.e. -everything needed to run translations. When we are there, the hacked +everything needed to run translations. Once we have that, the hacked CPython64/64 becomes much less important, because we can run future translations on top of this translated PyPy. As soon as we get there, please *distribute* the translated PyPy. It's an essential component From noreply at buildbot.pypy.org Sat Aug 9 21:57:47 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 9 Aug 2014 21:57:47 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: hg merge default Message-ID: <20140809195747.788241C0257@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r72738:dbad1355c128 Date: 2014-08-09 20:51 +0100 http://bitbucket.org/pypy/pypy/changeset/dbad1355c128/ Log: hg merge default diff too long, truncating to 2000 out of 71845 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,11 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian 
Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,62 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +147,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +169,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +193,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -203,8 +213,11 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +231,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -234,28 +248,39 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +288,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py --- a/dotviewer/graphserver.py +++ b/dotviewer/graphserver.py @@ -160,15 +160,14 @@ " | instructions in dotviewer/sshgraphserver.py\n") try: import pygame - except ImportError: + if isinstance(e, pygame.error): + print >> f, help + except Exception, e: f.seek(0) f.truncate() - print >> f, "ImportError" + print >> f, "%s: %s" % (e.__class__.__name__, e) print >> f, " | Pygame is not installed; either install it, or" print >> f, help - else: - if isinstance(e, pygame.error): - print >> f, help io.sendmsg(msgstruct.MSG_ERROR, f.getvalue()) else: listen_server(sys.argv[1]) diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -53,6 +53,12 @@ int pypy_execute_source_ptr(char *source, void* ptr); +/* Windows hackery */ +#if defined(_MSC_VER) +# pragma comment(lib,"python27.lib") 
+#endif + + #ifdef __cplusplus } #endif diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py --- a/lib-python/2.7/cProfile.py +++ b/lib-python/2.7/cProfile.py @@ -161,7 +161,7 @@ # ____________________________________________________________ def main(): - import os, sys + import os, sys, types from optparse import OptionParser usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..." parser = OptionParser(usage=usage) @@ -184,12 +184,10 @@ sys.path.insert(0, os.path.dirname(progname)) with open(progname, 'rb') as fp: code = compile(fp.read(), progname, 'exec') - globs = { - '__file__': progname, - '__name__': '__main__', - '__package__': None, - } - runctx(code, globs, None, options.outfile, options.sort) + mainmod = types.ModuleType('__main__') + mainmod.__file__ = progname + mainmod.__package__ = None + runctx(code, mainmod.__dict__, None, options.outfile, options.sort) else: parser.print_usage() return parser diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -389,12 +389,13 @@ func.__name__ = name_or_ordinal return func -class PyDLL(CDLL): - """This class represents the Python library itself. It allows to - access Python API functions. The GIL is not released, and - Python exceptions are handled correctly. - """ - _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI +# Not in PyPy +#class PyDLL(CDLL): +# """This class represents the Python library itself. It allows to +# access Python API functions. The GIL is not released, and +# Python exceptions are handled correctly. 
+# """ +# _func_flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI if _os.name in ("nt", "ce"): @@ -447,15 +448,8 @@ return self._dlltype(name) cdll = LibraryLoader(CDLL) -pydll = LibraryLoader(PyDLL) - -if _os.name in ("nt", "ce"): - pythonapi = PyDLL("python dll", None, _sys.dllhandle) -elif _sys.platform == "cygwin": - pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2]) -else: - pythonapi = PyDLL(None) - +# not on PyPy +#pydll = LibraryLoader(PyDLL) if _os.name in ("nt", "ce"): windll = LibraryLoader(WinDLL) diff --git a/lib-python/2.7/ctypes/test/test_values.py b/lib-python/2.7/ctypes/test/test_values.py --- a/lib-python/2.7/ctypes/test/test_values.py +++ b/lib-python/2.7/ctypes/test/test_values.py @@ -4,6 +4,7 @@ import unittest from ctypes import * +from ctypes.test import xfail import _ctypes_test @@ -23,7 +24,8 @@ class Win_ValuesTestCase(unittest.TestCase): """This test only works when python itself is a dll/shared library""" - + + @xfail def test_optimizeflag(self): # This test accesses the Py_OptimizeFlag intger, which is # exported by the Python dll. @@ -40,6 +42,7 @@ else: self.assertEqual(opt, 2) + @xfail def test_frozentable(self): # Python exports a PyImport_FrozenModules symbol. This is a # pointer to an array of struct _frozen entries. 
The end of the @@ -75,6 +78,7 @@ from ctypes import _pointer_type_cache del _pointer_type_cache[struct_frozen] + @xfail def test_undefined(self): self.assertRaises(ValueError, c_int.in_dll, pydll, "Undefined_Symbol") diff --git a/lib-python/2.7/ctypes/util.py b/lib-python/2.7/ctypes/util.py --- a/lib-python/2.7/ctypes/util.py +++ b/lib-python/2.7/ctypes/util.py @@ -86,9 +86,10 @@ elif os.name == "posix": # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump - import re, tempfile, errno + import re, errno def _findLib_gcc(name): + import tempfile expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name) fdout, ccout = tempfile.mkstemp() os.close(fdout) diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py --- a/lib-python/2.7/imputil.py +++ b/lib-python/2.7/imputil.py @@ -422,7 +422,8 @@ saved back to the filesystem for future imports. The source file's modification timestamp must be provided as a Long value. """ - codestring = open(pathname, 'rU').read() + with open(pathname, 'rU') as fp: + codestring = fp.read() if codestring and codestring[-1] != '\n': codestring = codestring + '\n' code = __builtin__.compile(codestring, pathname, 'exec') @@ -603,8 +604,8 @@ self.desc = desc def import_file(self, filename, finfo, fqname): - fp = open(filename, self.desc[1]) - module = imp.load_module(fqname, fp, filename, self.desc) + with open(filename, self.desc[1]) as fp: + module = imp.load_module(fqname, fp, filename, self.desc) module.__file__ = filename return 0, module, { } diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py --- a/lib-python/2.7/modulefinder.py +++ b/lib-python/2.7/modulefinder.py @@ -109,16 +109,16 @@ def run_script(self, pathname): self.msg(2, "run_script", pathname) - fp = open(pathname, READ_MODE) - stuff = ("", "r", imp.PY_SOURCE) - self.load_module('__main__', fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = ("", "r", imp.PY_SOURCE) + self.load_module('__main__', fp, pathname, 
stuff) def load_file(self, pathname): dir, name = os.path.split(pathname) name, ext = os.path.splitext(name) - fp = open(pathname, READ_MODE) - stuff = (ext, "r", imp.PY_SOURCE) - self.load_module(name, fp, pathname, stuff) + with open(pathname, READ_MODE) as fp: + stuff = (ext, "r", imp.PY_SOURCE) + self.load_module(name, fp, pathname, stuff) def import_hook(self, name, caller=None, fromlist=None, level=-1): self.msg(3, "import_hook", name, caller, fromlist, level) @@ -461,6 +461,8 @@ fp, buf, stuff = self.find_module("__init__", m.__path__) self.load_module(fqname, fp, buf, stuff) self.msgout(2, "load_package ->", m) + if fp: + fp.close() return m def add_module(self, fqname): diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py --- a/lib-python/2.7/test/test_argparse.py +++ b/lib-python/2.7/test/test_argparse.py @@ -48,10 +48,13 @@ def tearDown(self): os.chdir(self.old_dir) + import gc + # Force a collection which should close FileType() options + gc.collect() for root, dirs, files in os.walk(self.temp_dir, topdown=False): for name in files: os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE) - shutil.rmtree(self.temp_dir, True) + shutil.rmtree(self.temp_dir, True) def create_readonly_file(self, filename): file_path = os.path.join(self.temp_dir, filename) diff --git a/lib-python/2.7/test/test_array.py b/lib-python/2.7/test/test_array.py --- a/lib-python/2.7/test/test_array.py +++ b/lib-python/2.7/test/test_array.py @@ -298,6 +298,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a + b + with self.assertRaises(TypeError): a + 'bad' @@ -320,6 +321,7 @@ b = array.array(self.badtypecode()) with self.assertRaises(TypeError): a += b + with self.assertRaises(TypeError): a += 'bad' diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py --- a/lib-python/2.7/test/test_builtin.py +++ b/lib-python/2.7/test/test_builtin.py @@ -250,14 +250,12 @@ self.assertRaises(TypeError, 
compile) self.assertRaises(ValueError, compile, 'print 42\n', '', 'badmode') self.assertRaises(ValueError, compile, 'print 42\n', '', 'single', 0xff) - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, chr(0), 'f', 'exec') self.assertRaises(TypeError, compile, 'pass', '?', 'exec', mode='eval', source='0', filename='tmp') if have_unicode: compile(unicode('print u"\xc3\xa5"\n', 'utf8'), '', 'exec') - if check_impl_detail(cpython=True): - self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') + self.assertRaises(TypeError, compile, unichr(0), 'f', 'exec') self.assertRaises(ValueError, compile, unicode('a = 1'), 'f', 'bad') diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py --- a/lib-python/2.7/test/test_file2k.py +++ b/lib-python/2.7/test/test_file2k.py @@ -479,11 +479,10 @@ def _create_file(self): if self.use_buffering: - f = open(self.filename, "w+", buffering=1024*16) + self.f = open(self.filename, "w+", buffering=1024*16) else: - f = open(self.filename, "w+") - self.f = f - self.all_files.append(f) + self.f = open(self.filename, "w+") + self.all_files.append(self.f) oldf = self.all_files.pop(0) if oldf is not None: oldf.close() diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py --- a/lib-python/2.7/test/test_gdbm.py +++ b/lib-python/2.7/test/test_gdbm.py @@ -74,6 +74,40 @@ size2 = os.path.getsize(filename) self.assertTrue(size1 > size2 >= size0) + def test_sync(self): + # check if sync works at all, not sure how to check it + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.sync() + + def test_get_key(self): + self.g = gdbm.open(filename, 'cf') + self.g['x'] = 'x' * 10000 + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g['x'], 'x' * 10000) + + def test_key_with_null_bytes(self): + key = 'a\x00b' + value = 'c\x00d' + self.g = gdbm.open(filename, 'cf') + 
self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) + + def test_unicode_key(self): + key = u'ab' + value = u'cd' + self.g = gdbm.open(filename, 'cf') + self.g[key] = value + self.g.close() + self.g = gdbm.open(filename, 'r') + self.assertEquals(self.g[key], value) + self.assertTrue(key in self.g) + self.assertTrue(self.g.has_key(key)) def test_main(): run_unittest(TestGdbm) diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py --- a/lib-python/2.7/test/test_httpservers.py +++ b/lib-python/2.7/test/test_httpservers.py @@ -335,7 +335,7 @@ response = self.request(self.tempdir_name + '/') self.check_status_and_reason(response, 404) os.chmod(self.tempdir, 0755) - f.close() + f.close() def test_head(self): response = self.request( diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -139,7 +139,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) @@ -211,7 +210,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): - # Test implementation detail: tuple re-use cwr = combinations_with_replacement self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) @@ -278,7 +276,6 @@ @test_support.impl_detail("tuple reuse is specific to CPython") def test_permutations_tuple_reuse(self): - # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) 
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -115,8 +115,8 @@ self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object - self.assertRaises((ValueError, TypeError), setitem, 0, b"") - self.assertRaises((ValueError, TypeError), setitem, 0, b"ab") + self.assertRaises(ValueError, setitem, 0, b"") + self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") @@ -166,18 +166,11 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - if test_support.check_impl_detail(cpython=True): - # what is supported and what is not supported by memoryview is - # very inconsisten on CPython. In PyPy, memoryview supports - # the buffer interface, and thus the following comparison - # succeeds. 
See also the comment in - # pypy.objspace.std.memoryview.W_MemoryView.descr_buffer - # - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef") + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/timeit.py b/lib-python/2.7/timeit.py --- a/lib-python/2.7/timeit.py +++ b/lib-python/2.7/timeit.py @@ -55,11 +55,6 @@ import gc import sys import time -try: - import itertools -except ImportError: - # Must be an older Python version (see timeit() below) - itertools = None __all__ = ["Timer"] @@ -81,7 +76,8 @@ def inner(_it, _timer): %(setup)s _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 %(stmt)s _t1 = _timer() return _t1 - _t0 @@ -96,7 +92,8 @@ def inner(_it, _timer, _func=func): setup() _t0 = _timer() - for _i in _it: + while _it > 0: + _it -= 1 _func() _t1 = _timer() return _t1 - _t0 @@ -133,9 +130,19 @@ else: raise ValueError("setup is neither a string nor callable") self.src = src # Save for traceback display - code = compile(src, dummy_src_name, "exec") - exec code in globals(), ns - self.inner = ns["inner"] + def make_inner(): + # PyPy tweak: recompile the source code each time before + # calling inner(). There are situations like Issue #1776 + # where PyPy tries to reuse the JIT code from before, + # but that's not going to work: the first thing the + # function does is the "-s" statement, which may declare + # new classes (here a namedtuple). We end up with + # bridges from the inner loop; more and more of them + # every time we call inner(). 
+ code = compile(src, dummy_src_name, "exec") + exec code in globals(), ns + return ns["inner"] + self.make_inner = make_inner elif hasattr(stmt, '__call__'): self.src = None if isinstance(setup, basestring): @@ -144,7 +151,8 @@ exec _setup in globals(), ns elif not hasattr(setup, '__call__'): raise ValueError("setup is neither a string nor callable") - self.inner = _template_func(setup, stmt) + inner = _template_func(setup, stmt) + self.make_inner = lambda: inner else: raise ValueError("stmt is neither a string nor callable") @@ -185,15 +193,12 @@ to one million. The main statement, the setup statement and the timer function to be used are passed to the constructor. """ - if itertools: - it = itertools.repeat(None, number) - else: - it = [None] * number + inner = self.make_inner() gcold = gc.isenabled() if '__pypy__' not in sys.builtin_module_names: gc.disable() # only do that on CPython try: - timing = self.inner(it, self.timer) + timing = inner(number, self.timer) finally: if gcold: gc.enable() diff --git a/lib-python/2.7/xml/sax/saxutils.py b/lib-python/2.7/xml/sax/saxutils.py --- a/lib-python/2.7/xml/sax/saxutils.py +++ b/lib-python/2.7/xml/sax/saxutils.py @@ -98,13 +98,14 @@ except AttributeError: pass # wrap a binary writer with TextIOWrapper - class UnbufferedTextIOWrapper(io.TextIOWrapper): - def write(self, s): - super(UnbufferedTextIOWrapper, self).write(s) - self.flush() - return UnbufferedTextIOWrapper(buffer, encoding=encoding, + return _UnbufferedTextIOWrapper(buffer, encoding=encoding, errors='xmlcharrefreplace', newline='\n') +# PyPy: moved this class outside the function above +class _UnbufferedTextIOWrapper(io.TextIOWrapper): + def write(self, s): + super(_UnbufferedTextIOWrapper, self).write(s) + self.flush() class XMLGenerator(handler.ContentHandler): diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -219,6 +219,8 @@ if restype is None: import ctypes 
restype = ctypes.c_int + if self._argtypes_ is None: + self._argtypes_ = [] self._ptr = self._getfuncptr_fromaddress(self._argtypes_, restype) self._check_argtypes_for_fastpath() return diff --git a/lib_pypy/_ctypes_test.py b/lib_pypy/_ctypes_test.py --- a/lib_pypy/_ctypes_test.py +++ b/lib_pypy/_ctypes_test.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -17,7 +18,8 @@ output_dir = _pypy_testcapi.get_hashed_dir(os.path.join(thisdir, cfile)) try: fp, filename, description = imp.find_module('_ctypes_test', path=[output_dir]) - imp.load_module('_ctypes_test', fp, filename, description) + with fp: + imp.load_module('_ctypes_test', fp, filename, description) except ImportError: print('could not find _ctypes_test in %s' % output_dir) _pypy_testcapi.compile_shared('_ctypes_test.c', '_ctypes_test', output_dir) diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -309,11 +309,9 @@ #endif int _m_ispad(WINDOW *win) { -#if defined WINDOW_HAS_FLAGS + // may not have _flags (and possibly _ISPAD), + // but for now let's assume that always has it return (win->_flags & _ISPAD); -#else - return 0; -#endif } void _m_getsyx(int *yx) { diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py --- a/lib_pypy/_pypy_interact.py +++ b/lib_pypy/_pypy_interact.py @@ -3,6 +3,8 @@ import sys import os +irc_header = "And now for something completely different" + def interactive_console(mainmodule=None, quiet=False): # set sys.{ps1,ps2} just before invoking the interactive interpreter. 
This @@ -15,8 +17,7 @@ if not quiet: try: from _pypy_irc_topic import some_topic - text = "And now for something completely different: ``%s''" % ( - some_topic(),) + text = "%s: ``%s''" % ( irc_header, some_topic()) while len(text) >= 80: i = text[:80].rfind(' ') print(text[:i]) diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -1,6 +1,7 @@ import os, sys, imp import tempfile, binascii + def get_hashed_dir(cfile): with open(cfile,'r') as fid: content = fid.read() @@ -12,10 +13,18 @@ k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') - output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s%s' %(k1, k2) + try: + username = os.environ['USER'] #linux, et al + except KeyError: + try: + username = os.environ['USERNAME'] #windows + except KeyError: + username = os.getuid() + output_dir = tempfile.gettempdir() + os.path.sep + 'tmp_%s_%s%s' % ( + username, k1, k2) if not os.path.exists(output_dir): os.mkdir(output_dir) - return output_dir + return output_dir def _get_c_extension_suffix(): diff --git a/lib_pypy/_testcapi.py b/lib_pypy/_testcapi.py --- a/lib_pypy/_testcapi.py +++ b/lib_pypy/_testcapi.py @@ -1,4 +1,5 @@ -import imp, os +import imp +import os try: import cpyext @@ -12,6 +13,7 @@ try: fp, filename, description = imp.find_module('_testcapi', path=[output_dir]) - imp.load_module('_testcapi', fp, filename, description) + with fp: + imp.load_module('_testcapi', fp, filename, description) except ImportError: _pypy_testcapi.compile_shared(cfile, '_testcapi', output_dir) diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms new file mode 100644 --- /dev/null +++ b/lib_pypy/_tkinter/license.terms @@ -0,0 +1,39 @@ +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., and other parties. 
The following +terms apply to all files associated with the software unless explicitly +disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. + +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, the +software shall be classified as "Commercial Computer Software" and the +Government shall have only "Restricted Rights" as defined in Clause +252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the +authors grant the U.S. 
Government and others acting in its behalf +permission to use and distribute the software in accordance with the +terms specified in this license. diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -121,6 +121,10 @@ incdirs = [] linklibs = ['tcl85', 'tk85'] libdirs = [] +elif sys.platform == 'darwin': + incdirs = ['/System/Library/Frameworks/Tk.framework/Versions/Current/Headers/'] + linklibs = ['tcl', 'tk'] + libdirs = [] else: incdirs=['/usr/include/tcl'] linklibs=['tcl', 'tk'] diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.8 +Version: 0.8.6 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.2" -__version_info__ = (0, 8, 2) +__version__ = "0.8.6" +__version_info__ = (0, 8, 6) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -55,8 +55,7 @@ # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ - assert (backend.__version__ == __version__ or - backend.__version__ == __version__[:3]) + assert backend.__version__ == __version__ # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) 
@@ -443,6 +442,10 @@ for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + for key, val in ffi._parser._int_constants.items(): + if key not in library.__dict__: + library.__dict__[key] = val + copied_enums.append(True) if name in library.__dict__: return diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -24,6 +24,7 @@ _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None +_r_int_literal = re.compile(r"^0?x?[0-9a-f]+u?l?$", re.IGNORECASE) def _get_parser(): global _parser_cache @@ -99,6 +100,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._int_constants = {} def _parse(self, csource): csource, macros = _preprocess(csource) @@ -128,9 +130,10 @@ finally: if lock is not None: lock.release() - return ast, macros + # csource will be used to find buggy source text + return ast, macros, csource - def convert_pycparser_error(self, e, csource): + def _convert_pycparser_error(self, e, csource): # xxx look for ":NUM:" at the start of str(e) and try to interpret # it as a line number line = None @@ -142,6 +145,12 @@ csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: @@ -160,14 +169,9 @@ self._packed = prev_packed def _internal_parse(self, csource): - ast, macros = self._parse(csource) + ast, macros, csource = self._parse(csource) # add the macros - for key, value in macros.items(): - value = value.strip() - if value != '...': - raise api.CDefError('only supports the syntax "#define ' - '%s ..." 
for now (literally)' % key) - self._declare('macro ' + key, value) + self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) @@ -175,27 +179,61 @@ if decl.name == '__dotdotdot__': break # - for decl in iterator: - if isinstance(decl, pycparser.c_ast.Decl): - self._parse_decl(decl) - elif isinstance(decl, pycparser.c_ast.Typedef): - if not decl.name: - raise api.CDefError("typedef does not declare any name", - decl) - if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) - and decl.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_type(decl.name) - elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and - isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and - isinstance(decl.type.type.type, - pycparser.c_ast.IdentifierType) and - decl.type.type.type.names == ['__dotdotdot__']): - realtype = model.unknown_ptr_type(decl.name) + try: + for decl in iterator: + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise api.CDefError("typedef does not declare any name", + decl) + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) + and decl.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_type(decl.name) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names == ['__dotdotdot__']): + realtype = model.unknown_ptr_type(decl.name) + else: + realtype = self._get_type(decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + raise api.CDefError("unrecognized construct", decl) + except api.FFIError as e: + msg = 
self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + raise api.FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + match = _r_int_literal.search(value) + if match is not None: + int_str = match.group(0).lower().rstrip("ul") + + # "010" is not valid oct in py3 + if (int_str.startswith("0") and + int_str != "0" and + not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + + pyvalue = int(int_str, 0) + self._add_constants(key, pyvalue) + elif value == '...': + self._declare('macro ' + key, value) else: - raise api.CDefError("unrecognized construct", decl) + raise api.CDefError('only supports the syntax "#define ' + '%s ..." (literally) or "#define ' + '%s 0x1FF" for now' % (key, key)) def _parse_decl(self, decl): node = decl.type @@ -227,7 +265,7 @@ self._declare('variable ' + decl.name, tp) def parse_type(self, cdecl): - ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl) + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): @@ -306,7 +344,8 @@ if ident == 'void': return model.void_type if ident == '__dotdotdot__': - raise api.FFIError('bad usage of "..."') + raise api.FFIError(':%d: bad usage of "..."' % + typenode.coord.line) return resolve_common_type(ident) # if isinstance(type, pycparser.c_ast.Struct): @@ -333,7 +372,8 @@ return self._get_struct_union_enum_type('union', typenode, name, nested=True) # - raise api.FFIError("bad or unsupported type declaration") + raise api.FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) @@ -499,6 
+539,10 @@ if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] # if partial_length_ok: if (isinstance(exprnode, pycparser.c_ast.ID) and @@ -506,8 +550,8 @@ self._partial_length = True return '...' # - raise api.FFIError("unsupported expression: expected a " - "simple numeric constant") + raise api.FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) def _build_enum_type(self, explicit_name, decls): if decls is not None: @@ -522,6 +566,7 @@ if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) @@ -535,3 +580,5 @@ kind = name.split(' ', 1)[0] if kind in ('typedef', 'struct', 'union', 'enum'): self._declare(name, tp) + for k, v in other._int_constants.items(): + self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -38,6 +38,7 @@ import distutils.errors # dist = Distribution({'ext_modules': [ext]}) + dist.parse_config_files() options = dist.get_option_dict('build_ext') options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -89,43 +89,54 @@ # by generate_cpy_function_method(). 
prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") - prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS},') - prnt(' {NULL, NULL} /* Sentinel */') + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. modname = self.verifier.get_module_name() - if sys.version_info >= (3,): - prnt('static struct PyModuleDef _cffi_module_def = {') - prnt(' PyModuleDef_HEAD_INIT,') - prnt(' "%s",' % modname) - prnt(' NULL,') - prnt(' -1,') - prnt(' _cffi_methods,') - prnt(' NULL, NULL, NULL, NULL') - prnt('};') - prnt() - initname = 'PyInit_%s' % modname - createmod = 'PyModule_Create(&_cffi_module_def)' - errorcase = 'return NULL' - finalreturn = 'return lib' - else: - initname = 'init%s' % modname - createmod = 'Py_InitModule("%s", _cffi_methods)' % modname - errorcase = 'return' - finalreturn = 'return' + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() prnt('PyMODINIT_FUNC') - prnt('%s(void)' % initname) + prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') - prnt(' lib = %s;' % createmod) - prnt(' if (lib == NULL || %s < 0)' % ( - self._chained_list_constants[False],)) - prnt(' %s;' % errorcase) - prnt(' _cffi_init();') - prnt(' %s;' % finalreturn) + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt(' return lib;') prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' 
% modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') def load_library(self): # XXX review all usages of 'self' here! @@ -394,7 +405,7 @@ meth = 'METH_O' else: meth = 'METH_VARARGS' - self._prnt(' {"%s", _cffi_f_%s, %s},' % (name, name, meth)) + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop @@ -481,8 +492,8 @@ if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) - self._prnt(' {"%s", %s, METH_NOARGS},' % (layoutfuncname, - layoutfuncname)) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: @@ -589,13 +600,7 @@ 'variable type'),)) assert delayed else: - prnt(' if (LONG_MIN <= (%s) && (%s) <= LONG_MAX)' % (name, name)) - prnt(' o = PyInt_FromLong((long)(%s));' % (name,)) - prnt(' else if ((%s) <= 0)' % (name,)) - prnt(' o = PyLong_FromLongLong((long long)(%s));' % (name,)) - prnt(' else') - prnt(' o = PyLong_FromUnsignedLongLong(' - '(unsigned long long)(%s));' % (name,)) + prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: @@ -632,13 +637,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int %s(PyObject *lib)' % funcname) prnt('{') @@ -760,17 +770,30 @@ #include #include -#ifdef MS_WIN32 -#include /* for 
alloca() */ -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif +#else +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif #if PY_MAJOR_VERSION < 3 @@ -795,6 +818,15 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ @@ -804,14 +836,14 @@ PyLong_FromLongLong(x))) #define _cffi_to_c_int(o, type) \ - (sizeof(type) == 1 ? (((type)-1) > 0 ? _cffi_to_c_u8(o) \ - : _cffi_to_c_i8(o)) : \ - sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ - : _cffi_to_c_i16(o)) : \ - sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ - : _cffi_to_c_i32(o)) : \ - sizeof(type) == 8 ? (((type)-1) > 0 ? 
_cffi_to_c_u64(o) \ - : _cffi_to_c_i64(o)) : \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ @@ -885,25 +917,32 @@ return PyBool_FromLong(was_alive); } -static void _cffi_init(void) +static int _cffi_init(void) { - PyObject *module = PyImport_ImportModule("_cffi_backend"); - PyObject *c_api_object; + PyObject *module, *c_api_object = NULL; + module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) - return; + goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) - return; + goto failure; if (!PyCapsule_CheckExact(c_api_object)) { - Py_DECREF(c_api_object); PyErr_SetNone(PyExc_ImportError); - return; + goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -249,10 +249,10 @@ prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) - prnt('ssize_t %s(ssize_t i)' % (layoutfuncname,)) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) - prnt(' static ssize_t nums[] = {') + prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' 
offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize in tp.enumfields(): @@ -276,7 +276,7 @@ return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # - BFunc = self.ffi._typeof_locked("ssize_t(*)(ssize_t)")[0] + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 @@ -410,13 +410,18 @@ # ---------- # enums + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) @@ -430,14 +435,14 @@ enumerator, enumerator, enumvalue)) prnt(' char buf[64];') prnt(' if ((%s) < 0)' % enumerator) - prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % enumerator) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % enumerator) prnt(' else') - prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % enumerator) - prnt(' snprintf(out_error, 255,' + prnt(' sprintf(out_error,' ' "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % ( - enumerator, enumvalue)) + enumerator[:100], enumvalue)) prnt(' return -1;') prnt(' }') prnt(' return 0;') @@ -453,7 +458,7 @@ else: BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] - funcname = '_cffi_e_%s_%s' % (prefix, name) + funcname = self._enum_funcname(prefix, name) function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: @@ -547,20 +552,29 @@ #include #include /* XXX for ssize_t on some platforms */ -#ifdef _WIN32 -# include 
-# define snprintf _snprintf -typedef __int8 int8_t; -typedef __int16 int16_t; -typedef __int32 int32_t; -typedef __int64 int64_t; -typedef unsigned __int8 uint8_t; -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -typedef SSIZE_T ssize_t; -typedef unsigned char _Bool; +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */ +#if defined(_MSC_VER) +# include /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; +# else +# include +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ + typedef unsigned char _Bool; +# endif #else -# include +# include +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# include +# endif #endif ''' diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -1,4 +1,3 @@ - """ This file provides some support for things like standard_c_lib and errno access, as portable as possible """ @@ -22,7 +21,7 @@ standard_c_lib._errno.argtypes = None def _where_is_errno(): return standard_c_lib._errno() - + elif sys.platform in ('linux2', 'freebsd6'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__errno_location.argtypes = None @@ -42,5 +41,3 @@ def set_errno(value): errno_p = _where_is_errno() errno_p.contents.value = value - - diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py new file mode 100644 --- /dev/null +++ b/lib_pypy/gdbm.py @@ -0,0 +1,176 @@ +import cffi, os + +ffi = cffi.FFI() +ffi.cdef(''' +#define GDBM_READER ... +#define GDBM_WRITER ... +#define GDBM_WRCREAT ... +#define GDBM_NEWDB ... +#define GDBM_FAST ... +#define GDBM_SYNC ... 
+#define GDBM_NOLOCK ... +#define GDBM_REPLACE ... + +void* gdbm_open(char *, int, int, int, void (*)()); +void gdbm_close(void*); + +typedef struct { + char *dptr; + int dsize; +} datum; + +datum gdbm_fetch(void*, datum); +int gdbm_delete(void*, datum); +int gdbm_store(void*, datum, datum, int); +int gdbm_exists(void*, datum); + +int gdbm_reorganize(void*); + +datum gdbm_firstkey(void*); +datum gdbm_nextkey(void*, datum); +void gdbm_sync(void*); + +char* gdbm_strerror(int); +int gdbm_errno; + +void free(void*); +''') + +try: + lib = ffi.verify(''' + #include "gdbm.h" + ''', libraries=['gdbm']) +except cffi.VerificationError as e: + # distutils does not preserve the actual message, + # but the verification is simple enough that the + # failure must be due to missing gdbm dev libs + raise ImportError('%s: %s' %(e.__class__.__name__, e)) + +class error(Exception): + pass + +def _fromstr(key): + if isinstance(key, unicode): + key = key.encode("ascii") + if not isinstance(key, str): + raise TypeError("gdbm mappings have string indices only") + return {'dptr': ffi.new("char[]", key), 'dsize': len(key)} + +class gdbm(object): + ll_dbm = None + + def __init__(self, filename, iflags, mode): + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.size = -1 + if not res: + self._raise_from_errno() + self.ll_dbm = res + + def close(self): + if self.ll_dbm: + lib.gdbm_close(self.ll_dbm) + self.ll_dbm = None + + def _raise_from_errno(self): + if ffi.errno: + raise error(ffi.errno, os.strerror(ffi.errno)) + raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) + + def __len__(self): + if self.size < 0: + self.size = len(self.keys()) + return self.size + + def __setitem__(self, key, value): + self._check_closed() + self._size = -1 + r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self._raise_from_errno() + + def __delitem__(self, key): + self._check_closed() + res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) + 
if res < 0: + raise KeyError(key) + + def __contains__(self, key): + self._check_closed() + return lib.gdbm_exists(self.ll_dbm, _fromstr(key)) + has_key = __contains__ + + def __getitem__(self, key): + self._check_closed() + drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res + + def keys(self): + self._check_closed() + l = [] + key = lib.gdbm_firstkey(self.ll_dbm) + while key.dptr: + l.append(str(ffi.buffer(key.dptr, key.dsize))) + nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + lib.free(key.dptr) + key = nextkey + return l + + def firstkey(self): + self._check_closed() + key = lib.gdbm_firstkey(self.ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def nextkey(self, key): + self._check_closed() + key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res + + def reorganize(self): + self._check_closed() + if lib.gdbm_reorganize(self.ll_dbm) < 0: + self._raise_from_errno() + + def _check_closed(self): + if not self.ll_dbm: + raise error(0, "GDBM object has already been closed") + + __del__ = close + + def sync(self): + self._check_closed() + lib.gdbm_sync(self.ll_dbm) + +def open(filename, flags='r', mode=0666): + if flags[0] == 'r': + iflags = lib.GDBM_READER + elif flags[0] == 'w': + iflags = lib.GDBM_WRITER + elif flags[0] == 'c': + iflags = lib.GDBM_WRCREAT + elif flags[0] == 'n': + iflags = lib.GDBM_NEWDB + else: + raise error(0, "First flag must be one of 'r', 'w', 'c' or 'n'") + for flag in flags[1:]: + if flag == 'f': + iflags |= lib.GDBM_FAST + elif flag == 's': + iflags |= lib.GDBM_SYNC + elif flag == 'u': + iflags |= lib.GDBM_NOLOCK + else: + raise error(0, "Flag '%s' not supported" % flag) + return gdbm(filename, iflags, mode) + +open_flags = "rwcnfsu" diff --git a/pypy/config/pypyoption.py 
b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -113,7 +113,7 @@ try: for name in modlist: __import__(name) - except (ImportError, CompilationError, py.test.skip.Exception), e: + except (ImportError, CompilationError, py.test.skip.Exception) as e: errcls = e.__class__.__name__ raise Exception( "The module %r is disabled\n" % (modname,) + diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py --- a/pypy/config/test/test_pypyoption.py +++ b/pypy/config/test/test_pypyoption.py @@ -12,9 +12,9 @@ assert conf.objspace.usemodules.gc conf.objspace.std.withmapdict = True - assert conf.objspace.std.withmethodcache + assert conf.objspace.std.withtypeversion conf = get_pypy_config() - conf.objspace.std.withmethodcache = False + conf.objspace.std.withtypeversion = False py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True") def test_conflicting_gcrootfinder(): diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile --- a/pypy/doc/Makefile +++ b/pypy/doc/Makefile @@ -7,63 +7,80 @@ PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make 
pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + pickle: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ @@ -72,35 +89,89 @@ @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc" +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + latex: - # python config/generate.py #readthedocs will not run this Makefile $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." 
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja From noreply at buildbot.pypy.org Sun Aug 10 09:14:57 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sun, 10 Aug 2014 09:14:57 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Explicitly raise the IndexError for str objects Message-ID: <20140810071457.09E941D2323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72739:b895651fbbdc Date: 2014-08-09 23:50 -0500 http://bitbucket.org/pypy/pypy/changeset/b895651fbbdc/ Log: Explicitly raise the IndexError for str objects diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -50,15 +50,16 @@ return res def utf8ord(ustr, start=0): - if start >= len(ustr): - raise IndexError() - start = ustr.index_of_char(start) return utf8ord_bytes(ustr.bytes, start) @specialize.argtype(0) def ORD(s, pos): assert s is not None + + if pos >= len(s): + raise IndexError() + if isinstance(s, Utf8Str): return utf8ord(s, pos) else: @@ -493,7 +494,6 @@ res.reverse() return res - #@specialize.argtype(1) def join(self, other): if len(other) == 0: return Utf8Str('') From noreply at buildbot.pypy.org Sun Aug 10 09:14:58 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sun, 10 Aug 2014 09:14:58 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix rstr.rsplit Message-ID: <20140810071458.57A771D2323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72740:5f2ee0bf8c2c Date: 2014-08-10 00:45 -0500 http://bitbucket.org/pypy/pypy/changeset/5f2ee0bf8c2c/ Log: Fix rstr.rsplit diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -988,7 +988,7 @@ markerlen = len(c.chars) pos = s.rfind(c, 0, pos) while pos >= 0 and count <= max: - pos = s.rfind(c, 0, pos - markerlen) + pos = s.rfind(c, 0, pos) count += 1 res = LIST.ll_newlist(count) items 
= res.ll_items() diff --git a/rpython/rtyper/test/test_rstr.py b/rpython/rtyper/test/test_rstr.py --- a/rpython/rtyper/test/test_rstr.py +++ b/rpython/rtyper/test/test_rstr.py @@ -760,6 +760,11 @@ res = self.interpret(f, [i]) assert res == True + def f(): + return "a//b//c//d".rsplit("//") == ["a", "b", "c", "d"] + res = self.interpret(f, []) + assert res == f() + def test_rsplit(self): fn = self._make_split_test('rsplit') for i in range(5): From noreply at buildbot.pypy.org Sun Aug 10 09:14:59 2014 From: noreply at buildbot.pypy.org (waedt) Date: Sun, 10 Aug 2014 09:14:59 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix more translated vs untranslated bool issues (apparently its __nonzero__ instead of __bool__ in Python 2) Message-ID: <20140810071459.7D2DF1D2323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72741:c963423d5f20 Date: 2014-08-10 02:14 -0500 http://bitbucket.org/pypy/pypy/changeset/c963423d5f20/ Log: Fix more translated vs untranslated bool issues (apparently its __nonzero__ instead of __bool__ in Python 2) diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -196,6 +196,10 @@ assert s.rsplit(' ', 2) == u.rsplit(' ', 2) assert s.rsplit('\n') == [s] +def test_untranslated_bool(): + r = bool(Utf8Str('')) + assert r == True + def test_copy_to_new_wcharp(): s = build_utf8str() if sys.maxunicode < 0x10000 and rffi.sizeof(rffi.WCHAR_T) == 4: diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -245,9 +245,9 @@ assert self._len >= 0 return self._len - def __bool__(self): + def __nonzero__(self): # XXX Make the untranslated behavior the same as the translated behavior - raise True + return True def __hash__(self): return compute_hash(self.bytes) diff --git a/pypy/module/_io/interp_stringio.py 
b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -43,7 +43,8 @@ self.readnl = newline self.readuniversal = newline is None or len(newline) == 0 self.readtranslate = newline is None - if newline and utf8ord(newline) == ord("\r"): + if (newline is not None and len(newline) > 0 and + utf8ord(newline) == ord("\r")): self.writenl = newline if self.readuniversal: self.w_decoder = space.call_function( diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -382,11 +382,11 @@ self.line_buffering = line_buffering - self.readuniversal = not newline # null or empty + self.readuniversal = newline is None or len(newline) == 0 self.readtranslate = newline is None self.readnl = newline - self.writetranslate = (newline is None or len(newline) == 0) + self.writetranslate = newline is None or len(newline) == 0 if not self.readuniversal: self.writenl = self.readnl if utf8.EQ(self.writenl, Utf8Str('\n')): @@ -646,7 +646,7 @@ while True: # First, get some data if necessary has_data = True - while not self.decoded_chars: + while self.decoded_chars is None or len(self.decoded_chars) == 0: try: if not self._read_chunk(space): has_data = False @@ -935,7 +935,7 @@ w_pos = space.call_method(self.w_buffer, "tell") if self.w_decoder is None or self.snapshot is None: - assert not self.decoded_chars + assert self.decoded_chars is None or len(self.decoded_chars) == 0 return w_pos cookie = PositionCookie(space.bigint_w(w_pos)) diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -132,6 +132,14 @@ t.read(4) assert t.tell() == 4 + r = _io.BytesIO("abc") + t = _io.TextIOWrapper(r) + assert t.read(2) == "ab" + assert t.read(1) == "c" + assert t.read(1) == "" + assert t.read() == "" + assert t.tell() == 3 + 
def test_destructor(self): import _io l = [] diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -194,7 +194,7 @@ @unwrap_spec(tabsize=int) def descr_expandtabs(self, space, tabsize=8): value = self._val(space) - if not value: + if value is None or len(value) == 0: return self._empty() if self._use_rstr_ops(space, self): @@ -222,7 +222,7 @@ """calculates distance behind the token to the next tabstop""" distance = tabsize - if token: + if token is not None and len(token) != 0: distance = 0 offset = len(token) From noreply at buildbot.pypy.org Sun Aug 10 13:39:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 13:39:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Update the download location Message-ID: <20140810113947.063F61C0EE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72742:db7dfc8ced82 Date: 2014-08-10 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/db7dfc8ced82/ Log: Update the download location diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -63,8 +63,7 @@ Development is done in the branch `stmgc-c7`_. If you are only interested in trying it out, you can download a Ubuntu binary here__ -(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are -release mode, but not stripped of debug symbols). The current version +(``pypy-stm-2.3*.tar.bz2``, Ubuntu 12.04-14.04). The current version supports four "segments", which means that it will run up to four threads in parallel. @@ -79,7 +78,7 @@ rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py .. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/ -.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/misc/ +.. __: https://bitbucket.org/pypy/pypy/downloads/ .. __: http://clang.llvm.org/get_started.html .. 
__: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/ From noreply at buildbot.pypy.org Sun Aug 10 14:21:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 14:21:17 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Add an extra test. I thought it would show an issue, but it seems Message-ID: <20140810122117.5BDBF1D2323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1292:ea1c0a0bd3be Date: 2014-08-10 14:21 +0200 http://bitbucket.org/pypy/stmgc/changeset/ea1c0a0bd3be/ Log: Add an extra test. I thought it would show an issue, but it seems that clang-compiled code never changes a value passed as argument and stored in (rbp+N). If we change an argument or take its address, it first copies the argument inside our own frame. diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -168,6 +168,58 @@ /************************************************************/ +static int test6_x; + +__attribute__((noinline)) +void foo(int *x) { ++*x; } + +__attribute__((noinline)) +void f6(int a1, int a2, int a3, int a4, int a5, int a6, int a7, + int a8, int a9, int a10, int a11, int a12, int a13) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + + rewind_jmp_setjmp(>hread); + gevent(a1); gevent(a2); gevent(a3); gevent(a4); + gevent(a5); gevent(a6); gevent(a7); gevent(a8); + gevent(a9); gevent(a10); gevent(a11); gevent(a12); + gevent(a13); + if (++test6_x < 4) { + foo(&a1); + foo(&a2); + foo(&a3); + foo(&a4); + foo(&a5); + foo(&a6); + foo(&a7); + foo(&a8); + foo(&a9); + foo(&a10); + foo(&a11); + foo(&a12); + foo(&a13); + rewind_jmp_longjmp(>hread); + } + rewind_jmp_leaveframe(>hread, &buf); +} + +void test6(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + test6_x = 0; + f6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}; + CHECK(expected); +} + +/************************************************************/ + int rj_malloc_count = 0; void *rj_malloc(size_t size) @@ -195,6 +247,7 @@ else if (!strcmp(argv[1], "3")) test3(); else if (!strcmp(argv[1], "4")) test4(); else if (!strcmp(argv[1], "5")) test5(); + else if (!strcmp(argv[1], "6")) test6(); else assert(!"bad argv[1]"); assert(rj_malloc_count == 0); diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py --- a/c7/test/test_rewind.py +++ b/c7/test/test_rewind.py @@ -6,7 +6,7 @@ % (opt, opt)) if err != 0: raise OSError("clang failed on test_rewind.c") - for testnum in [1, 2, 3, 4, 5]: + for testnum in [1, 2, 3, 4, 5, 6]: print '=== O%d: RUNNING TEST %d ===' % (opt, testnum) err = os.system("./test_rewind_O%d %d" % (opt, testnum)) if err != 0: From noreply at buildbot.pypy.org Sun Aug 10 15:32:14 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 10 Aug 2014 15:32:14 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: hg merge default Message-ID: <20140810133214.B79AE1C0136@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72743:08ba84b1fa90 Date: 2014-08-09 23:04 +0200 http://bitbucket.org/pypy/pypy/changeset/08ba84b1fa90/ Log: hg merge default diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -53,6 +53,12 @@ int pypy_execute_source_ptr(char *source, void* ptr); +/* Windows hackery */ +#if defined(_MSC_VER) +# pragma comment(lib,"python27.lib") +#endif + + #ifdef __cplusplus } #endif diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info --- a/lib_pypy/cffi.egg-info +++ b/lib_pypy/cffi.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: cffi -Version: 0.8 +Version: 0.8.6 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/pypy/doc/getting-started-python.rst b/pypy/doc/getting-started-python.rst deleted file mode 100644 diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -616,7 +616,8 @@ def descr_classmethod_get(self, space, w_obj, w_klass=None): if space.is_none(w_klass): w_klass = space.type(w_obj) - return space.wrap(Method(space, self.w_function, w_klass, space.w_None)) + return space.wrap(Method(space, self.w_function, w_klass, + space.type(w_klass))) def descr_classmethod__new__(space, w_subtype, w_function): instance = space.allocate_instance(ClassMethod, w_subtype) diff --git a/pypy/interpreter/pycompiler.py b/pypy/interpreter/pycompiler.py --- a/pypy/interpreter/pycompiler.py +++ b/pypy/interpreter/pycompiler.py @@ -96,7 +96,7 @@ XXX: This class should override the baseclass implementation of compile_command() in order to optimize it, especially in case - of incomplete inputs (e.g. we shouldn't re-compile from sracth + of incomplete inputs (e.g. 
we shouldn't re-compile from scratch the whole source after having only added a new '\n') """ def __init__(self, space, override_version=None): diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -61,6 +61,7 @@ while True: next_instr = self.handle_bytecode(co_code, next_instr, ec) except ExitFrame: + self.last_exception = None return self.popvalue() def handle_bytecode(self, co_code, next_instr, ec): diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -280,6 +280,20 @@ raise StopIteration assert tuple(f()) == (1,) + def test_exception_is_cleared_by_yield(self): + def f(): + try: + foobar + except NameError: + yield 5 + raise # should raise "no active exception to re-raise" + gen = f() + gen.next() # --> 5 + try: + gen.next() + except TypeError: + pass + def test_should_not_inline(space): from pypy.interpreter.generator import should_not_inline diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -388,6 +388,13 @@ # differs from .im_class in case the method is # defined in some parent class of l's actual class + def test_classmethod_im_class(self): + class Foo(object): + @classmethod + def bar(cls): + pass + assert Foo.bar.im_class is type + def test_func_closure(self): x = 2 def f(): diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py --- a/pypy/module/__builtin__/app_inspect.py +++ b/pypy/module/__builtin__/app_inspect.py @@ -7,8 +7,8 @@ from __pypy__ import lookup_special -def _caller_locals(): - return sys._getframe(0).f_locals +def _caller_locals(): + return sys._getframe(0).f_locals def vars(*obj): """Return a dictionary of all the attributes currently bound in obj. 
If @@ -17,12 +17,11 @@ if len(obj) == 0: return _caller_locals() elif len(obj) != 1: - raise TypeError, "vars() takes at most 1 argument." - else: - try: - return obj[0].__dict__ - except AttributeError: - raise TypeError, "vars() argument must have __dict__ attribute" + raise TypeError("vars() takes at most 1 argument.") + try: + return obj[0].__dict__ + except AttributeError: + raise TypeError("vars() argument must have __dict__ attribute") def dir(*args): """dir([object]) -> list of strings @@ -38,8 +37,7 @@ attributes of its class's base classes. """ if len(args) > 1: - raise TypeError("dir expected at most 1 arguments, got %d" - % len(args)) + raise TypeError("dir expected at most 1 arguments, got %d" % len(args)) if len(args) == 0: local_names = _caller_locals().keys() # 2 stackframes away if not isinstance(local_names, list): @@ -48,92 +46,61 @@ return local_names import types - obj = args[0] - - dir_meth = None if isinstance(obj, types.InstanceType): - try: - dir_meth = getattr(obj, "__dir__") - except AttributeError: - pass + dir_meth = getattr(obj, '__dir__', None) else: - dir_meth = lookup_special(obj, "__dir__") + dir_meth = lookup_special(obj, '__dir__') if dir_meth is not None: - result = dir_meth() - if not isinstance(result, list): + names = dir_meth() + if not isinstance(names, list): raise TypeError("__dir__() must return a list, not %r" % ( - type(result),)) - result.sort() - return result + type(names),)) + names.sort() + return names elif isinstance(obj, types.ModuleType): try: - result = list(obj.__dict__) - result.sort() - return result + return sorted(obj.__dict__) except AttributeError: return [] - elif isinstance(obj, (types.TypeType, types.ClassType)): - #Don't look at __class__, as metaclass methods would be confusing. 
- result = _classdir(obj).keys() - result.sort() - return result - - else: #(regular item) - Dict = {} - try: - if isinstance(obj.__dict__, dict): - Dict.update(obj.__dict__) - except AttributeError: - pass - try: - Dict.update(_classdir(obj.__class__)) - except AttributeError: - pass + # Don't look at __class__, as metaclass methods would be confusing. + return sorted(_classdir(obj)) + else: + names = set() + ns = getattr(obj, '__dict__', None) + if isinstance(ns, dict): + names.update(ns) + klass = getattr(obj, '__class__', None) + if klass is not None: + names.update(_classdir(klass)) ## Comment from object.c: ## /* Merge in __members__ and __methods__ (if any). ## XXX Would like this to go away someday; for now, it's ## XXX needed to get at im_self etc of method objects. */ - for attr in ['__members__','__methods__']: - try: - l = getattr(obj, attr) - if not isinstance(l, list): - continue - for item in l: - if isinstance(item, types.StringTypes): - Dict[item] = None - except (AttributeError, TypeError): - pass + for attr in '__members__', '__methods__': + l = getattr(obj, attr, None) + if not isinstance(l, list): + continue + names.extend(item for item in l if isinstance(item, str)) - result = Dict.keys() - result.sort() - return result + return sorted(names) def _classdir(klass): - """Return a dict of the accessible attributes of class/type klass. + """Return a set of the accessible attributes of class/type klass. - This includes all attributes of klass and all of the - base classes recursively. - - The values of this dict have no meaning - only the keys have - meaning. + This includes all attributes of klass and all of the base classes + recursively. """ - Dict = {} - try: - Dict.update(klass.__dict__) - except AttributeError: pass - try: - # XXX - Use of .__mro__ would be suggested, if the existance - # of that attribute could be guarranted. 
- bases = klass.__bases__ - except AttributeError: pass - else: - try: - #Note that since we are only interested in the keys, - # the order we merge classes is unimportant - for base in bases: - Dict.update(_classdir(base)) - except TypeError: pass - return Dict + names = set() + ns = getattr(klass, '__dict__', None) + if ns is not None: + names.update(ns) + bases = getattr(klass, '__bases__', None) + if bases is not None: + # Note that since we are only interested in the keys, the order + # we merge classes is unimportant + for base in bases: + names.update(_classdir(base)) + return names diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -4,12 +4,15 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rStringIO import RStringIO from rpython.rlib.rarithmetic import r_longlong +from rpython.rlib.objectmodel import import_from_mixin from pypy.module._io.interp_bufferedio import W_BufferedIOBase from pypy.module._io.interp_iobase import convert_size import sys -class W_BytesIO(RStringIO, W_BufferedIOBase): +class W_BytesIO(W_BufferedIOBase): + import_from_mixin(RStringIO) + def __init__(self, space): W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py --- a/pypy/module/_md5/interp_md5.py +++ b/pypy/module/_md5/interp_md5.py @@ -1,13 +1,15 @@ from rpython.rlib import rmd5 +from rpython.rlib.objectmodel import import_from_mixin from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -class W_MD5(W_Root, rmd5.RMD5): +class W_MD5(W_Root): """ A subclass of RMD5 that can be exposed to app-level. 
""" + import_from_mixin(rmd5.RMD5) def __init__(self, space): self.space = space diff --git a/pypy/module/_sha/interp_sha.py b/pypy/module/_sha/interp_sha.py --- a/pypy/module/_sha/interp_sha.py +++ b/pypy/module/_sha/interp_sha.py @@ -1,13 +1,15 @@ from rpython.rlib import rsha +from rpython.rlib.objectmodel import import_from_mixin from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec -class W_SHA(W_Root, rsha.RSHA): +class W_SHA(W_Root): """ A subclass of RSHA that can be exposed to app-level. """ + import_from_mixin(rsha.RSHA) def __init__(self, space): self.space = space diff --git a/pypy/module/cStringIO/interp_stringio.py b/pypy/module/cStringIO/interp_stringio.py --- a/pypy/module/cStringIO/interp_stringio.py +++ b/pypy/module/cStringIO/interp_stringio.py @@ -3,6 +3,7 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rStringIO import RStringIO +from rpython.rlib.objectmodel import import_from_mixin class W_InputOutputType(W_Root): @@ -144,7 +145,9 @@ # ____________________________________________________________ -class W_OutputType(RStringIO, W_InputOutputType): +class W_OutputType(W_InputOutputType): + import_from_mixin(RStringIO) + def __init__(self, space): self.init() self.space = space diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/cffi_tests/test_egg_version.py @@ -0,0 +1,12 @@ +from email.parser import Parser + +import py + +import cffi +import pypy + +egg_info = py.path.local(pypy.__file__) / '../../lib_pypy/cffi.egg-info' + +def test_egg_version(): + info = Parser().parsestr(egg_info.read()) + assert info['version'] == cffi.__version__ diff --git a/pypy/objspace/std/iterobject.py 
b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -30,10 +30,6 @@ raise NotImplementedError def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) @@ -125,10 +121,6 @@ self.index = space.int_w(self.w_len) + index def descr_reduce(self, space): - """ - XXX to do: remove this __reduce__ method and do - a registration with copy_reg, instead. - """ from pypy.interpreter.mixedmodule import MixedModule w_mod = space.getbuiltinmodule('_pickle_support') mod = space.interp_w(MixedModule, w_mod) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -419,6 +419,10 @@ base = object baselist = list(cls.__bases__) + if cls.__dict__.get('_mixin_', False): + raise AnnotatorError("cannot use directly the class %r because " + "it is a _mixin_" % (cls,)) + # special case: skip BaseException in Python 2.5, and pretend # that all exceptions ultimately inherit from Exception instead # of BaseException (XXX hack) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -2536,6 +2536,22 @@ s = a.build_types(f, []) assert s.const == 2 + def test_cannot_use_directly_mixin(self): + class A(object): + _mixin_ = True + # + def f(): + return A() + a = self.RPythonAnnotator() + py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + # + class B(object): + pass + x = B() + def g(): + return isinstance(x, A) + py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + def test_import_from_mixin(self): class M(object): def f(self): diff --git 
a/rpython/flowspace/test/test_model.py b/rpython/flowspace/test/test_model.py --- a/rpython/flowspace/test/test_model.py +++ b/rpython/flowspace/test/test_model.py @@ -13,7 +13,7 @@ class pieces: """ The manually-built graph corresponding to the sample_function(). """ - i = Variable("i") + i0 = Variable("i0") i1 = Variable("i1") i2 = Variable("i2") i3 = Variable("i3") @@ -25,12 +25,12 @@ conditionop = SpaceOperation("gt", [i1, Constant(0)], conditionres) addop = SpaceOperation("add", [sum2, i2], sum3) decop = SpaceOperation("sub", [i2, Constant(1)], i3) - startblock = Block([i]) + startblock = Block([i0]) headerblock = Block([i1, sum1]) whileblock = Block([i2, sum2]) graph = FunctionGraph("f", startblock) - startblock.closeblock(Link([i, Constant(0)], headerblock)) + startblock.closeblock(Link([i0, Constant(0)], headerblock)) headerblock.operations.append(conditionop) headerblock.exitswitch = conditionres headerblock.closeblock(Link([sum1], graph.returnblock, False), @@ -55,7 +55,7 @@ def test_graphattributes(): assert graph.startblock is pieces.startblock assert graph.returnblock is pieces.headerblock.exits[0].target - assert graph.getargs() == [pieces.i] + assert graph.getargs() == [pieces.i0] assert [graph.getreturnvar()] == graph.returnblock.inputargs assert graph.source == inspect.getsource(sample_function) diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -21,7 +21,7 @@ # this is a basic test that tries to hit a number of features and their # translation: # - jitting of loops and bridges - # - virtualizables + # - two virtualizable types # - set_param interface # - profiler # - full optimizer @@ -79,22 +79,28 @@ if rposix.get_errno() != total: raise ValueError return chr(total % 253) # + class Virt2(object): + _virtualizable_ = ['i'] + def __init__(self, i): 
+ self.i = i from rpython.rlib.libffi import types, CDLL, ArgChain from rpython.rlib.test.test_clibffi import get_libm_name libm_name = get_libm_name(sys.platform) - jitdriver2 = JitDriver(greens=[], reds = ['i', 'func', 'res', 'x']) + jitdriver2 = JitDriver(greens=[], reds = ['v2', 'func', 'res', 'x'], + virtualizables = ['v2']) def libffi_stuff(i, j): lib = CDLL(libm_name) func = lib.getpointer('fabs', [types.double], types.double) res = 0.0 x = float(j) - while i > 0: - jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x) + v2 = Virt2(i) + while v2.i > 0: + jitdriver2.jit_merge_point(v2=v2, res=res, func=func, x=x) promote(func) argchain = ArgChain() argchain.arg(x) res = func.call(argchain, rffi.DOUBLE) - i -= 1 + v2.i -= 1 return res # def main(i, j): diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -129,7 +129,7 @@ self.mc.MOV(heap(fastgil), css_value) # if not we_are_translated(): # for testing: we should not access - self.mc.ADD(ebp, imm(1)) # ebp any more; and ignore 'fastgil' + self.mc.ADD(ebp, imm(1)) # ebp any more def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1611,6 +1611,40 @@ op.getopnum() == rop.GUARD_NOT_FORCED_2] assert len(l) == 0 + def test_two_virtualizable_types(self): + class A: + _virtualizable_ = ['x'] + def __init__(self, x): + self.x = x + + class B: + _virtualizable_ = ['lst[*]'] + def __init__(self, lst): + self.lst = lst + + driver_a = JitDriver(greens=[], reds=['a'], virtualizables=['a']) + driver_b = JitDriver(greens=[], reds=['b'], virtualizables=['b']) + + def foo_a(a): + while a.x > 0: + 
driver_a.jit_merge_point(a=a) + a.x -= 2 + return a.x + + def foo_b(b): + while b.lst[0] > 0: + driver_b.jit_merge_point(b=b) + b.lst[0] -= 2 + return b.lst[0] + + def f(): + return foo_a(A(13)) * 100 + foo_b(B([13])) + + assert f() == -101 + res = self.meta_interp(f, [], listops=True) + assert res == -101 + + class TestLLtype(ExplicitVirtualizableTests, ImplicitVirtualizableTests, LLJitMixin): diff --git a/rpython/rlib/rStringIO.py b/rpython/rlib/rStringIO.py --- a/rpython/rlib/rStringIO.py +++ b/rpython/rlib/rStringIO.py @@ -8,8 +8,6 @@ The fastest path through this code is for the case of a bunch of write() followed by getvalue(). """ - _mixin_ = True # for interp_stringio.py - def __init__(self): self.init() diff --git a/rpython/rlib/rmd5.py b/rpython/rlib/rmd5.py --- a/rpython/rlib/rmd5.py +++ b/rpython/rlib/rmd5.py @@ -132,8 +132,6 @@ class RMD5(object): """RPython-level MD5 object. """ - _mixin_ = True # for interp_md5.py - def __init__(self, initialdata=''): self._init() self.update(initialdata) diff --git a/rpython/rlib/rsha.py b/rpython/rlib/rsha.py --- a/rpython/rlib/rsha.py +++ b/rpython/rlib/rsha.py @@ -95,8 +95,6 @@ class RSHA(object): """RPython-level SHA object. 
""" - _mixin_ = True # for interp_sha.py - def __init__(self, initialdata=''): self._init() self.update(initialdata) diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -1154,7 +1154,12 @@ type(other).__name__,)) if self._TYPE != other._TYPE: raise TypeError("comparing %r and %r" % (self._TYPE, other._TYPE)) - return self._obj == other._obj + try: + return self._obj == other._obj + except DelayedPointer: + # if one of the two pointers is delayed, they cannot + # possibly be equal unless they are the same _ptr instance + return self is other def __ne__(self, other): return not (self == other) diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -93,7 +93,12 @@ return False # nothing to do, all signatures already match shape_cnt, shape_keys, shape_star = shape - assert not shape_star, "XXX not implemented" + if shape_star: + raise TyperError( + "not implemented: a call is done with a '*' argument, and the" + " multiple functions or methods that it can go to don't have" + " all the same signature (different argument names or defaults)." 
+ " The call can go to:\n%s" % '\n'.join(map(repr, graphs))) # for the first 'shape_cnt' arguments we need to generalize to # a common type diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -64,3 +64,13 @@ assert lltype.typeOf(ptr) == OBJECTPTR y = annlowlevel.cast_base_ptr_to_instance(X, ptr) assert y is x + + def test_delayedptr(self): + FUNCTYPE = lltype.FuncType([], lltype.Signed) + name = "delayed!myfunc" + delayedptr1 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + delayedptr2 = lltype._ptr(lltype.Ptr(FUNCTYPE), name, solid=True) + assert delayedptr1 == delayedptr1 + assert delayedptr1 != delayedptr2 + assert bool(delayedptr1) + assert delayedptr1 != lltype.nullptr(FUNCTYPE) diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -129,7 +129,7 @@ info = py.test.raises(LLException, "interp.eval_graph(graph, values)") try: got = interp.find_exception(info.value) - except ValueError, message: + except ValueError as message: got = 'None %r' % message assert got is exc, "wrong exception type, expected %r got %r" % (exc, got) diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -192,6 +192,25 @@ import re assert re.match(msg, excinfo.value.args[0]) + def test_methods_with_named_arg_call(self): + class Base: + def fn(self, y): + raise NotImplementedError + class Sub1(Base): + def fn(self, y): + return 1 + y + class Sub2(Base): + def fn(self, x): # different name! 
+ return x - 2 + def dummyfn(n): + if n == 1: + s = Sub1() + else: + s = Sub2() + return s.fn(*(n,)) + + py.test.raises(TyperError, self.rtype, dummyfn, [int], int) + class PBase: def fn(self): From noreply at buildbot.pypy.org Sun Aug 10 15:32:16 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 10 Aug 2014 15:32:16 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Copy paragraph about installing build time dependencies on SLES11 from default to pypy/doc/build.rst. Message-ID: <20140810133216.0C80F1C0136@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72744:05c45f0263f7 Date: 2014-08-09 23:23 +0200 http://bitbucket.org/pypy/pypy/changeset/05c45f0263f7/ Log: Copy paragraph about installing build time dependencies on SLES11 from default to pypy/doc/build.rst. diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst --- a/pypy/doc/build.rst +++ b/pypy/doc/build.rst @@ -93,6 +93,14 @@ For the optional lzma module on PyPy3 you will also need ``xz-devel``. +On SLES11:: + + zypper install gcc make python-devel pkg-config \ + zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \ + libexpat-devel libffi-devel python-curses + +For the optional lzma module on PyPy3 you will also need ``xz-devel``. + On Mac OS X, most of these build-time dependencies are installed alongside the Developer Tools. However, note that in order for the installation to find them you may need to run:: From noreply at buildbot.pypy.org Sun Aug 10 15:32:17 2014 From: noreply at buildbot.pypy.org (mjacob) Date: Sun, 10 Aug 2014 15:32:17 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: Update link. Message-ID: <20140810133217.2C64A1C0136@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: improve-docs Changeset: r72745:328c5ac6b882 Date: 2014-08-09 23:41 +0200 http://bitbucket.org/pypy/pypy/changeset/328c5ac6b882/ Log: Update link. 
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst --- a/pypy/doc/cppyy.rst +++ b/pypy/doc/cppyy.rst @@ -109,7 +109,7 @@ to ``ROOTSYS``. The following is optional, and is only to show how pypy-c can be build -:ref:`from source `, for example to get at the main development branch of cppyy. +:doc:`from source `, for example to get at the main development branch of cppyy. The :doc:`backend documentation ` has more details on the backend-specific prerequisites. From noreply at buildbot.pypy.org Sun Aug 10 16:21:50 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Sun, 10 Aug 2014 16:21:50 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Possible fix of a off by one fault in the SchedulerWrapper Message-ID: <20140810142150.4941B1C0EE9@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r1033:48efd13caa41 Date: 2014-08-10 16:18 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/48efd13caa41/ Log: Possible fix of a off by one fault in the SchedulerWrapper diff --git a/spyvm/wrapper.py b/spyvm/wrapper.py --- a/spyvm/wrapper.py +++ b/spyvm/wrapper.py @@ -205,7 +205,8 @@ def get_process_list(self, priority): lists = Wrapper(self.space, self.priority_list()) - return ProcessListWrapper(self.space, lists.read(priority)) + # priority - 1 as listWrapper is 0 indexed and Priorities start at 1 + return ProcessListWrapper(self.space, lists.read(priority - 1)) def pop_highest_priority_process(self): w_lists = self.priority_list() From noreply at buildbot.pypy.org Sun Aug 10 17:54:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 17:54:34 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Another TODO Message-ID: <20140810155434.1D3E11C0136@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1293:76c0606ab1f7 Date: 2014-08-10 17:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/76c0606ab1f7/ Log: Another TODO diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO 
@@ -24,3 +24,6 @@ - increase the memory limit, currently 2.5GB; this requires, apparently, more fighting against LLVM bugs + +- avoid __builtin_frame_address(0) in precisely the performance-critical + functions like the interpreter main loop From noreply at buildbot.pypy.org Sun Aug 10 18:00:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 18:00:33 +0200 (CEST) Subject: [pypy-commit] stmgc rewind_setjmp: Close branch, ready for merge Message-ID: <20140810160033.6C42B1C0136@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: rewind_setjmp Changeset: r1294:3eb4124c5fdc Date: 2014-08-10 17:59 +0200 http://bitbucket.org/pypy/stmgc/changeset/3eb4124c5fdc/ Log: Close branch, ready for merge From noreply at buildbot.pypy.org Sun Aug 10 18:00:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 18:00:35 +0200 (CEST) Subject: [pypy-commit] stmgc default: hg merge rewind_setjmp Message-ID: <20140810160035.1E2D71C0136@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1295:bdc151305c79 Date: 2014-08-10 18:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/bdc151305c79/ Log: hg merge rewind_setjmp Add a custom layer around setjmp(), removing the restriction that we can only longjmp() if the function calling setjmp() didn't return yet. 
diff --git a/c7/TODO b/c7/TODO --- a/c7/TODO +++ b/c7/TODO @@ -24,3 +24,6 @@ - increase the memory limit, currently 2.5GB; this requires, apparently, more fighting against LLVM bugs + +- avoid __builtin_frame_address(0) in precisely the performance-critical + functions like the interpreter main loop diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -71,9 +71,8 @@ { nodeptr_t r_n; long prev, sum; - stm_jmpbuf_t here; - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); stm_read((objptr_t)global_chained_list); r_n = global_chained_list; @@ -101,11 +100,9 @@ nodeptr_t swap_nodes(nodeptr_t initial) { - stm_jmpbuf_t here; - assert(initial != NULL); - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); if (stm_thread_local.longest_marker_state != 0) { fprintf(stderr, "[%p] marker %d for %.6f seconds:\n", @@ -202,7 +199,7 @@ stm_commit_transaction(); - stm_start_inevitable_transaction(&stm_thread_local); + stm_start_transaction(&stm_thread_local); STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ assert(global_chained_list->value == -1); STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ @@ -224,7 +221,9 @@ void *demo2(void *arg) { int status; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); char *org = (char *)stm_thread_local.shadowstack; STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ @@ -244,6 +243,7 @@ STM_POP_ROOT(stm_thread_local, global_chained_list); OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); unregister_thread_local(); status = sem_post(&done); assert(status == 0); return NULL; @@ -280,11 +280,13 @@ int main(void) { int status, i; + rewind_jmp_buf rjbuf; status = sem_init(&done, 0, 
0); assert(status == 0); stm_setup(); stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); stmcb_expand_marker = expand_marker; @@ -302,6 +304,7 @@ final_check(); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); unregister_thread_local(); //stm_teardown(); diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c --- a/c7/demo/demo_largemalloc.c +++ b/c7/demo/demo_largemalloc.c @@ -25,6 +25,15 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + /************************************************************/ #define ARENA_SIZE (1024*1024*1024) @@ -67,7 +76,7 @@ int i; arena_data = malloc(ARENA_SIZE); assert(arena_data != NULL); - _stm_mutex_pages_lock(); + //_stm_mutex_pages_lock(); for (i = 0; i < 25; i++) timing(i); return 0; diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -332,15 +332,15 @@ void *demo_random(void *arg) { int status; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); setup_thread(); objptr_t p; - stm_jmpbuf_t here; - volatile int call_fork = (arg != NULL); - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); assert(td.num_roots >= td.num_roots_at_transaction_start); td.num_roots = td.num_roots_at_transaction_start; p = NULL; @@ -358,11 +358,12 @@ if (p == (objptr_t)-1) { push_roots(); + long call_fork = (arg != NULL && *(long *)arg); if (call_fork == 0) { /* common case */ stm_commit_transaction(); td.num_roots_at_transaction_start = td.num_roots; if (get_rand(100) < 98) { - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); } else { 
stm_start_inevitable_transaction(&stm_thread_local); } @@ -374,7 +375,7 @@ else { /* run a fork() inside the transaction */ printf("========== FORK =========\n"); - call_fork = 0; + *(long*)arg = 0; pid_t child = fork(); printf("=== in process %d thread %lx, fork() returned %d\n", (int)getpid(), (long)pthread_self(), (int)child); @@ -394,6 +395,7 @@ } stm_commit_transaction(); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); @@ -442,6 +444,7 @@ int main(void) { int i, status; + rewind_jmp_buf rjbuf; /* pick a random seed from the time in seconds. A bit pointless for now... because the interleaving of the @@ -455,6 +458,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); setup_globals(); @@ -472,7 +476,7 @@ long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); long _fork = (thread_starts % forkbase) == 0; thread_starts--; - newthread(demo_random, (void *)_fork); + newthread(demo_random, &_fork); } } @@ -492,6 +496,7 @@ printf("Test OK!\n"); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); stm_teardown(); diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -41,12 +41,20 @@ void stmcb_commit_soon() {} +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} static sem_t done; static __thread int tl_counter = 0; -static int gl_counter = 0; +//static int gl_counter = 0; void *demo2(void *arg) { diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -324,14 +324,14 @@ STM_SEGMENT->transaction_read_version = 1; } -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) 
+static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) { assert(!_stm_in_transaction(tl)); s_mutex_lock(); retry: - if (jmpbuf == NULL) { + if (inevitable) { wait_for_end_of_inevitable_transaction(tl); } @@ -346,11 +346,9 @@ STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->marker_inev[1] = 0; - if (jmpbuf == NULL) + if (inevitable) marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR - : TS_INEVITABLE); - STM_SEGMENT->jmpbuf_ptr = jmpbuf; + STM_PSEGMENT->transaction_state = (inevitable ? TS_INEVITABLE : TS_REGULAR); #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -390,6 +388,22 @@ check_nursery_at_transaction_start(); } +long stm_start_transaction(stm_thread_local_t *tl) +{ +#ifdef STM_NO_AUTOMATIC_SETJMP + long repeat_count = 0; /* test/support.py */ +#else + long repeat_count = rewind_jmp_setjmp(&tl->rjthread); +#endif + _stm_start_transaction(tl, false); + return repeat_count; +} + +void stm_start_inevitable_transaction(stm_thread_local_t *tl) +{ + _stm_start_transaction(tl, true); +} + /************************************************************/ @@ -814,7 +828,7 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); /* if a major collection is required, do it here */ if (is_major_collection_requested()) { @@ -987,6 +1001,18 @@ #pragma pop_macro("STM_PSEGMENT") } +#ifdef STM_NO_AUTOMATIC_SETJMP +void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn)); +int stm_is_inevitable(void) +{ + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: return 0; + case TS_INEVITABLE: return 1; + default: abort(); + } +} +#endif + static void abort_with_mutex(void) { assert(_has_mutex()); @@ -996,10 +1022,9 @@ abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); - stm_jmpbuf_t 
*jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + stm_thread_local_t *tl = STM_SEGMENT->running_thread; /* clear memory registered on the thread-local */ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); @@ -1035,9 +1060,11 @@ */ usleep(1); - assert(jmpbuf_ptr != NULL); - assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ - __builtin_longjmp(*jmpbuf_ptr, 1); +#ifdef STM_NO_AUTOMATIC_SETJMP + _test_run_abort(tl); +#else + rewind_jmp_longjmp(&tl->rjthread); +#endif } void _stm_become_inevitable(const char *msg) @@ -1051,12 +1078,11 @@ marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); clear_callbacks_on_abort(); } else { assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); - assert(STM_SEGMENT->jmpbuf_ptr == NULL); } s_mutex_unlock(); diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -176,14 +176,15 @@ static void fork_abort_thread(long i) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); + stm_thread_local_t *tl = pr->pub.running_thread; dprintf(("forksupport_child: abort in seg%ld\n", i)); - assert(pr->pub.running_thread->associated_segment_num == i); + assert(tl->associated_segment_num == i); assert(pr->transaction_state == TS_REGULAR); set_gs_register(get_segment_base(i)); - stm_jmpbuf_t jmpbuf; - if (__builtin_setjmp(jmpbuf) == 0) { - pr->pub.jmpbuf_ptr = &jmpbuf; + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + if (rewind_jmp_setjmp(&tl->rjthread) == 0) { #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif @@ -192,6 +193,8 @@ strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } + rewind_jmp_forget(&tl->rjthread); + stm_rewind_jmp_leaveframe(tl, &rjbuf); } static void forksupport_child(void) 
diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c new file mode 100644 --- /dev/null +++ b/c7/stm/rewind_setjmp.c @@ -0,0 +1,112 @@ +#include "rewind_setjmp.h" +#include +#include +#include +#include + + +struct _rewind_jmp_moved_s { + struct _rewind_jmp_moved_s *next; + size_t size; +}; +#define RJM_HEADER sizeof(struct _rewind_jmp_moved_s) + +#ifndef RJBUF_CUSTOM_MALLOC +#define rj_malloc malloc +#define rj_free free +#else +void *rj_malloc(size_t); +void rj_free(void *); +#endif + + +static void copy_stack(rewind_jmp_thread *rjthread, char *base) +{ + assert(rjthread->head != NULL); + char *stop = rjthread->head->frame_base; + assert(stop > base); + struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *) + rj_malloc(RJM_HEADER + (stop - base)); + assert(next != NULL); /* XXX out of memory */ + next->next = rjthread->moved_off; + next->size = stop - base; + memcpy(((char *)next) + RJM_HEADER, base, stop - base); + + rjthread->moved_off_base = stop; + rjthread->moved_off = next; +} + +__attribute__((noinline)) +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread) +{ + if (rjthread->moved_off) { + _rewind_jmp_free_stack_slices(rjthread); + } + rewind_jmp_thread *volatile rjthread1 = rjthread; + int result; + if (__builtin_setjmp(rjthread->jmpbuf) == 0) { + rjthread = rjthread1; + rjthread->initial_head = rjthread->head; + result = 0; + } + else { + rjthread = rjthread1; + rjthread->head = rjthread->initial_head; + result = rjthread->repeat_count + 1; + } + rjthread->repeat_count = result; + copy_stack(rjthread, (char *)&rjthread1); + return result; +} + +__attribute__((noinline, noreturn)) +static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free) +{ + assert(rjthread->moved_off_base != NULL); + + while (rjthread->moved_off) { + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + char *target = rjthread->moved_off_base; + target -= p->size; + if (target < stack_free) { + /* need more stack space! 
*/ + do_longjmp(rjthread, alloca(stack_free - target)); + } + memcpy(target, ((char *)p) + RJM_HEADER, p->size); + rjthread->moved_off_base = target; + rjthread->moved_off = p->next; + rj_free(p); + } + __builtin_longjmp(rjthread->jmpbuf, 1); +} + +__attribute__((noreturn)) +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) +{ + char _rewind_jmp_marker; + do_longjmp(rjthread, &_rewind_jmp_marker); +} + +__attribute__((noinline)) +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) +{ + if (rjthread->head == NULL) { + _rewind_jmp_free_stack_slices(rjthread); + return; + } + assert(rjthread->moved_off_base < (char *)rjthread->head); + copy_stack(rjthread, rjthread->moved_off_base); +} + +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) +{ + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + struct _rewind_jmp_moved_s *pnext; + while (p) { + pnext = p->next; + rj_free(p); + p = pnext; + } + rjthread->moved_off = NULL; + rjthread->moved_off_base = NULL; +} diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h new file mode 100644 --- /dev/null +++ b/c7/stm/rewind_setjmp.h @@ -0,0 +1,82 @@ +#ifndef _REWIND_SETJMP_H_ +#define _REWIND_SETJMP_H_ + +/************************************************************ + + : : ^^^^^ + |-------------------| older frames in the stack + | prev=0 | + ,---> | rewind_jmp_buf | + | |-------------------| + | | | + | : : + | : : + | | | + | |-------------------| + `---------prev | + ,----> | rewind_jmp_buf | + | +-------------------| + | | | + | : : + | | | + | |-------------------| + `----------prev | + ,---> | rewind_jmp_buf | <--------------- MOVED_OFF_BASE + | |---------------- +-------------+ + | | | STACK COPY | + | | : : + | : | size | + | | | next | <---- MOVED_OFF + | | +---|------ +-------------+ + | | | | | STACK COPY | + | |-------------------| | : (SEQUEL) : + `---------prev | | : : +HEAD-----> | rewind_jmp_buf | | | | + |-------------------| | | size | + `------> | next=0 | + 
+-------------+ + + +************************************************************/ + +typedef struct _rewind_jmp_buf { + char *frame_base; + struct _rewind_jmp_buf *prev; +} rewind_jmp_buf; + +typedef struct { + rewind_jmp_buf *head; + rewind_jmp_buf *initial_head; + char *moved_off_base; + struct _rewind_jmp_moved_s *moved_off; + void *jmpbuf[5]; + long repeat_count; +} rewind_jmp_thread; + + +#define rewind_jmp_enterframe(rjthread, rjbuf) do { \ + (rjbuf)->frame_base = __builtin_frame_address(0); \ + (rjbuf)->prev = (rjthread)->head; \ + (rjthread)->head = (rjbuf); \ +} while (0) + +#define rewind_jmp_leaveframe(rjthread, rjbuf) do { \ + (rjthread)->head = (rjbuf)->prev; \ + if ((rjbuf)->frame_base == (rjthread)->moved_off_base) \ + _rewind_jmp_copy_stack_slice(rjthread); \ +} while (0) + +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread); +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); + +#define rewind_jmp_forget(rjthread) do { \ + if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ + (rjthread)->moved_off_base = 0; \ +} while (0) + +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *); +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *); + +#define rewind_jmp_armed(rjthread) ((rjthread)->moved_off_base != 0) + +#endif diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -36,3 +36,4 @@ #include "stm/weakref.c" #include "stm/timing.c" #include "stm/marker.c" +#include "stm/rewind_setjmp.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -13,6 +13,8 @@ #include #include +#include "stm/rewind_setjmp.h" + #if LONG_MAX == 2147483647 # error "Requires a 64-bit environment" #endif @@ -25,7 +27,6 @@ typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX struct stm_creation_marker_s stm_creation_marker_t; typedef TLPREFIX char stm_char; -typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ struct stm_read_marker_s { /* In 
every segment, every object has a corresponding read marker. @@ -44,7 +45,6 @@ stm_char *nursery_current; uintptr_t nursery_end; struct stm_thread_local_s *running_thread; - stm_jmpbuf_t *jmpbuf_ptr; }; #define STM_SEGMENT ((stm_segment_info_t *)4352) @@ -79,6 +79,8 @@ typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; + /* rewind_setjmp's interface */ + rewind_jmp_thread rjthread; /* a generic optional thread-local object */ object_t *thread_local_obj; /* in case this thread runs a transaction that aborts, @@ -114,7 +116,6 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ @@ -326,40 +327,42 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); +/* At some key places, like the entry point of the thread and in the + function with the interpreter's dispatch loop, you need to declare + a local variable of type 'rewind_jmp_buf' and call these macros. */ +#define stm_rewind_jmp_enterframe(tl, rjbuf) \ + rewind_jmp_enterframe(&(tl)->rjthread, rjbuf) +#define stm_rewind_jmp_leaveframe(tl, rjbuf) \ + rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf) + /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. - Use the macro STM_START_TRANSACTION() to start a transaction that - can be restarted using the 'jmpbuf' (a local variable of type - stm_jmpbuf_t). */ -#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ - _stm_start_transaction(tl, &jmpbuf); \ -}) - -/* Start an inevitable transaction, if it's going to return from the - current function immediately. 
*/ -static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - _stm_start_transaction(tl, NULL); -} - -/* Commit a transaction. */ + The stm_start_transaction() call returns the number of times it + returned, starting at 0. If it is > 0, then the transaction was + aborted and restarted this number of times. */ +long stm_start_transaction(stm_thread_local_t *tl); +void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); -/* Abort the currently running transaction. */ +/* Abort the currently running transaction. This function never + returns: it jumps back to the stm_start_transaction(). */ void stm_abort_transaction(void) __attribute__((noreturn)); -/* Turn the current transaction inevitable. The 'jmpbuf' passed to - STM_START_TRANSACTION() is not going to be used any more after - this call (but the stm_become_inevitable() itself may still abort). */ +/* Turn the current transaction inevitable. + The stm_become_inevitable() itself may still abort. */ +#ifdef STM_NO_AUTOMATIC_SETJMP +int stm_is_inevitable(void); +#else +static inline int stm_is_inevitable(void) { + return !rewind_jmp_armed(&STM_SEGMENT->running_thread->rjthread); +} +#endif static inline void stm_become_inevitable(stm_thread_local_t *tl, const char* msg) { assert(STM_SEGMENT->running_thread == tl); - if (STM_SEGMENT->jmpbuf_ptr != NULL) + if (!stm_is_inevitable()) _stm_become_inevitable(msg); } -static inline int stm_is_inevitable(void) { - return (STM_SEGMENT->jmpbuf_ptr == NULL); -} /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -7,7 +7,6 @@ ffi = cffi.FFI() ffi.cdef(""" typedef ... object_t; -typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... #define STM_NB_SEGMENTS ... #define _STM_FAST_ALLOC ... 
@@ -64,7 +63,8 @@ uintptr_t _stm_get_private_page(uintptr_t pagenum); int _stm_get_flags(object_t *obj); -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); +void clear_jmpbuf(stm_thread_local_t *tl); +long stm_start_transaction(stm_thread_local_t *tl); bool _check_commit_transaction(void); bool _check_abort_transaction(void); bool _check_become_inevitable(stm_thread_local_t *tl); @@ -148,7 +148,7 @@ GC_N_SMALL_REQUESTS = 36 # from gcpage.c LARGE_MALLOC_OVERHEAD = 16 # from largemalloc.h -lib = ffi.verify(''' +lib = ffi.verify(r''' #include #include #include @@ -167,23 +167,26 @@ return obj->stm_flags; } +void clear_jmpbuf(stm_thread_local_t *tl) { + memset(&tl->rjthread, 0, sizeof(rewind_jmp_thread)); +} + +__attribute__((noreturn)) +void _test_run_abort(stm_thread_local_t *tl) { + void **jmpbuf = tl->rjthread.jmpbuf; + fprintf(stderr, "~~~~~ ABORT ~~~~~\n"); + __builtin_longjmp(jmpbuf, 1); +} + #define CHECKED(CALL) \ - stm_jmpbuf_t here; \ - stm_segment_info_t *segment = STM_SEGMENT; \ - if (__builtin_setjmp(here) == 0) { /* returned directly */ \ - if (segment->jmpbuf_ptr != NULL) { \ - assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); \ - segment->jmpbuf_ptr = &here; \ - } \ + stm_thread_local_t *_tl = STM_SEGMENT->running_thread; \ + void **jmpbuf = _tl->rjthread.jmpbuf; \ + if (__builtin_setjmp(jmpbuf) == 0) { /* returned directly */\ CALL; \ - if (segment->jmpbuf_ptr != NULL) { \ - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ - } \ + clear_jmpbuf(_tl); \ return 0; \ } \ - if (segment->jmpbuf_ptr != NULL) { \ - segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; \ - } \ + clear_jmpbuf(_tl); \ return 1 bool _checked_stm_write(object_t *object) { @@ -350,6 +353,7 @@ } ''', sources=source_files, define_macros=[('STM_TESTS', '1'), + ('STM_NO_AUTOMATIC_SETJMP', '1'), ('STM_LARGEMALLOC_TEST', '1'), ('STM_NO_COND_WAIT', '1'), ('STM_DEBUGPRINT', '1'), @@ -559,7 +563,9 @@ def start_transaction(self): tl = self.tls[self.current_thread] assert not 
lib._stm_in_transaction(tl) - lib._stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) + res = lib.stm_start_transaction(tl) + assert res == 0 + lib.clear_jmpbuf(tl) assert lib._stm_in_transaction(tl) # seen = set() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -369,6 +369,7 @@ def test_inevitable_transaction_has_priority(self): self.start_transaction() + assert lib.stm_is_inevitable() == 0 lp1 = stm_allocate(16) stm_set_char(lp1, 'a') self.push_root(lp1) diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c new file mode 100644 --- /dev/null +++ b/c7/test/test_rewind.c @@ -0,0 +1,255 @@ +#include +#include +#include +#include +#include "rewind_setjmp.h" + + +rewind_jmp_thread gthread; +int gevents[1000]; +int num_gevents = 0; + +void gevent(int num) +{ + assert(num_gevents <= sizeof(gevents) / sizeof(int)); + gevents[num_gevents++] = num; +} + +void check_gevents(int expected[], int expected_size) +{ + int i; + int expected_count = expected_size / sizeof(int); + for (i = 0; i < expected_count && i < num_gevents; i++) { + assert(gevents[i] == expected[i]); + } + assert(num_gevents == expected_count); +} + +#define CHECK(expected) check_gevents(expected, sizeof(expected)) + +/************************************************************/ + +__attribute__((noinline)) +void f1(int x) +{ + gevent(1); + if (x < 10) { + rewind_jmp_longjmp(>hread); + } +} + +static int test1_x; + +void test1(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + + test1_x = 0; + rewind_jmp_setjmp(>hread); + + test1_x++; + f1(test1_x); + + assert(test1_x == 10); + int expected[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; + CHECK(expected); + + assert(rewind_jmp_armed(>hread)); + rewind_jmp_forget(>hread); + assert(!rewind_jmp_armed(>hread)); + + rewind_jmp_leaveframe(>hread, &buf); +} + +/************************************************************/ + +static int test2_x; + +__attribute__((noinline)) 
+int f2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + test2_x = 0; + rewind_jmp_setjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + return ++test2_x; +} + +void test2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f2(); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +__attribute__((noinline)) +int f3(int rec) +{ + if (rec > 0) + return f3(rec - 1); + else + return f2(); +} + +void test3(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f3(50); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +__attribute__((noinline)) +int f4(int rec) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int res; + if (rec > 0) + res = f4(rec - 1); + else + res = f2(); + rewind_jmp_leaveframe(>hread, &buf); + return res; +} + +void test4(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + int x = f4(5); + gevent(x); + if (x < 10) + rewind_jmp_longjmp(>hread); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + CHECK(expected); +} + +/************************************************************/ + +void test5(void) +{ + struct { int a; rewind_jmp_buf buf; int b; } sbuf; + rewind_jmp_enterframe(>hread, &sbuf.buf); + sbuf.a = 42; + sbuf.b = -42; + test2_x = 0; + rewind_jmp_setjmp(>hread); + sbuf.a++; + sbuf.b--; + gevent(sbuf.a); + gevent(sbuf.b); + if (test2_x == 0) { + test2_x++; + rewind_jmp_longjmp(>hread); + } + int expected[] = {43, -43, 43, -43}; + CHECK(expected); + rewind_jmp_leaveframe(>hread, &sbuf.buf); +} + 
+/************************************************************/ + +static int test6_x; + +__attribute__((noinline)) +void foo(int *x) { ++*x; } + +__attribute__((noinline)) +void f6(int a1, int a2, int a3, int a4, int a5, int a6, int a7, + int a8, int a9, int a10, int a11, int a12, int a13) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + + rewind_jmp_setjmp(>hread); + gevent(a1); gevent(a2); gevent(a3); gevent(a4); + gevent(a5); gevent(a6); gevent(a7); gevent(a8); + gevent(a9); gevent(a10); gevent(a11); gevent(a12); + gevent(a13); + if (++test6_x < 4) { + foo(&a1); + foo(&a2); + foo(&a3); + foo(&a4); + foo(&a5); + foo(&a6); + foo(&a7); + foo(&a8); + foo(&a9); + foo(&a10); + foo(&a11); + foo(&a12); + foo(&a13); + rewind_jmp_longjmp(>hread); + } + rewind_jmp_leaveframe(>hread, &buf); +} + +void test6(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + test6_x = 0; + f6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13); + rewind_jmp_leaveframe(>hread, &buf); + int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}; + CHECK(expected); +} + +/************************************************************/ + +int rj_malloc_count = 0; + +void *rj_malloc(size_t size) +{ + rj_malloc_count++; + void *ptr = malloc(size); + fprintf(stderr, "malloc(%ld) -> %p\n", (long)size, ptr); + return ptr; +} + +void rj_free(void *ptr) +{ + if (ptr) + rj_malloc_count--; + fprintf(stderr, "free(%p)\n", ptr); + free(ptr); +} + + +int main(int argc, char *argv[]) +{ + assert(argc > 1); + if (!strcmp(argv[1], "1")) test1(); + else if (!strcmp(argv[1], "2")) test2(); + else if (!strcmp(argv[1], "3")) test3(); + else if (!strcmp(argv[1], "4")) test4(); + else if (!strcmp(argv[1], "5")) test5(); + else if (!strcmp(argv[1], "6")) test6(); + else + assert(!"bad argv[1]"); + assert(rj_malloc_count == 0); + return 0; +} diff --git 
a/c7/test/test_rewind.py b/c7/test/test_rewind.py new file mode 100644 --- /dev/null +++ b/c7/test/test_rewind.py @@ -0,0 +1,19 @@ +import os + +def run_test(opt): + err = os.system("clang -g -O%d -Werror -DRJBUF_CUSTOM_MALLOC -I../stm" + " -o test_rewind_O%d test_rewind.c ../stm/rewind_setjmp.c" + % (opt, opt)) + if err != 0: + raise OSError("clang failed on test_rewind.c") + for testnum in [1, 2, 3, 4, 5, 6]: + print '=== O%d: RUNNING TEST %d ===' % (opt, testnum) + err = os.system("./test_rewind_O%d %d" % (opt, testnum)) + if err != 0: + raise OSError("'test_rewind_O%d %d' failed" % (opt, testnum)) + os.unlink("./test_rewind_O%d" % (opt,)) + +def test_O0(): run_test(0) +def test_O1(): run_test(1) +def test_O2(): run_test(2) +def test_O3(): run_test(3) diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -162,8 +162,9 @@ void *run_thread(void *thread_id) { - stm_jmpbuf_t here; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); TLOBJ = NULL; @@ -176,7 +177,7 @@ TLOBJ = cell; stm_commit_transaction(); /* inevitable */ - STM_START_TRANSACTION(&stm_thread_local, here); + stm_start_transaction(&stm_thread_local); cell = TLOBJ; TLOBJ = NULL; @@ -187,6 +188,7 @@ } stm_flush_timing(&stm_thread_local, 1); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); return NULL; From noreply at buildbot.pypy.org Sun Aug 10 20:38:20 2014 From: noreply at buildbot.pypy.org (Patrick Rein) Date: Sun, 10 Aug 2014 20:38:20 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk stmgc-c7: Images for benchmarking STM extension Message-ID: <20140810183820.AE8861D2323@cobra.cs.uni-duesseldorf.de> Author: Patrick Rein Branch: stmgc-c7 Changeset: r1034:627db5385987 Date: 2014-08-10 20:09 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/627db5385987/ Log: Images for benchmarking STM extension diff too long, 
truncating to 2000 out of 35678 lines diff --git a/images/benchmark-images/Squeak4.5nonstm.changes b/images/benchmark-images/Squeak4.5nonstm.changes new file mode 100644 --- /dev/null +++ b/images/benchmark-images/Squeak4.5nonstm.changes @@ -0,0 +1,10744 @@ + + ----STARTUP----{15 January 2014 . 2:16:33 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:33' prior: 42646392! benchStm [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 3 reporting!!']] parallelFork. [(1 to: 1000) do: [:t1 | SPyVM print: 'Thread 4 reporting!!']] parallelFork. (1 to: 1000) do: [:x | SPyVM print: '* spinlock *']. ^ 42 printString! ! ----SNAPSHOT----{15 January 2014 . 2:33:47 pm} Squeak4.5-12568.image priorSource: 9103122! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/15/2014 14:35' prior: 42656801! benchStm3 | t1 t2 | t1 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 1 reporting!!']] parallelFork. t2 := [(1 to: 100) do: [:t3 | SPyVM print: 'Thread 2 reporting!!']] parallelFork. SPyVM print: 'Waiting for Task 1'. t1 wait. SPyVM print: 'Waiting for Task 2'. t2 wait. SPyVM print: 'Finished waiting.'! ! ----SNAPSHOT----{15 January 2014 . 2:36:01 pm} Squeak4.5-12568.image priorSource: 594! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/15/2014 14:37' prior: 42653846! wait SPyVM print: ' Failed to wait for process!! '! ! ----SNAPSHOT----{15 January 2014 . 2:37:09 pm} Squeak4.5-12568.image priorSource: 1091! ----STARTUP----{16 January 2014 . 9:13:20 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !BlockClosure methodsFor: 'scheduling' stamp: 'toma 1/16/2014 21:13' prior: 42654183! parallelFork ^ (self newSTMProcess) fork; yourself! ! ----SNAPSHOT----{16 January 2014 . 9:14:01 pm} Squeak4.5-12568.image priorSource: 1345! 
!STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:14'! primWait SPyVM print: ' Failed to wait for process!! '! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33555705! wait SPyVM print: '[squeak] wait' self primWait! ! !STMProcess methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 21:15' prior: 33556450! wait SPyVM print: '[squeak] wait'. self primWait! ! ----SNAPSHOT----{16 January 2014 . 9:15:29 pm} Squeak4.5-12568.image priorSource: 1681! !BasicClassOrganizer methodsFor: 'accessing' stamp: 'toma 1/16/2014 22:18' prior: 17298983! classComment classComment ifNil: [^ '']. ^ [classComment text ifNil: ['']] on: Error do: [^ ''].! ! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'SPy-Benchmarks'! Object subclass: #SPySTM instanceVariableNames: '' classVariableNames: 'Shared' poolDictionaries: '' category: 'SPy-Benchmarks'! !SPySTM class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:22'! shared ^self Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23' prior: 33557264! shared ^Shared! ! !SPySTM class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:23'! shared: aValue Shared := aValue! ! ----SNAPSHOT----{16 January 2014 . 10:24:08 pm} Squeak4.5-12568.image priorSource: 2221! Object subclass: #STMAtomic instanceVariableNames: 'lock' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'nil' stamp: 'toma 1/16/2014 22:28'! primEnter ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557810! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:28' prior: 33557933! primEnter SPyVM print: 'primEnter failed'.! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! primLeave SPyVM print: 'primLeave failed'.! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29'! value self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:29' prior: 33558376! value | result | self primEnter. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558498! value | result | self primEnter. result := self. self primLeave ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:30' prior: 33558634! value | result | self primEnter. result := self. self primLeave. ! ! Object subclass: #STMAtomic instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-Processes'! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33558803! value | result | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:31' prior: 33559111! value | result error | self primEnter. result := self block value. self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559293! value | result error | self primEnter. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559481! value | result error | self primEnter. error := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559707! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err] self primLeave. ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:32' prior: 33559950! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. ! ! 
!STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560207! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Error do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:33' prior: 33560465! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error raise] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33560754! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561047! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass] ! ! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:35' prior: 33561339! value | result error | self primEnter. error := nil. result := nil. [result := self block value.] on: Exception do: [:err | error := err]. self primLeave. error ifNotNil: [error pass]. ^result ! ! !STMAtomic class methodsFor: 'nil' stamp: 'toma 1/16/2014 22:36'! from: aBlock ^ (STMAtomic new) block: aBlock; yourself.! ! !STMAtomic class methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 22:36' prior: 33561909! from: aBlock ^ (STMAtomic new) block: aBlock; yourself! ! !BlockClosure methodsFor: 'nil' stamp: 'toma 1/16/2014 22:37'! atomic ^STMAtomic from: self! ! SystemOrganization addCategory: #'Kernel-STM'! SystemOrganization classify: #STMAtomic under: #'Kernel-STM'! SystemOrganization classify: #STMProcess under: #'Kernel-STM'! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40'! benchStmAtomic ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562476! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562577! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:40' prior: 33562700! benchStmAtomic | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41'! benchStmParallel | sum | sum := 0. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:41' prior: 33562933! benchStmParallel | sum | sum := 0. (1 to: self) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563060! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(1 to: 100) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563258! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [(i to: (i + 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:42' prior: 33563453! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563655! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k ]] parallelFork ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33563872! benchStmParallel | sum | sum := 0. (1 to: 8) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564102! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ]! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:43' prior: 33564334! benchStmParallel | sum | sum := 0. (0 to: 7) do: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ]] parallelFork. ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564566! benchStmParallel | sum t | sum := 0. (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33564800! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565051! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait.]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:46' prior: 33565319! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:47:04 pm} Squeak4.5-12568.image priorSource: 3090! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:56' prior: 33562824! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566018! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! 
!Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33565614! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 22:57' prior: 33566678! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * 1000) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 10:58:17 pm} Squeak4.5-12568.image priorSource: 11414! !STMAtomic methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:01' prior: 33561633! value | result | self primEnter. result := self block value. self primLeave. ^result ! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block ^ block! ! !STMAtomic methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:02'! block: anObject block := anObject! ! [ 1 + 1 ] atomic value! [ 1 + 1 ] atomic value! ----SNAPSHOT----{16 January 2014 . 11:03:21 pm} Squeak4.5-12568.image priorSource: 12802! ----SNAPSHOT----{16 January 2014 . 11:03:41 pm} Squeak4.5-12568.image priorSource: 13325! ----SNAPSHOT----{16 January 2014 . 11:03:45 pm} Squeak4.5-12568.image priorSource: 13416! BlockClosure organization addCategory: #STM! BlockClosure organization classify: #atomic under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 22:37' prior: 33562201! atomic ^STMAtomic from: self! ! BlockClosure organization classify: #newSTMProcess under: #STM! !BlockClosure methodsFor: 'STM' stamp: '' prior: 42643259! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! !BlockClosure methodsFor: 'STM' stamp: '' prior: 33568373! newSTMProcess ^ STMProcess forContext: [self value] asContext priority: Processor activePriority! ! 
BlockClosure organization classify: #parallelFork under: #STM! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 21:13' prior: 33556059! parallelFork ^ (self newSTMProcess) fork; yourself! ! Object subclass: #STMFuture instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMFuture instanceVariableNames: 'block' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block ^ block! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:33'! block: anObject block := anObject! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/16/2014 23:34'! invoke ! ! Object subclass: #STMFuture instanceVariableNames: 'block process' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process ^ process! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:34'! process: anObject process := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35' prior: 33569341! invoke self process: (self block parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:35'! value ! ! Object subclass: #STMFuture instanceVariableNames: 'block process result' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result ^ result! ! !STMFuture methodsFor: 'accessing' stamp: 'toma 1/16/2014 23:35'! result: anObject result := anObject! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569785! invoke self process: ([self result: self block value] parallelFork)! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33569914! value self process wait.! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/16/2014 23:36' prior: 33570525! value self process wait. 
^self result! ! !STMFuture class methodsFor: 'nil' stamp: 'toma 1/16/2014 23:37'! invoke: aBlock ^(STMFuture new) block: aBlock; invoke; yourself! ! !BlockClosure methodsFor: 'STM' stamp: 'toma 1/16/2014 23:38'! async ^STMFuture invoke: self! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:38'! benchStmFuture ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:39' prior: 33570998! benchStmFuture | futures | ! ! (1 to: 100) sum! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:40' prior: 33571101! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum ] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571236! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:41' prior: 33571416! benchStmFuture | futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async] ! ! (1 to: 100) inject: 0 into: [ :i :k | i + k]! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:43' prior: 33571596! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :s :f | s + (f value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33571825! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/16/2014 23:44' prior: 33572069! benchStmFuture | sum futures | futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{16 January 2014 . 11:45:18 pm} Squeak4.5-12568.image priorSource: 13507! ----SNAPSHOT----{16 January 2014 . 11:45:23 pm} Squeak4.5-12568.image priorSource: 18085! ----SNAPSHOT----{16 January 2014 . 
11:46:35 pm} Squeak4.5-12568.image priorSource: 18176! Object subclass: #STMWorker instanceVariableNames: '' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMWorker instanceVariableNames: 'queue' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33570359! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573142! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ ]! ! self! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:23' prior: 33573350! invoke self process ifNil: [ self process: ([self result: self block value] parallelFork) ] ifNotNil: [ self error: 'Future already invoked' ]! ! !STMFuture methodsFor: 'nil' stamp: 'toma 1/17/2014 00:24'! initialize super initialize.! ! STMFuture removeSelector: #initialize! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33570648! value self process ifNotNil: [ self process wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26' prior: 33573946! value self process ifNotNil: [ self wait. ^self result ] ifNil: [ self error: 'Future not invoked' ] ! ! !STMFuture methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:26'! wait self process wait.! ! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:28'! submit: aBlock callback: aUnaryBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! send: aSymbol with: anArgument ! ! STMWorker removeSelector: #submit:callback:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30'! on: aSymbol do: aBlock ! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:30' prior: 33574724! on: aSymbol do: aBlock ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31'! onMessage: aSymbol do: aBlock ! ! STMWorker removeSelector: #on:do:! Object subclass: #STMWorker instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'nil' stamp: 'toma 1/17/2014 00:31'! initialize ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:31' prior: 33575225! initialize handlers := Dictionary new.! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue ^ queue! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers ^ handlers! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:32'! handlers: anObject handlers := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33575335! initialize self handlers: Dictionary new.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574951! onMessage: aSymbol do: aBlock self handlers at: aSymbol put: aBlock! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33574566! send: aSymbol with: anArgument ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:32' prior: 33576170! send: aSymbol with: anArgument ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:34' prior: 33576299! send: aSymbol with: anArgument ! ! Object subclass: #STMMessage instanceVariableNames: 'queue handlers' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! Object subclass: #STMMessage instanceVariableNames: 'name arg' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! 
Object subclass: #STMMessage instanceVariableNames: 'name args' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! {1. 2.}! {1. 2. World.}! [:i :j | i + j]! [:i :j | i + j] valueWithArguments: {1. 2.}! !STMMessage class methodsFor: 'nil' stamp: 'toma 1/17/2014 00:39'! named: aSymbol withArgs: anArray ^(self new) name: aSymbol; arguments: anArray; yourself! ! Object subclass: #STMMessage instanceVariableNames: 'name arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! name: anObject name := anObject! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments ^ arguments! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:39'! arguments: anObject arguments := anObject! ! Object subclass: #STMMessage instanceVariableNames: 'messageName arguments' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName ^ messageName! ! !STMMessage methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:40'! messageName: anObject messageName := anObject! ! STMMessage removeSelector: #name:! !STMMessage class methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:40' prior: 33577040! named: aSymbol withArgs: anArray ^(self new) messageName: aSymbol; arguments: anArray; yourself! ! a := {1. 2. 3.}! a := OrderedCollection new! a add: 5! a! a add: 5! a! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33576429! send: aSymbol with: anArgument self queue! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:44' prior: 33575864! initialize self handlers: Dictionary new. self queue: Stack new.! ! a := Stack new! a := Stack new! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:47' prior: 33578512! initialize self handlers: Dictionary new. self queue: LinkedList new.! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578372! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol with: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33578879! send: aSymbol with: anArgument self queue addLast: (STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:48' prior: 33579075! send: aSymbol with: anArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument with: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! STMWorker removeSelector: #send:with:with:! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:49'! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self queue addLast: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50'! loop ! ! Object subclass: #STMWorker instanceVariableNames: 'queue handlers active' classVariableNames: '' poolDictionaries: '' category: 'Kernel-STM'! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active ^ active! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:50'! active: anObject active := anObject! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:50' prior: 33580221! loop ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580665! loop self active: true. [self active] whileTrue: [ ]! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:51' prior: 33580769! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33580922! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581078! loop self active: true. [self active] whileTrue: [ [self queue isEmpty] ifFalse: [ | message | [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:52' prior: 33581273! loop self active: true. [self active] whileTrue: [ | message | [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581519! loop self active: true. [self active] whileTrue: [ | message | message := nil. [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33581768! loop self active: true. [self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:53' prior: 33582035! loop self active: true. [self active] whileTrue: [ | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value. ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54'! receive ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582318! loop self active: true. [self active] whileTrue: [ ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582587! receive | message | message := nil. 
[ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst] ] ] atomic value.! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:54' prior: 33582853! receive | message | message := nil. [ [self queue isEmpty] ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33575531! queue: aMessage ! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:55' prior: 33583328! queue: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56' prior: 33583443! queue: anObject queue := anObject! ! !STMWorker methodsFor: 'accessing' stamp: 'toma 1/17/2014 00:56'! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579275! send: aSymbol with: anArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579689! send: aSymbol with: anArgument and: anotherArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument.})! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:58' prior: 33579960! send: aSymbol with: anArgument and: anotherArgument and: aThirdArgument self schedule: ( STMMessage named: aSymbol withArgs: {anArgument. anotherArgument. aThirdArgument})! ! STMWorker organization classify: #schedule: under: #'as yet unclassified'! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 00:56' prior: 33583697! schedule: aMessage [self queue addLast: aMessage] atomic value! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:00' prior: 33582694! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m ] ]! ! 
!STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584800! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01' prior: 33584997! loop self active: true. [self active] whileTrue: [ self receive ifNotNilDo: [ :m | (self handlers at: (m messageName)) valueWithArguments: (m arguments) ] ]! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! stop self active: False! ! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:01'! start [self loop] parallelFork! ! w := STMWorker new! w onMessage: #test do: [:i | Transcript show: i]! w start! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:03' prior: 33583086! receive | message | message := nil. [ (self queue isEmpty) ifFalse: [ [message := self queue removeFirst]] ] atomic value. ^message! ! w stop! !STMWorker methodsFor: 'as yet unclassified' stamp: 'toma 1/17/2014 01:04' prior: 33585522! stop self active: false! ! Smalltalk renameClassNamed: #STMWorker as: #STMActor! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06'! benchStmActor | a1 a2 | ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:06' prior: 33586238! benchStmActor | a1 a2 | a1 := STMActor new. a2 := STMActor new. ! ! 1 printString! 1 printString! 1 printString! '1'! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586352! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum2 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:14' prior: 33586563! benchStmActor | a | a := STMActor new. 
a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:15' prior: 33586879! benchStmActor | a | a := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ] a start. ! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:18' prior: 33587197! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33587525! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588158! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. 
(n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.'] a stop. b stop.! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 01:19' prior: 33588833! benchStmActor | a b | a := STMActor new. b := STMActor new. a onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'a'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [b send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. b onMessage: #fibonacci do: [ :n :sum1 :sum2 | SPyVM print: 'b'. (n < 1) ifTrue: [SPyVM print: (sum1 printString) ] ifFalse: [a send: #fibonacci with: (n - 1) and: sum2 and: (sum1 + sum2)] ]. a start. b start. a send: #fibonacci with: self and: 1 and: 1. (1 to: 1000) do: [:i | SPyVM print: '.']. a stop. b stop.! ! ----SNAPSHOT----{17 January 2014 . 1:19:41 am} Squeak4.5-12568.image priorSource: 18267! ----SNAPSHOT----{17 January 2014 . 1:23 am} Squeak4.5-12568.image priorSource: 35706! 10 benchStmActor! ----QUIT/NOSAVE----{17 January 2014 . 1:24:53 am} Squeak4.5-12568.image priorSource: 35796! ----STARTUP----{17 January 2014 . 5:10:50 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:00' prior: 33566346! benchStmAtomic | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:00:29 pm} Squeak4.5-12568.image priorSource: 35796! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:01' prior: 33566996! 
benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * 1000 - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:01' prior: 33591037! benchStmParallel | sum threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:01:14 pm} Squeak4.5-12568.image priorSource: 36433! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:18' prior: 33591357! benchStmParallel | sum num threads | sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33591767! benchStmParallel | sum num threads | num := self \\ 100. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33592091! benchStmParallel | sum num threads max | num := self \\ 100. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:20' prior: 33592436! benchStmParallel | sum num threads max | num := self \\ 100. max := self - num. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33592785! 
benchStmParallel | sum num threads max | num := self \\ 100. max := self - num. sum := 0. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33590615! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: 7) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33593523! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: num) collect: [ :i | [((i * self) to: ((i + 1) * self - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33593917! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. sum := 0. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:21' prior: 33594313! benchStmAtomic | sum threads max num | num := self \\ 100. max := self - num. sum := 0. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k. ] atomic value] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:21:58 pm} Squeak4.5-12568.image priorSource: 37163! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33572325! benchStmFuture | sum num threads max futures | num := self \\ 100. max := self - num. sum := 0. 
futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595178! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: 7) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595523! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: 1000) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:25' prior: 33595860! benchStmFuture | sum num max futures | num := self \\ 100. max := self - num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596199! benchStmFuture | sum num max futures | num := self \\ 100. max := (self - num) \\ num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596537! benchStmFuture | sum num max futures | num := self \\ 100. max := (self - num) // num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/17/2014 19:26' prior: 33596884! benchStmFuture | sum num max futures | num := self \\ 100. max := self // num. sum := 0. 
futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! ----SNAPSHOT----{17 January 2014 . 7:26:27 pm} Squeak4.5-12568.image priorSource: 40574! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:20' prior: 33597231! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. ^ sum printString! ! Time now! Time now! Time now! Time now! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! Time now asNanoSeconds! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33597660! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: 'µs'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33598176! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: 'µs inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:25' prior: 33598657! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. 
sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] µs inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! Time now asNanoSeconds // 1000000! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 15:26' prior: 33599153! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 3:26:07 pm} Squeak4.5-12568.image priorSource: 43056! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:30' prior: 33599946! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (0 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:31' prior: 33600550! benchStmFuture | sum num max futures start | num := self \\ 100. max := self // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. 
futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 16:36' prior: 33601163! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | [(1 to: max) sum] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 4:36:36 pm} Squeak4.5-12568.image priorSource: 45946! ----SNAPSHOT----{18 January 2014 . 4:36:48 pm} Squeak4.5-12568.image priorSource: 47883! ----STARTUP----{18 January 2014 . 10:05:52 pm} as /home/bot/lang-smalltalk/images/Squeak4.5-12568.image! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:06' prior: 33593154! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (0 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:06' prior: 33602684! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. 
threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07' prior: 33603203! benchStmParallel | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | sum := sum + k. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:07' prior: 33594707! benchStmAtomic | sum num threads max start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. threads := (1 to: num) collect: [ :i | [((i * max) to: ((i + 1) * max - 1)) do: [ :k | [sum := sum + k.] atomic value. ] ] parallelFork ]. threads do: [:t | t wait]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! ----SNAPSHOT----{18 January 2014 . 10:07:37 pm} Squeak4.5-12568.image priorSource: 47973! ----SNAPSHOT----{18 January 2014 . 10:48:10 pm} Squeak4.5-12568.image priorSource: 50518! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33601776! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. 
[(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59'! benchStmFuture2 | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33605214! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. sum := futures inject: 0 into: [ :next :each | next + (each value)]. SPyVM print: '[squeak] milliseconds inside method:'. SPyVM print: (((Time now asNanoSeconds) - start) // 1000000) printString. ^ sum printString! ! !Integer methodsFor: 'benchmarks' stamp: 'toma 1/18/2014 22:59' prior: 33606505! benchStmFuture | sum num max futures start | num := self \\ 100. max := (self - num) // num. sum := 0. SPyVM print: ('Threads:', (num printString)). SPyVM print: ('Items/Thread:', (max printString)). start := Time now asNanoSeconds. futures := (1 to: num) collect: [ :id | |s| s := 0. [(1 to: max) do: [:i | s := s + 1] ] async]. 
From noreply at buildbot.pypy.org Sun Aug 10 20:56:17 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 10 Aug 2014 20:56:17 +0200 (CEST) Subject: [pypy-commit] pypy default: merge branch pytest-25: Update our py.test to 2.5.2 Message-ID: <20140810185617.5A0311C0136@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r72746:c1f4835ed3c3 Date: 2014-08-10 19:55 +0100 http://bitbucket.org/pypy/pypy/changeset/c1f4835ed3c3/ Log: merge branch pytest-25: Update our py.test to 2.5.2 diff too long, truncating to 2000 out of 10234 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). 
+ +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' 
not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. """) group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if 
sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. + if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. 
-if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. 
To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. 
May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. 
+ return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. 
expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Sun Aug 10 20:59:20 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 10 Aug 2014 20:59:20 +0200 (CEST) Subject: [pypy-commit] pypy pytest-25: close branch Message-ID: <20140810185920.DD7101C0136@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: pytest-25 Changeset: r72747:4835eb5bf06a Date: 2014-08-10 19:58 +0100 http://bitbucket.org/pypy/pypy/changeset/4835eb5bf06a/ Log: close branch From noreply at buildbot.pypy.org Sun Aug 10 21:42:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 21:42:36 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Branch to throw away the most obscure hacks in pypy-stm -- er, I mean to Message-ID: <20140810194236.3A48B1C0136@cobra.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: stmgc-c7-rewindjmp Changeset: r72748:f853f4db5261 Date: 2014-08-10 17:58 +0200 http://bitbucket.org/pypy/pypy/changeset/f853f4db5261/ Log: Branch to throw away the most obscure hacks in pypy-stm -- er, I mean to use rewind_setjmp From noreply at buildbot.pypy.org Sun Aug 10 21:42:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 10 Aug 2014 21:42:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/bdc151305c79 Message-ID: <20140810194237.896161C0136@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72749:ad844f52f065 Date: 2014-08-10 18:01 +0200 http://bitbucket.org/pypy/pypy/changeset/ad844f52f065/ Log: import stmgc/bdc151305c79 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -f18bff5ab704 +bdc151305c79 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -325,14 +325,14 @@ STM_SEGMENT->transaction_read_version = 1; } -void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +static void _stm_start_transaction(stm_thread_local_t *tl, bool inevitable) { assert(!_stm_in_transaction(tl)); s_mutex_lock(); retry: - if (jmpbuf == NULL) { + if (inevitable) { wait_for_end_of_inevitable_transaction(tl); } @@ -347,11 +347,9 @@ STM_PSEGMENT->signalled_to_commit_soon = false; STM_PSEGMENT->safe_point = SP_RUNNING; STM_PSEGMENT->marker_inev[1] = 0; - if (jmpbuf == NULL) + if (inevitable) marker_fetch_inev(); - STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR - : TS_INEVITABLE); - STM_SEGMENT->jmpbuf_ptr = jmpbuf; + STM_PSEGMENT->transaction_state = (inevitable ? 
TS_INEVITABLE : TS_REGULAR); #ifndef NDEBUG STM_PSEGMENT->running_pthread = pthread_self(); #endif @@ -391,6 +389,22 @@ check_nursery_at_transaction_start(); } +long stm_start_transaction(stm_thread_local_t *tl) +{ +#ifdef STM_NO_AUTOMATIC_SETJMP + long repeat_count = 0; /* test/support.py */ +#else + long repeat_count = rewind_jmp_setjmp(&tl->rjthread); +#endif + _stm_start_transaction(tl, false); + return repeat_count; +} + +void stm_start_inevitable_transaction(stm_thread_local_t *tl) +{ + _stm_start_transaction(tl, true); +} + /************************************************************/ @@ -815,7 +829,7 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); /* if a major collection is required, do it here */ if (is_major_collection_requested()) { @@ -893,7 +907,7 @@ WRITE_BARRIER flag, unless they have been modified recently. Ignore the old flag; after copying from the other segment, we should have the flag. */ - assert(item->stm_flags & GCFLAG_WRITE_BARRIER); + assert(((struct object_s *)dst)->stm_flags & GCFLAG_WRITE_BARRIER); /* write all changes to the object before we release the write lock below. 
This is needed because we need to @@ -988,6 +1002,18 @@ #pragma pop_macro("STM_PSEGMENT") } +#ifdef STM_NO_AUTOMATIC_SETJMP +void _test_run_abort(stm_thread_local_t *tl) __attribute__((noreturn)); +int stm_is_inevitable(void) +{ + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: return 0; + case TS_INEVITABLE: return 1; + default: abort(); + } +} +#endif + static void abort_with_mutex(void) { assert(_has_mutex()); @@ -997,10 +1023,9 @@ abort_data_structures_from_segment_num(STM_SEGMENT->segment_num); - stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + stm_thread_local_t *tl = STM_SEGMENT->running_thread; /* clear memory registered on the thread-local */ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; if (tl->mem_clear_on_abort) memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); @@ -1036,9 +1061,11 @@ */ usleep(1); - assert(jmpbuf_ptr != NULL); - assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ - __builtin_longjmp(*jmpbuf_ptr, 1); +#ifdef STM_NO_AUTOMATIC_SETJMP + _test_run_abort(tl); +#else + rewind_jmp_longjmp(&tl->rjthread); +#endif } void _stm_become_inevitable(const char *msg) @@ -1052,12 +1079,11 @@ marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; - STM_SEGMENT->jmpbuf_ptr = NULL; + rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); clear_callbacks_on_abort(); } else { assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); - assert(STM_SEGMENT->jmpbuf_ptr == NULL); } s_mutex_unlock(); diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -177,21 +177,25 @@ static void fork_abort_thread(long i) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); + stm_thread_local_t *tl = pr->pub.running_thread; dprintf(("forksupport_child: abort in seg%ld\n", i)); - 
assert(pr->pub.running_thread->associated_segment_num == i); + assert(tl->associated_segment_num == i); assert(pr->transaction_state == TS_REGULAR); set_gs_register(get_segment_base(i)); - stm_jmpbuf_t jmpbuf; - if (__builtin_setjmp(jmpbuf) == 0) { - pr->pub.jmpbuf_ptr = &jmpbuf; + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(tl, &rjbuf); + if (rewind_jmp_setjmp(&tl->rjthread) == 0) { #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif pr->pub.running_thread->shadowstack = ( pr->shadowstack_at_start_of_transaction); + strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } + rewind_jmp_forget(&tl->rjthread); + stm_rewind_jmp_leaveframe(tl, &rjbuf); } static void forksupport_child(void) diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -525,17 +525,15 @@ /* this is called by _stm_largemalloc_sweep() */ object_t *obj = (object_t *)(data - stm_object_pages); if (!mark_visited_test_and_clear(obj)) { -#ifndef NDEBUG /* This is actually needed in order to avoid random write-read - conflicts with objects read and freed long in the past. Still, - it is probably rare enough so that we don't need this additional - overhead. (test_random hits it sometimes) */ + conflicts with objects read and freed long in the past. + It is probably rare enough, but still, we want to avoid any + false conflict. 
(test_random hits it sometimes) */ long i; for (i = 1; i <= NB_SEGMENTS; i++) { ((struct stm_read_marker_s *) (get_segment_base(i) + (((uintptr_t)obj) >> 4)))->rm = 0; } -#endif return false; } return true; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -462,8 +462,10 @@ TREE_LOOP_FORWARD(*pseg->young_outside_nursery, item) { object_t *obj = (object_t*)item->addr; + assert(!_is_in_nursery(obj)); - /* mark slot as unread */ + /* mark slot as unread (it can only have the read marker + in this segment) */ ((struct stm_read_marker_s *) (pseg->pub.segment_base + (((uintptr_t)obj) >> 4)))->rm = 0; diff --git a/rpython/translator/stm/src_stm/stmgc.c b/rpython/translator/stm/src_stm/stmgc.c --- a/rpython/translator/stm/src_stm/stmgc.c +++ b/rpython/translator/stm/src_stm/stmgc.c @@ -37,3 +37,4 @@ #include "stm/weakref.c" #include "stm/timing.c" #include "stm/marker.c" +#include "stm/rewind_setjmp.c" diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -14,6 +14,8 @@ #include #include +#include "stm/rewind_setjmp.h" + #if LONG_MAX == 2147483647 # error "Requires a 64-bit environment" #endif @@ -26,7 +28,6 @@ typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX struct stm_creation_marker_s stm_creation_marker_t; typedef TLPREFIX char stm_char; -typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ struct stm_read_marker_s { /* In every segment, every object has a corresponding read marker. 
@@ -45,7 +46,6 @@ stm_char *nursery_current; uintptr_t nursery_end; struct stm_thread_local_s *running_thread; - stm_jmpbuf_t *jmpbuf_ptr; }; #define STM_SEGMENT ((stm_segment_info_t *)4352) @@ -80,6 +80,8 @@ typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ struct stm_shadowentry_s *shadowstack, *shadowstack_base; + /* rewind_setjmp's interface */ + rewind_jmp_thread rjthread; /* a generic optional thread-local object */ object_t *thread_local_obj; /* in case this thread runs a transaction that aborts, @@ -115,7 +117,6 @@ object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(const char*); -void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); /* for tests, but also used in duhton: */ @@ -327,40 +328,42 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); +/* At some key places, like the entry point of the thread and in the + function with the interpreter's dispatch loop, you need to declare + a local variable of type 'rewind_jmp_buf' and call these macros. */ +#define stm_rewind_jmp_enterframe(tl, rjbuf) \ + rewind_jmp_enterframe(&(tl)->rjthread, rjbuf) +#define stm_rewind_jmp_leaveframe(tl, rjbuf) \ + rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf) + /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. - Use the macro STM_START_TRANSACTION() to start a transaction that - can be restarted using the 'jmpbuf' (a local variable of type - stm_jmpbuf_t). */ -#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ - _stm_start_transaction(tl, &jmpbuf); \ -}) - -/* Start an inevitable transaction, if it's going to return from the - current function immediately. 
*/ -static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { - _stm_start_transaction(tl, NULL); -} - -/* Commit a transaction. */ + The stm_start_transaction() call returns the number of times it + returned, starting at 0. If it is > 0, then the transaction was + aborted and restarted this number of times. */ +long stm_start_transaction(stm_thread_local_t *tl); +void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); -/* Abort the currently running transaction. */ +/* Abort the currently running transaction. This function never + returns: it jumps back to the stm_start_transaction(). */ void stm_abort_transaction(void) __attribute__((noreturn)); -/* Turn the current transaction inevitable. The 'jmpbuf' passed to - STM_START_TRANSACTION() is not going to be used any more after - this call (but the stm_become_inevitable() itself may still abort). */ +/* Turn the current transaction inevitable. + The stm_become_inevitable() itself may still abort. */ +#ifdef STM_NO_AUTOMATIC_SETJMP +int stm_is_inevitable(void); +#else +static inline int stm_is_inevitable(void) { + return !rewind_jmp_armed(&STM_SEGMENT->running_thread->rjthread); +} +#endif static inline void stm_become_inevitable(stm_thread_local_t *tl, const char* msg) { assert(STM_SEGMENT->running_thread == tl); - if (STM_SEGMENT->jmpbuf_ptr != NULL) + if (!stm_is_inevitable()) _stm_become_inevitable(msg); } -static inline int stm_is_inevitable(void) { - return (STM_SEGMENT->jmpbuf_ptr == NULL); -} /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). 
*/ From noreply at buildbot.pypy.org Mon Aug 11 15:25:01 2014 From: noreply at buildbot.pypy.org (dcrosta) Date: Mon, 11 Aug 2014 15:25:01 +0200 (CEST) Subject: [pypy-commit] pypy fix-package-py: fix minor bug in package.py Message-ID: <20140811132501.114411C0EE9@cobra.cs.uni-duesseldorf.de> Author: Dan Crosta Branch: fix-package-py Changeset: r72750:fdc893a448eb Date: 2014-08-01 17:32 -0400 http://bitbucket.org/pypy/pypy/changeset/fdc893a448eb/ Log: fix minor bug in package.py diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -74,7 +74,7 @@ dirs = glob.glob(options.license_base + "/" +pat) if not dirs: raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: + if len(dirs) > 1: raise ValueError, "Multiple copies of "+pat dir = dirs[0] with open(os.path.join(dir, fname)) as fid: From noreply at buildbot.pypy.org Mon Aug 11 15:25:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Aug 2014 15:25:02 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in dcrosta/pypy/fix-package-py (pull request #260) Message-ID: <20140811132502.6435D1C0EE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72751:78232ee06e08 Date: 2014-08-11 15:24 +0200 http://bitbucket.org/pypy/pypy/changeset/78232ee06e08/ Log: Merged in dcrosta/pypy/fix-package-py (pull request #260) fix minor bug in package.py diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -74,7 +74,7 @@ dirs = glob.glob(options.license_base + "/" +pat) if not dirs: raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: + if len(dirs) > 1: raise ValueError, "Multiple copies of "+pat dir = dirs[0] with open(os.path.join(dir, fname)) as fid: From noreply at buildbot.pypy.org Mon Aug 11 15:41:35 2014 From: noreply at buildbot.pypy.org 
(arigo) Date: Mon, 11 Aug 2014 15:41:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Clean up. Message-ID: <20140811134135.2808C1C3272@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72752:9eb10c16e25a Date: 2014-08-11 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9eb10c16e25a/ Log: Clean up. diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -52,13 +52,18 @@ sep_template = "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" -def generate_license_linux(basedir, options): +def generate_license(basedir, options): base_file = str(basedir.join('LICENSE')) with open(base_file) as fid: txt = fid.read() - searches = [("bzip2","libbz2-*", "copyright", '---------'), - ("openssl", "openssl*", "copyright", 'LICENSE ISSUES'), - ] + if sys.platform == 'win32': + # shutil.copyfileobj(open("crtlicense.txt"), out) # We do not ship + # msvc runtime files, but otherwise we'd need this on Windows + searches = [("bzip2","bzip2-*", "LICENSE", ''), + ("openssl", "openssl-*", "LICENSE", '')] + else: + searches = [("bzip2","libbz2-*dev", "copyright", '---------'), + ("openssl", "openssl*", "copyright", 'LICENSE ISSUES')] if not options.no_tk: name = 'Tcl/Tk' txt += "License for '%s'" %name @@ -73,9 +78,9 @@ txt += sep_template % name dirs = glob.glob(options.license_base + "/" +pat) if not dirs: - raise ValueError, "Could not find "+ options.license_base + "/" + pat + raise ValueError, "Could not find %s/%s" % (options.license_base, pat) if len(dirs) > 1: - raise ValueError, "Multiple copies of "+pat + raise ValueError, "Multiple copies of %r: %r" % (pat, dirs) dir = dirs[0] with open(os.path.join(dir, fname)) as fid: # Read up to the line dividing the packaging header from the actual copyright @@ -92,43 +97,6 @@ txt += gdbm_bit return txt -def generate_license_windows(basedir, options): - base_file = 
str(basedir.join('LICENSE')) - with open(base_file) as fid: - txt = fid.read() - # shutil.copyfileobj(open("crtlicense.txt"), out) # We do not ship msvc runtime files - if not options.no_tk: - name = 'Tcl/Tk' - txt += "License for '%s'" %name - txt += '\n' + "="*(14 + len(name)) + '\n' - txt += sep_template % name - base_file = str(basedir.join('lib_pypy/_tkinter/license.terms')) - with open(base_file, 'r') as fid: - txt += fid.read() - for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), - ("openssl", "openssl-*", "LICENSE")): - txt += sep_template % name - dirs = glob.glob(options.license_base + "/" +pat) - if not dirs: - raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: - raise ValueError, "Multiple copies of "+pat - dir = dirs[0] - with open(os.path.join(dir, file)) as fid: - txt += fid.read() - return txt - -def generate_license_darwin(basedir, options): - # where are copyright files on macos? - return generate_license_linux(basedir, options) - -if sys.platform == 'win32': - generate_license = generate_license_windows -elif sys.platform == 'darwin': - generate_license = generate_license_darwin -else: - generate_license = generate_license_linux - def create_cffi_import_libraries(pypy_c, options): modules = ['_sqlite3'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) @@ -396,7 +364,9 @@ ---- The gdbm module includes code from gdbm.h, which is distributed under the terms -of the GPL license version 2 or any later version. +of the GPL license version 2 or any later version. Thus the gdbm module, provided in +the file lib_pypy/gdbm.py, is redistributed under the terms of the GPL license as +well. 
''' From noreply at buildbot.pypy.org Mon Aug 11 16:22:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Aug 2014 16:22:34 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the outdated docstring Message-ID: <20140811142234.16EC21C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72753:20ca991cc414 Date: 2014-08-11 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/20ca991cc414/ Log: Fix the outdated docstring diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,9 +3,8 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. Usage: - package.py [--options] + package.py [--options] pypy-VER-PLATFORM -Usually you would do: package.py --version-name pypy-VER-PLATFORM The output is found in the directory from --builddir, by default /tmp/usession-YOURNAME/build/. """ From noreply at buildbot.pypy.org Mon Aug 11 20:06:43 2014 From: noreply at buildbot.pypy.org (waedt) Date: Mon, 11 Aug 2014 20:06:43 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix text_index when run on a translated PyPy Message-ID: <20140811180643.B4FA31C0EE9@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72754:42f8daeaba72 Date: 2014-08-11 03:06 -0500 http://bitbucket.org/pypy/pypy/changeset/42f8daeaba72/ Log: Fix text_index when run on a translated PyPy diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -174,9 +174,14 @@ return pos def __getitem__(self, char_pos): - if not isinstance(char_pos, int): - raise TypeError("string index must be an integer, not %r" % - type(char_pos)) + if not we_are_translated(): + if hasattr(char_pos, '__index__'): + char_pos = char_pos.__index__() + + if not isinstance(char_pos, (int, long)): + raise TypeError("string index must be an integer, not %r" % + type(char_pos)) + # This if statement is 
needed for [-1:0] to slice correctly if char_pos >= self._len: raise IndexError() From noreply at buildbot.pypy.org Mon Aug 11 20:06:45 2014 From: noreply at buildbot.pypy.org (waedt) Date: Mon, 11 Aug 2014 20:06:45 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Handle newlines as ascii strings in W_TextIOWrapper Message-ID: <20140811180645.23F301C0EE9@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72755:8f5d79d24198 Date: 2014-08-11 12:58 -0500 http://bitbucket.org/pypy/pypy/changeset/8f5d79d24198/ Log: Handle newlines as ascii strings in W_TextIOWrapper diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -372,6 +372,7 @@ newline = None else: newline = space.unicode_w(w_newline) + if (newline is not None and len(newline) > 0 and not (utf8.EQ(newline, Utf8Str('\n')) or utf8.EQ(newline, Utf8Str('\r\n')) or @@ -379,20 +380,23 @@ r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( "illegal newline value: %s" % (r,))) + elif newline is not None: + # newline is guaranteed to be either empty or ascii + newline = newline.bytes self.line_buffering = line_buffering - self.readuniversal = newline is None or len(newline) == 0 + self.readuniversal = not newline self.readtranslate = newline is None self.readnl = newline - self.writetranslate = newline is None or len(newline) == 0 + self.writetranslate = (newline is not None and newline != '') if not self.readuniversal: self.writenl = self.readnl - if utf8.EQ(self.writenl, Utf8Str('\n')): + if self.writenl == '\n': self.writenl = None elif _WINDOWS: - self.writenl = Utf8Str("\r\n") + self.writenl = "\r\n" else: self.writenl = None diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -192,6 +192,26 @@ assert got_line == 
exp_line assert len(got_lines) == len(exp_lines) + def test_newlines_output(self): + import _io + import os + testdict = { + "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", + "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", + "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", + "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", + } + tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) + for newline, expected in tests: + buf = _io.BytesIO() + txt = _io.TextIOWrapper(buf, encoding="ascii", newline=newline) + txt.write(u"AAA\nB") + txt.write(u"BB\nCCC\n") + txt.write(u"X\rY\r\nZ") + txt.flush() + assert buf.closed == False + assert buf.getvalue() == expected + def test_readline(self): import _io From noreply at buildbot.pypy.org Mon Aug 11 21:57:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 11 Aug 2014 21:57:19 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140811195719.CD7601C3272@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r522:8ca06ef29c45 Date: 2014-08-11 21:57 +0200 http://bitbucket.org/pypy/pypy.org/changeset/8ca06ef29c45/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $52126 of $105000 (49.6%) + $52294 of $105000 (49.8%)
      diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -9,13 +9,13 @@ - $48374 of $60000 (80.6%) + $48398 of $60000 (80.7%)
      diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $13463 of $80000 (16.8%) + $13478 of $80000 (16.8%)
      From noreply at buildbot.pypy.org Mon Aug 11 22:43:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 11 Aug 2014 22:43:32 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: document numpy installation Message-ID: <20140811204332.E7B331C0547@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: extradoc Changeset: r523:b87c15bb2467 Date: 2014-08-11 23:43 +0300 http://bitbucket.org/pypy/pypy.org/changeset/b87c15bb2467/ Log: document numpy installation diff --git a/compat.html b/compat.html --- a/compat.html +++ b/compat.html @@ -67,6 +67,7 @@
    • cPickle, _csv, ctypes, datetime, dbm, _functools, grp, pwd, readline, resource, sqlite3, syslog, tputil

    All modules that are pure python in CPython of course work.

    +

    Numpy support is not complete. We maintain our own fork of numpy for now, further instructions can be found at https://bitbucker.org/pypy/numpy.git.

    Python libraries known to work under PyPy (the list is not exhaustive). A community maintained compatibility wiki is hosted on bitbucket:

      diff --git a/source/compat.txt b/source/compat.txt --- a/source/compat.txt +++ b/source/compat.txt @@ -31,6 +31,10 @@ All modules that are pure python in CPython of course work. +Numpy support is not complete. We maintain our own fork of numpy for now, further instructions can be found at `https://bitbucker.org/pypy/numpy.git`__. + +.. __: https://bitbucket.org/pypy/numpy.git + Python libraries known to work under PyPy (the list is not exhaustive). A community maintained `compatibility wiki`_ is hosted on bitbucket: From noreply at buildbot.pypy.org Tue Aug 12 02:14:10 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 12 Aug 2014 02:14:10 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140812001410.E6C591C12CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72756:87910b468690 Date: 2014-08-11 17:12 -0700 http://bitbucket.org/pypy/pypy/changeset/87910b468690/ Log: merge default diff too long, truncating to 2000 out of 10648 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). 
+""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Tue Aug 12 02:14:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 12 Aug 2014 02:14:12 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140812001412.6FCB21C12CC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72757:2ccf30804e12 Date: 2014-08-11 17:13 -0700 http://bitbucket.org/pypy/pypy/changeset/2ccf30804e12/ Log: merge default diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -3,9 +3,8 @@ It uses 'pypy/goal/pypy-c' and parts of the rest of the working copy. 
Usage: - package.py [--options] + package.py [--options] pypy-VER-PLATFORM -Usually you would do: package.py --version-name pypy-VER-PLATFORM The output is found in the directory from --builddir, by default /tmp/usession-YOURNAME/build/. """ @@ -52,13 +51,18 @@ sep_template = "\nThis copy of PyPy includes a copy of %s, which is licensed under the following terms:\n\n" -def generate_license_linux(basedir, options): +def generate_license(basedir, options): base_file = str(basedir.join('LICENSE')) with open(base_file) as fid: txt = fid.read() - searches = [("bzip2","libbz2-*", "copyright", '---------'), - ("openssl", "openssl*", "copyright", 'LICENSE ISSUES'), - ] + if sys.platform == 'win32': + # shutil.copyfileobj(open("crtlicense.txt"), out) # We do not ship + # msvc runtime files, but otherwise we'd need this on Windows + searches = [("bzip2","bzip2-*", "LICENSE", ''), + ("openssl", "openssl-*", "LICENSE", '')] + else: + searches = [("bzip2","libbz2-*dev", "copyright", '---------'), + ("openssl", "openssl*", "copyright", 'LICENSE ISSUES')] if not options.no_tk: name = 'Tcl/Tk' txt += "License for '%s'" %name @@ -73,9 +77,9 @@ txt += sep_template % name dirs = glob.glob(options.license_base + "/" +pat) if not dirs: - raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: - raise ValueError, "Multiple copies of "+pat + raise ValueError, "Could not find %s/%s" % (options.license_base, pat) + if len(dirs) > 1: + raise ValueError, "Multiple copies of %r: %r" % (pat, dirs) dir = dirs[0] with open(os.path.join(dir, fname)) as fid: # Read up to the line dividing the packaging header from the actual copyright @@ -92,43 +96,6 @@ txt += gdbm_bit return txt -def generate_license_windows(basedir, options): - base_file = str(basedir.join('LICENSE')) - with open(base_file) as fid: - txt = fid.read() - # shutil.copyfileobj(open("crtlicense.txt"), out) # We do not ship msvc runtime files - if not options.no_tk: - name = 'Tcl/Tk' - txt += "License 
for '%s'" %name - txt += '\n' + "="*(14 + len(name)) + '\n' - txt += sep_template % name - base_file = str(basedir.join('lib_pypy/_tkinter/license.terms')) - with open(base_file, 'r') as fid: - txt += fid.read() - for name, pat, file in (("bzip2","bzip2-*", "LICENSE"), - ("openssl", "openssl-*", "LICENSE")): - txt += sep_template % name - dirs = glob.glob(options.license_base + "/" +pat) - if not dirs: - raise ValueError, "Could not find "+ options.license_base + "/" + pat - if len(dirs) > 2: - raise ValueError, "Multiple copies of "+pat - dir = dirs[0] - with open(os.path.join(dir, file)) as fid: - txt += fid.read() - return txt - -def generate_license_darwin(basedir, options): - # where are copyright files on macos? - return generate_license_linux(basedir, options) - -if sys.platform == 'win32': - generate_license = generate_license_windows -elif sys.platform == 'darwin': - generate_license = generate_license_darwin -else: - generate_license = generate_license_linux - def create_cffi_import_libraries(pypy_c, options): modules = ['_sqlite3'] subprocess.check_call([str(pypy_c), '-c', 'import _sqlite3']) @@ -406,7 +373,9 @@ ---- The _gdbm module includes code from gdbm.h, which is distributed under the terms -of the GPL license version 2 or any later version. +of the GPL license version 2 or any later version. Thus the _gdbm module, provided in +the file lib_pypy/_gdbm.py, is redistributed under the terms of the GPL license as +well. ''' From noreply at buildbot.pypy.org Tue Aug 12 12:47:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 12:47:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Attempt to fix call_release_gil on Windows for stdcall functions. Message-ID: <20140812104754.842011D36D3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72758:5ace22e71a97 Date: 2014-08-12 12:47 +0200 http://bitbucket.org/pypy/pypy/changeset/5ace22e71a97/ Log: Attempt to fix call_release_gil on Windows for stdcall functions. 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -1,3 +1,4 @@ +import sys from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.history import INT, FLOAT @@ -15,6 +16,8 @@ # Same for gcc 4.5.0, better safe than sorry CALL_ALIGN = 16 // WORD +stdcall_or_cdecl = sys.platform == "win32" + def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) @@ -44,11 +47,6 @@ self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS assert self.stack_max >= 3 - def emit_raw_call(self): - self.mc.CALL(self.fnloc) - if self.callconv != FFI_DEFAULT_ABI: - self.current_esp += self._fix_stdcall(self.callconv) - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -246,6 +244,21 @@ self.fnloc = RawEspLoc(p - WORD, INT) + def emit_raw_call(self): + if stdcall_or_cdecl and self.is_call_release_gil: + # Dynamically accept both stdcall and cdecl functions. + # We could try to detect from pyjitpl which calling + # convention this particular function takes, which would + # avoid these two extra MOVs... but later. The ebp register + # is unused here: it will be reloaded from the shadowstack. 
+ self.mc.MOV(ebp, esp) + self.mc.CALL(self.fnloc) + self.mc.MOV(esp, ebp) + else: + self.mc.CALL(self.fnloc) + if self.callconv != FFI_DEFAULT_ABI: + self.current_esp += self._fix_stdcall(self.callconv) + def _fix_stdcall(self, callconv): from rpython.rlib.clibffi import FFI_STDCALL assert callconv == FFI_STDCALL @@ -417,8 +430,9 @@ remap_frame_layout(self.asm, src_locs, dst_locs, X86_64_SCRATCH_REG) - def _fix_stdcall(self, callconv): - assert 0 # should not occur on 64-bit + def emit_raw_call(self): + assert self.callconv == FFI_DEFAULT_ABI + self.mc.CALL(self.fnloc) def load_result(self): if self.restype == 'S': From noreply at buildbot.pypy.org Tue Aug 12 13:30:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 13:30:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Adapt the test: call_release_gil supports mismatching calling conventions Message-ID: <20140812113021.9ACED1D3691@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72759:73e3404a6123 Date: 2014-08-12 12:58 +0200 http://bitbucket.org/pypy/pypy/changeset/73e3404a6123/ Log: Adapt the test: call_release_gil supports mismatching calling conventions diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -438,20 +438,26 @@ if WORD != 4: py.test.skip("32-bit only test") from rpython.jit.backend.x86.regloc import eax, edx - from rpython.jit.backend.x86 import codebuf + from rpython.jit.backend.x86 import codebuf, callbuilder from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib.libffi import types, clibffi had_stdcall = hasattr(clibffi, 'FFI_STDCALL') if not had_stdcall: # not running on Windows, but we can still test monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + monkeypatch.setattr(callbuilder, 'stdcall_or_cdecl', True) + else: + assert callbuilder.stdcall_or_cdecl # - for 
ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + for real_ffi, reported_ffi in [ + (clibffi.FFI_DEFAULT_ABI, clibffi.FFI_DEFAULT_ABI), + (clibffi.FFI_STDCALL, clibffi.FFI_DEFAULT_ABI), + (clibffi.FFI_STDCALL, clibffi.FFI_STDCALL)]: cpu = self.cpu mc = codebuf.MachineCodeBlockWrapper() mc.MOV_rs(eax.value, 4) # argument 1 mc.MOV_rs(edx.value, 40) # argument 10 mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 - if ffi == clibffi.FFI_DEFAULT_ABI: + if real_ffi == clibffi.FFI_DEFAULT_ABI: mc.RET() else: mc.RET16_i(40) @@ -459,7 +465,7 @@ # calldescr = cpu._calldescr_dynamic_for_tests([types.slong] * 10, types.slong) - calldescr.get_call_conv = lambda: ffi # <==== hack + calldescr.get_call_conv = lambda: reported_ffi # <==== hack # ^^^ we patch get_call_conv() so that the test also makes sense # on Linux, because clibffi.get_call_conv() would always # return FFI_DEFAULT_ABI on non-Windows platforms. From noreply at buildbot.pypy.org Tue Aug 12 13:30:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 13:30:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the failing test. Message-ID: <20140812113022.D04851D3691@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72760:53caa1b58231 Date: 2014-08-12 13:29 +0200 http://bitbucket.org/pypy/pypy/changeset/53caa1b58231/ Log: Fix the failing test. 
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, - PASS_ON_MY_FRAME) + PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -251,9 +251,16 @@ # convention this particular function takes, which would # avoid these two extra MOVs... but later. The ebp register # is unused here: it will be reloaded from the shadowstack. + # (This doesn't work during testing, though. Hack hack hack.) + save_ebp = not self.asm.cpu.gc_ll_descr.is_shadow_stack() + ofs = WORD * (FRAME_FIXED_SIZE - 1) + if save_ebp: # only for testing (or with Boehm) + self.mc.MOV_sr(ofs, ebp.value) self.mc.MOV(ebp, esp) self.mc.CALL(self.fnloc) self.mc.MOV(esp, ebp) + if save_ebp: # only for testing (or with Boehm) + self.mc.MOV_rs(ebp.value, ofs) else: self.mc.CALL(self.fnloc) if self.callconv != FFI_DEFAULT_ABI: From noreply at buildbot.pypy.org Tue Aug 12 13:39:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 13:39:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1834 Message-ID: <20140812113928.3983B1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72761:0bbe39e4636f Date: 2014-08-12 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/0bbe39e4636f/ Log: Issue #1834 Export "dooneevent" from Tkinter._tkinter. 
diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -30,6 +30,10 @@ return TkApp(screenName, baseName, className, interactive, wantobjects, wantTk, sync, use) +def dooneevent(flags=0): + return tklib.Tcl_DoOneEvent(flags) + + def _flatten(item): def _flatten1(output, item, depth): if depth > 1000: From noreply at buildbot.pypy.org Tue Aug 12 13:51:01 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 12 Aug 2014 13:51:01 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: only run assert if no pinned objects are around Message-ID: <20140812115101.2C5DE1C03AC@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72762:6b1a2dd2d891 Date: 2014-08-12 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/6b1a2dd2d891/ Log: only run assert if no pinned objects are around diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2026,8 +2026,13 @@ debug_start("gc-collect-step") debug_print("starting gc state: ", GC_STATES[self.gc_state]) # Debugging checks - ll_assert(self.nursery_free == self.nursery, - "nursery not empty in major_collection_step()") + if self.pinned_objects_in_nursery == 0: + ll_assert(self.nursery_free == self.nursery, + "nursery not empty in major_collection_step()") + else: + # XXX try to add some similar check to the above one for the case + # that the nursery still contains some pinned objects (groggi) + pass self.debug_check_consistency() From noreply at buildbot.pypy.org Tue Aug 12 13:51:02 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 12 Aug 2014 13:51:02 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: old objects that no longer point to a pinned one are removed from the internal list Message-ID: <20140812115102.72E281C03AC@cobra.cs.uni-duesseldorf.de> 
Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72763:0645fc78c5e0 Date: 2014-08-12 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/0645fc78c5e0/ Log: old objects that no longer point to a pinned one are removed from the internal list diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1569,8 +1569,18 @@ # objects. This way we populate 'surviving_pinned_objects' # with pinned object that are (only) visible from an old # object. - self.old_objects_pointing_to_pinned.foreach( + # Additionally we create a new list as it may be that an old object + # no longer points to a pinned one and we want them to remove from + # the list. + if self.old_objects_pointing_to_pinned.non_empty(): + current_old_objects_pointing_to_pinned = \ + self.old_objects_pointing_to_pinned + # + self.old_objects_pointing_to_pinned = self.AddressStack() + # visit the ones we know of + current_old_objects_pointing_to_pinned.foreach( self._visit_old_objects_pointing_to_pinned, None) + current_old_objects_pointing_to_pinned.delete() # while True: # If we are using card marking, do a partial trace of the arrays @@ -1684,7 +1694,7 @@ debug_stop("gc-minor") def _visit_old_objects_pointing_to_pinned(self, obj, ignore): - self.trace(obj, self._trace_drag_out, llmemory.NULL) + self.trace(obj, self._trace_drag_out, obj) def collect_roots_in_nursery(self): # we don't need to trace prebuilt GcStructs during a minor collect: From noreply at buildbot.pypy.org Tue Aug 12 13:51:03 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 12 Aug 2014 13:51:03 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: wip: rewriting object pinning tests. they were a mess. 
Message-ID: <20140812115103.B09461C03AC@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72764:d1cb742ced7f Date: 2014-08-12 13:48 +0200 http://bitbucket.org/pypy/pypy/changeset/d1cb742ced7f/ Log: wip: rewriting object pinning tests. they were a mess. This new tests, which are based on the old tests, already discovered two overlooked problems. The new tests do check the state of the GC more thorough. diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -45,328 +45,339 @@ class TestIncminimark(PinningGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + from rpython.memory.gc.incminimark import STATE_SCANNING - def simple_pin_stack(self, collect_func): - # create object, pin it and point from stackroots to it + def pinned_obj_in_stackroot(self, collect_func): + # scenario: a pinned object that is part of the stack roots. 
Check if + # it is not moved + # ptr = self.malloc(S) ptr.someInt = 100 self.stackroots.append(ptr) + assert self.stackroots[0] == ptr # validate our assumption + + adr = llmemory.cast_ptr_to_adr(ptr) + assert self.gc.is_in_nursery(adr) # to be sure + assert self.gc.pin(adr) + # + # the object shouldn't move from now on + collect_func() + # + # check if it is still at the same location as expected + adr_after_collect = llmemory.cast_ptr_to_adr(self.stackroots[0]) + assert self.gc.is_in_nursery(adr_after_collect) + assert adr == adr_after_collect + assert self.gc._is_pinned(adr) + assert ptr.someInt == 100 + assert self.gc.pinned_objects_in_nursery == 1 + + def test_pinned_obj_in_stackroot_minor_collection(self): + self.pinned_obj_in_stackroot(self.gc.minor_collection) + + def test_pinned_obj_in_stackroot_full_major_collection(self): + self.pinned_obj_in_stackroot(self.gc.collect) + + def test_pinned_obj_in_stackroots_stepwise_major_collection(self): + # scenario: same as for 'pinned_obj_in_stackroot' with minor change + # that we do stepwise major collection and check in each step for + # a correct state + # + ptr = self.malloc(S) + ptr.someInt = 100 + self.stackroots.append(ptr) + assert self.stackroots[0] == ptr # validate our assumption + + adr = llmemory.cast_ptr_to_adr(ptr) + assert self.gc.is_in_nursery(adr) + assert self.gc.pin(adr) + # + # the object shouldn't move from now on. 
Do a full round of major + # steps and check each time for correct state + # + # check that we start at the expected point + assert self.gc.gc_state == self.STATE_SCANNING + done = False + while not done: + self.gc.debug_gc_step() + # check that the pinned object didn't move + ptr_after_collection = self.stackroots[0] + adr_after_collection = llmemory.cast_ptr_to_adr(ptr_after_collection) + assert self.gc.is_in_nursery(adr_after_collection) + assert adr == adr_after_collection + assert self.gc._is_pinned(adr) + assert ptr.someInt == 100 + assert self.gc.pinned_objects_in_nursery == 1 + # as the object is referenced from the stackroots, the gc internal + # 'old_objects_pointing_to_pinned' should be empty + assert not self.gc.old_objects_pointing_to_pinned.non_empty() + # + # break condition + done = self.gc.gc_state == self.STATE_SCANNING + + + def pin_unpin_moved_stackroot(self, collect_func): + # scenario: test if the pinned object is moved after being unpinned. + # the second part of the scenario is the tested one. The first part + # is already tests by other tests. + ptr = self.malloc(S) + ptr.someInt = 100 + self.stackroots.append(ptr) + assert self.stackroots[0] == ptr # validate our assumption adr = llmemory.cast_ptr_to_adr(ptr) assert self.gc.pin(adr) collect_func() + # + # from here on the test really starts. previouse logic is already tested + # + self.gc.unpin(adr) + assert not self.gc._is_pinned(adr) + assert self.gc.is_in_nursery(adr) + # + # now we do another collection and the object should be moved out of + # the nursery. 
+ collect_func() + new_adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) + assert not self.gc.is_in_nursery(new_adr) + assert self.stackroots[0].someInt == 100 + with py.test.raises(RuntimeError) as exinfo: + ptr.someInt = 200 + assert "freed" in str(exinfo.value) + + def test_pin_unpin_moved_stackroot_minor_collection(self): + self.pin_unpin_moved_stackroot(self.gc.minor_collection) + + def test_pin_unpin_moved_stackroot_major_collection(self): + self.pin_unpin_moved_stackroot(self.gc.collect) + + + def pin_referenced_from_old(self, collect_func): + # scenario: an old object points to a pinned one. Check if the pinned + # object is correctly kept in the nursery and not moved. + # + # create old object + old_ptr = self.malloc(S) + old_ptr.someInt = 900 + self.stackroots.append(old_ptr) + assert self.stackroots[0] == old_ptr # validate our assumption + collect_func() # make it old: move it out of the nursery + old_ptr = self.stackroots[0] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr)) + # + # create young pinned one and let the old one reference the young one + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(old_ptr, 'next', pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + assert self.gc.is_in_nursery(pinned_adr) + assert old_ptr.next.someInt == 100 + assert self.gc.pinned_objects_in_nursery == 1 + # + # do a collection run and make sure the pinned one didn't move + collect_func() + assert old_ptr.next.someInt == pinned_ptr.someInt == 100 + assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr + assert self.gc.is_in_nursery(pinned_adr) - assert self.gc.is_in_nursery(adr) - assert ptr.someInt == 100 + def test_pin_referenced_from_old_minor_collection(self): + self.pin_referenced_from_old(self.gc.minor_collection) - def test_simple_pin_stack_full_collect(self): - self.simple_pin_stack(self.gc.collect) + def test_pin_referenced_from_old_major_collection(self): + 
self.pin_referenced_from_old(self.gc.collect) - def test_simple_pin_stack_minor_collect(self): - self.simple_pin_stack(self.gc.minor_collection) + def test_pin_referenced_from_old_stepwise_major_collection(self): + # scenario: same as in 'pin_referenced_from_old'. However, + # this time we do a major collection step by step and check + # between steps that the states are as expected. + # + # create old object + old_ptr = self.malloc(S) + old_ptr.someInt = 900 + self.stackroots.append(old_ptr) + assert self.stackroots[0] == old_ptr # validate our assumption + self.gc.minor_collection() # make it old: move it out of the nursery + old_ptr = self.stackroots[0] + old_adr = llmemory.cast_ptr_to_adr(old_ptr) + assert not self.gc.is_in_nursery(old_adr) + # + # create young pinned one and let the old one reference the young one + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(old_ptr, 'next', pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + assert self.gc.is_in_nursery(pinned_adr) + assert old_ptr.next.someInt == 100 + assert self.gc.pinned_objects_in_nursery == 1 + # + # stepwise major collection with validation between steps + # check that we start at the expected point + assert self.gc.gc_state == self.STATE_SCANNING + done = False + while not done: + self.gc.debug_gc_step() + # + # make sure pinned object didn't move + assert old_ptr.next.someInt == pinned_ptr.someInt == 100 + assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr + assert self.gc.is_in_nursery(pinned_adr) + assert self.gc.pinned_objects_in_nursery == 1 + # + # validate that the old object is part of the internal list + # 'old_objects_pointing_to_pinned' as expected. 
+ should_be_old_adr = self.gc.old_objects_pointing_to_pinned.pop() + assert should_be_old_adr == old_adr + self.gc.old_objects_pointing_to_pinned.append(should_be_old_adr) + # + # break condition + done = self.gc.gc_state == self.STATE_SCANNING - def simple_pin_unpin_stack(self, collect_func): - ptr = self.malloc(S) - ptr.someInt = 100 - - self.stackroots.append(ptr) - - adr = llmemory.cast_ptr_to_adr(ptr) - assert self.gc.pin(adr) - + + def pin_referenced_from_old_remove_ref(self, collect_func): + # scenario: an old object points to a pinned one. We remove the + # reference from the old one. So nothing points to the pinned object. + # After this the pinned object should be collected (it's dead). + # + # Create the objects and get them to our initial state (this is not + # tested here, should be already tested by other tests) + old_ptr = self.malloc(S) + old_ptr.someInt = 900 + self.stackroots.append(old_ptr) + assert self.stackroots[0] == old_ptr # check assumption + collect_func() # make it old + old_ptr = self.stackroots[0] + # + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(old_ptr, 'next', pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + # collect_func() + # from here on we have our initial state for this test. + # + # first check some basic assumptions. + assert self.gc.is_in_nursery(pinned_adr) + assert self.gc._is_pinned(pinned_adr) + # remove the reference + self.write(old_ptr, 'next', lltype.nullptr(S)) + # from now on the pinned object is dead. Do a collection and make sure + # old object still there and the pinned one is gone. 
+ collect_func() + assert self.stackroots[0].someInt == 900 + assert not self.gc.old_objects_pointing_to_pinned.non_empty() + with py.test.raises(RuntimeError) as exinfo: + pinned_ptr.someInt = 200 + assert "freed" in str(exinfo.value) - assert self.gc.is_in_nursery(adr) - assert ptr.someInt == 100 - - # unpin and check if object is gone from nursery - self.gc.unpin(adr) + def test_pin_referenced_from_old_remove_ref_minor_collection(self): + self.pin_referenced_from_old_remove_ref(self.gc.minor_collection) + + def test_pin_referenced_from_old_remove_ref_major_collection(self): + self.pin_referenced_from_old_remove_ref(self.gc.collect) + + + def pin_referenced_from_old_remove_old(self, collect_func): + # scenario: an old object referenced a pinned object. After removing + # the stackroot reference to the old object, bot objects (old and pinned) + # must be collected. + # This test is important as we expect not reachable pinned objects to + # be collected. At the same time we have an internal list of objects + # pointing to pinned ones and we must make sure that because of it the + # old/pinned object survive. + # + # create the objects and get them to the initial state for this test. + # Everything on the way to the initial state should be covered by + # other tests. + old_ptr = self.malloc(S) + old_ptr.someInt = 900 + self.stackroots.append(old_ptr) collect_func() - try: - assert ptr.someInt == 100 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) - - # check if we object is still accessible - ptr_old = self.stackroots[0] - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(ptr_old)) - assert ptr_old.someInt == 100 + old_ptr = self.stackroots[0] + # + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(old_ptr, 'next', pinned_ptr) + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + # + collect_func() + # + # now we have our initial state: old object referenced from stackroots. 
+ # Old object referencing a young pinned one. Next step is to make some + # basic checks that we got the expected state. + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr)) + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) + assert pinned_ptr == old_ptr.next + # + # now we remove the old object from the stackroots... + self.stackroots.remove(old_ptr) + # ... and do a major collection (otherwise the old object wouldn't be + # gone). + self.gc.collect() + # check that both objects are gone + assert not self.gc.old_objects_pointing_to_pinned.non_empty() + with py.test.raises(RuntimeError) as exinfo_old: + old_ptr.someInt = 800 + assert "freed" in str(exinfo_old.value) + # + with py.test.raises(RuntimeError) as exinfo_pinned: + pinned_ptr.someInt = 200 + assert "freed" in str(exinfo_pinned.value) - def test_simple_pin_unpin_stack_full_collect(self): - self.simple_pin_unpin_stack(self.gc.collect) + def test_pin_referenced_from_old_remove_old_minor_collection(self): + self.pin_referenced_from_old_remove_old(self.gc.minor_collection) - def test_simple_pin_unpin_stack_minor_collect(self): - self.simple_pin_unpin_stack(self.gc.minor_collection) + def test_pin_referenced_from_old_remove_old_major_collection(self): + self.pin_referenced_from_old_remove_old(self.gc.collect) - def test_pinned_obj_collected_after_old_object_collected(self): + + def pin_referenced_from_young_in_stackroots(self, collect_func): + # scenario: a young object is referenced from the stackroots. This + # young object points to a young pinned object. We check if everything + # behaves as expected after a collection: the young object is moved out + # of the nursery while the pinned one stays where it is. 
+ # root_ptr = self.malloc(S) - root_ptr.someInt = 999 + root_ptr.someInt = 900 self.stackroots.append(root_ptr) - self.gc.collect() - - root_ptr = self.stackroots[0] - next_ptr = self.malloc(S) - next_ptr.someInt = 111 - assert self.gc.pin(llmemory.cast_ptr_to_adr(next_ptr)) - self.write(root_ptr, 'next', next_ptr) - self.gc.collect() - # check still alive - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr.next)) - self.stackroots.remove(root_ptr) - self.gc.collect() - # root_ptr was collected and therefore also the pinned object should - # be gone - try: - next_ptr.someInt = 101 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) - - # XXX more tests like the one above. Make list of all possible cases and - # write tests for each one. Also: minor/full major collection tests maybe - # needed - - def test_pin_referenced_from_stackroot_young(self): + assert self.stackroots[0] == old_ptr # validate assumption # - # create two objects and reference the pinned one - # from the one that will be moved out of the - # nursery. - root_ptr = self.malloc(S) - next_ptr = self.malloc(S) - self.write(root_ptr, 'next', next_ptr) - self.stackroots.append(root_ptr) + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(root_ptr, 'next', pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + # check both are in nursery + assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) + assert self.gc.is_in_nursery(pinned_adr) # - next_ptr.someInt = 100 - root_ptr.someInt = 999 + # no old object yet pointing to a pinned one + assert not self.gc.old_objects_pointing_to_pinned.non_empty() # - next_adr = llmemory.cast_ptr_to_adr(next_ptr) - assert self.gc.pin(next_adr) + # now we do a collection and check if the result is as expected + collect_func() # - # in this step the 'root_ptr' object will be - # outside the nursery, pointing to the still - # young (because it's pinned) 'next_ptr'. 
- self.gc.collect() - # + # check if objects are where we expect them root_ptr = self.stackroots[0] assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) - assert self.gc.is_in_nursery(next_adr) - assert next_ptr.someInt == 100 - assert root_ptr.next == next_ptr - # - # now we remove the reference to the pinned object and do a collect - # to check if the pinned object was removed from nursery. - self.write(root_ptr, 'next', lltype.nullptr(S)) - self.gc.collect() - try: - # should fail as this was the pinned object that is now collected - next_ptr.someInt = 0 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) + assert self.gc.is_in_nursery(pinned_adr) + # and as 'root_ptr' object is now old, it should be tracked specially + should_be_root_ptr = self.gc.old_objects_pointing_to_pinned.pop() + assert should_be_root_ptr == root_ptr + self.gc.old_objects_pointing_to_pinned.push(should_be_root_ptr) + # check that old object still points to the pinned one as expected + assert root_ptr.next == pinned_ptr - def test_old_points_to_pinned(self): - # Test if we handle the case that an already old object can point - # to a pinned object and keeps the pinned object alive by - # that. 
- # - # create the old object that will point to a pinned object - old_ptr = self.malloc(S) - old_ptr.someInt = 999 - self.stackroots.append(old_ptr) - self.gc.collect() - assert not self.gc.is_in_nursery( - llmemory.cast_ptr_to_adr(self.stackroots[0])) - # - # create the young pinned object and attach it to the old object - pinned_ptr = self.malloc(S) - pinned_ptr.someInt = 6 - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.write(self.stackroots[0], 'next', pinned_ptr) - # - # let's check if everything stays in place before/after a collection - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.gc.collect() - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - # - self.stackroots[0].next.someInt = 100 - self.gc.collect() - assert self.stackroots[0].next.someInt == 100 + def test_pin_referenced_from_young_in_stackroots_minor_collection(self): + self.pin_referenced_from_young_in_stackroots(self.gc.minor_collection) - def not_pinned_and_stackroots_point_to_pinned(self, make_old): - # In this test case we point to a pinned object from an (old) object - # *and* from the stackroots - obj_ptr = self.malloc(S) - obj_ptr.someInt = 999 - self.stackroots.append(obj_ptr) - if make_old: - self.gc.collect() - obj_ptr = self.stackroots[0] - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr)) - else: - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(obj_ptr)) + def test_pin_referenced_from_young_in_stackroots_major_collection(self): + self.pin_referenced_from_young_in_stackroots(self.gc.collect) - pinned_ptr = self.malloc(S) - pinned_ptr.someInt = 111 - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.stackroots.append(pinned_ptr) - self.write(obj_ptr, 'next', pinned_ptr) - - self.gc.collect() - # done with preparation. 
do some basic checks - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - assert pinned_ptr.someInt == 111 - assert self.stackroots[0].next == pinned_ptr - - def test_old_and_stackroots_point_to_pinned(self): - self.not_pinned_and_stackroots_point_to_pinned(make_old=True) - - def test_young_and_stackroots_point_to_pinned(self): - self.not_pinned_and_stackroots_point_to_pinned(make_old=False) - - def test_old_points_to_old_points_to_pinned_1(self): - # - # Scenario: - # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'. - # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we - # remove the reference to 'next_ptr' from 'root_ptr' and check if it - # behaves as expected. - # - root_ptr = self.malloc(S) - root_ptr.someInt = 100 - self.stackroots.append(root_ptr) - self.gc.collect() - root_ptr = self.stackroots[0] - # - next_ptr = self.malloc(S) - next_ptr.someInt = 200 - self.write(root_ptr, 'next', next_ptr) - self.gc.collect() - next_ptr = root_ptr.next - # - # check if everything is as expected - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr)) - assert root_ptr.someInt == 100 - assert next_ptr.someInt == 200 - # - pinned_ptr = self.malloc(S) - pinned_ptr.someInt = 300 - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.write(next_ptr, 'next', pinned_ptr) - self.gc.collect() - # - # validate everything is as expected with 3 rounds of GC collecting - for _ in range(3): - self.gc.collect() - assert next_ptr.next == pinned_ptr - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - assert pinned_ptr.someInt == 300 - assert root_ptr.someInt == 100 - assert next_ptr.someInt == 200 - # - # remove the reference to the pinned object - self.write(next_ptr, 'next', root_ptr) - self.gc.minor_collection() - # the minor collection visits all old objects pointing to pinned ones. 
- # therefore the pinned object should be gone - try: - pinned_ptr.someInt == 300 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) - - def test_old_points_to_old_points_to_pinned_2(self): - # - # Scenario: - # stackroots points to 'root_ptr'. 'root_ptr' points to 'next_ptr'. - # 'next_ptr' points to the young and pinned 'pinned_ptr'. Here we - # remove 'root_ptr' from the stackroots and check if it behaves as - # expected. - # - root_ptr = self.malloc(S) - root_ptr.someInt = 100 - self.stackroots.append(root_ptr) - self.gc.collect() - root_ptr = self.stackroots[0] - # - next_ptr = self.malloc(S) - next_ptr.someInt = 200 - self.write(root_ptr, 'next', next_ptr) - self.gc.collect() - next_ptr = root_ptr.next - # - # check if everything is as expected - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) - assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(next_ptr)) - assert root_ptr.someInt == 100 - assert next_ptr.someInt == 200 - # - pinned_ptr = self.malloc(S) - pinned_ptr.someInt = 300 - assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) - self.write(next_ptr, 'next', pinned_ptr) - self.gc.collect() - # - # validate everything is as expected with 3 rounds of GC collecting - for _ in range(3): - self.gc.collect() - assert next_ptr.next == pinned_ptr - assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr)) - assert pinned_ptr.someInt == 300 - assert root_ptr.someInt == 100 - assert next_ptr.someInt == 200 - # - # remove the root from stackroots - self.stackroots.remove(root_ptr) - self.gc.minor_collection() - # - # the minor collection will still visit 'next_ptr', although - # 'root_ptr' is not part of the stackroots anymore. 
This makes - # sense as 'next_ptr' is removed only in the next major collection - assert next_ptr.next.someInt == 300 - # - # now we do a major collection and everything should be gone - self.gc.collect() - try: - pinned_ptr.someInt == 300 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) - - - def test_pin_old(self): - ptr = self.malloc(S) - ptr.someInt = 100 - self.stackroots.append(ptr) - self.gc.collect() - ptr = self.stackroots[0] - adr = llmemory.cast_ptr_to_adr(ptr) - assert ptr.someInt == 100 - assert not self.gc.is_in_nursery(adr) - assert not self.gc.pin(adr) - # ^^^ should not be possible, struct is already old and won't - # move. - - def test_pin_malloc_pin(self): - first_ptr = self.malloc(S) - first_ptr.someInt = 101 - self.stackroots.append(first_ptr) - assert self.gc.pin(llmemory.cast_ptr_to_adr(first_ptr)) - - self.gc.collect() - assert first_ptr.someInt == 101 - - second_ptr = self.malloc(S) - second_ptr.someInt = 102 - self.stackroots.append(second_ptr) - assert self.gc.pin(llmemory.cast_ptr_to_adr(second_ptr)) - - self.gc.collect() - assert first_ptr.someInt == 101 - assert second_ptr.someInt == 102 + # XXX REMOVE THIS COMMENT copied ones: def pin_shadow_1(self, collect_func): ptr = self.malloc(S) @@ -629,35 +640,6 @@ # we did not reset the whole nursery assert self.gc.nursery_top < self.gc.nursery_real_top - def test_collect_dead_pinned_objects(self): - # prepare three object, where two are stackroots - ptr_stackroot_1 = self.malloc(S) - ptr_stackroot_1.someInt = 100 - self.stackroots.append(ptr_stackroot_1) - - ptr_not_stackroot = self.malloc(S) - - ptr_stackroot_2 = self.malloc(S) - ptr_stackroot_2.someInt = 100 - self.stackroots.append(ptr_stackroot_2) - - # pin all three objects - assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_stackroot_1)) - assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_not_stackroot)) - assert self.gc.pin(llmemory.cast_ptr_to_adr(ptr_stackroot_2)) - assert self.gc.pinned_objects_in_nursery == 3 - - 
self.gc.collect() - # now the one not on the stack should be gone. - assert self.gc.pinned_objects_in_nursery == 2 - assert ptr_stackroot_1.someInt == 100 - assert ptr_stackroot_2.someInt == 100 - try: - ptr_not_stackroot.someInt = 100 - assert False - except RuntimeError as ex: - assert "freed" in str(ex) - def fill_nursery_with_pinned_objects(self): typeid = self.get_type_id(S) size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header @@ -714,3 +696,4 @@ # nursery should be full now, at least no space for another `S`. Next malloc should fail. py.test.raises(Exception, self.malloc, S) test_full_pinned_nursery_pin_fail.GC_PARAMS = {'max_number_of_pinned_objects': 50} + From noreply at buildbot.pypy.org Tue Aug 12 14:17:10 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 12 Aug 2014 14:17:10 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: additional object pinning tests. some tests renamed. Message-ID: <20140812121710.B28681C03AC@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72765:119a5775c08a Date: 2014-08-12 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/119a5775c08a/ Log: additional object pinning tests. some tests renamed. diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -47,6 +47,74 @@ from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass from rpython.memory.gc.incminimark import STATE_SCANNING + def test_pin_old(self): + # scenario: try pinning an old object. This should be not possible and + # we want to make sure everything stays as it is. 
+ old_ptr = self.malloc(S) + old_ptr.someInt = 900 + self.stackroots.append(old_ptr) + assert self.stackroots[0] == old_ptr # test assumption + self.gc.collect() + old_ptr = self.stackroots[0] + # now we try to pin it + old_adr = llmemory.cast_ptr_to_adr(old_ptr) + assert not self.gc.is_in_nursery(old_adr) + assert not self.gc.pin(old_adr) + assert self.gc.pinned_objects_in_nursery == 0 + + + def pin_pin_pinned_object_count(self, collect_func): + # scenario: pin two objects that are referenced from stackroots. Check + # if the pinned objects count is correct, even after an other collection + pinned1_ptr = self.malloc(S) + pinned1_ptr.someInt = 100 + self.stackroots.append(pinned1_ptr) + # + pinned2_ptr = self.malloc(S) + pinned2_ptr.someInt = 200 + self.stackroots.append(pinned2_ptr) + # + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned1_ptr)) + assert self.gc.pinned_objects_in_nursery == 1 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned2_ptr)) + assert self.gc.pinned_objects_in_nursery == 2 + # + collect_func() + # + assert self.gc.pinned_objects_in_nursery == 2 + + def test_pin_pin_pinned_object_count_minor_collection(self): + self.pin_pin_pinned_object_count(self.gc.minor_collection) + + def test_pin_pin_pinned_object_count_major_collection(self): + self.pin_pin_pinned_object_count(self.gc.collect) + + + def pin_unpin_pinned_object_count(self, collect_func): + # scenario: pin an object and check the pinned object count. Unpin it + # and check the count again. 
+ pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.stackroots.append(pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + # + assert self.gc.pinned_objects_in_nursery == 0 + assert self.gc.pin(pinned_adr) + assert self.gc.pinned_objects_in_nursery == 1 + collect_func() + assert self.gc.pinned_objects_in_nursery == 1 + self.gc.unpin(pinned_adr) + assert self.gc.pinned_objects_in_nursery == 0 + collect_func() + assert self.gc.pinned_objects_in_nursery == 0 + + def test_pin_unpin_pinned_object_count_minor_collection(self): + self.pin_unpin_pinned_object_count(self.gc.minor_collection) + + def test_pin_unpin_pinned_object_count_major_collection(self): + self.pin_unpin_pinned_object_count(self.gc.collect) + + def pinned_obj_in_stackroot(self, collect_func): # scenario: a pinned object that is part of the stack roots. Check if # it is not moved @@ -342,7 +410,7 @@ root_ptr = self.malloc(S) root_ptr.someInt = 900 self.stackroots.append(root_ptr) - assert self.stackroots[0] == old_ptr # validate assumption + assert self.stackroots[0] == root_ptr # validate assumption # pinned_ptr = self.malloc(S) pinned_ptr.someInt = 100 @@ -364,9 +432,9 @@ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr)) assert self.gc.is_in_nursery(pinned_adr) # and as 'root_ptr' object is now old, it should be tracked specially - should_be_root_ptr = self.gc.old_objects_pointing_to_pinned.pop() - assert should_be_root_ptr == root_ptr - self.gc.old_objects_pointing_to_pinned.push(should_be_root_ptr) + should_be_root_adr = self.gc.old_objects_pointing_to_pinned.pop() + assert should_be_root_adr == llmemory.cast_ptr_to_adr(root_ptr) + self.gc.old_objects_pointing_to_pinned.append(should_be_root_adr) # check that old object still points to the pinned one as expected assert root_ptr.next == pinned_ptr @@ -377,8 +445,6 @@ self.pin_referenced_from_young_in_stackroots(self.gc.collect) - # XXX REMOVE THIS COMMENT copied ones: - def pin_shadow_1(self, collect_func): 
ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) @@ -394,12 +460,13 @@ adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) - def test_pin_shadow_1_minor(self): + def test_pin_shadow_1_minor_collection(self): self.pin_shadow_1(self.gc.minor_collection) - def test_pin_shadow_1_full(self): + def test_pin_shadow_1_major_collection(self): self.pin_shadow_1(self.gc.collect) + def pin_shadow_2(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) @@ -415,12 +482,13 @@ adr = llmemory.cast_ptr_to_adr(self.stackroots[0]) assert not self.gc.is_in_nursery(adr) - def test_pin_shadow_2_minor(self): + def test_pin_shadow_2_minor_collection(self): self.pin_shadow_2(self.gc.minor_collection) - def test_pin_shadow_2_full(self): + def test_pin_shadow_2_major_collection(self): self.pin_shadow_2(self.gc.collect) + def test_pin_nursery_top_scenario1(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) @@ -458,6 +526,7 @@ assert self.gc.nursery_free < self.gc.nursery_top assert self.gc.nursery_top == self.gc.nursery_real_top + def test_pin_nursery_top_scenario2(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) @@ -497,6 +566,7 @@ assert self.gc.nursery_top < adr3 assert adr3 < self.gc.nursery_real_top + def test_pin_nursery_top_scenario3(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) @@ -538,6 +608,7 @@ assert self.gc.nursery_top < adr2 assert adr3 < self.gc.nursery_real_top + def test_pin_nursery_top_scenario4(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) @@ -580,6 +651,7 @@ assert self.gc.nursery_top < adr3 assert adr3 < self.gc.nursery_real_top + def test_pin_nursery_top_scenario5(self): ptr1 = self.malloc(S) adr1 = llmemory.cast_ptr_to_adr(ptr1) @@ -640,6 +712,7 @@ # we did not reset the whole nursery assert self.gc.nursery_top < self.gc.nursery_real_top + def fill_nursery_with_pinned_objects(self): typeid = self.get_type_id(S) size = 
self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header @@ -654,7 +727,8 @@ def test_full_pinned_nursery_pin_fail(self): self.fill_nursery_with_pinned_objects() - # nursery should be full now, at least no space for another `S`. Next malloc should fail. + # nursery should be full now, at least no space for another `S`. + # Next malloc should fail. py.test.raises(Exception, self.malloc, S) def test_full_pinned_nursery_arena_reset(self): @@ -693,7 +767,9 @@ self.stackroots.append(ptr) self.gc.pin(adr) # - # nursery should be full now, at least no space for another `S`. Next malloc should fail. + # nursery should be full now, at least no space for another `S`. + # Next malloc should fail. py.test.raises(Exception, self.malloc, S) - test_full_pinned_nursery_pin_fail.GC_PARAMS = {'max_number_of_pinned_objects': 50} + test_full_pinned_nursery_pin_fail.GC_PARAMS = \ + {'max_number_of_pinned_objects': 50} From noreply at buildbot.pypy.org Tue Aug 12 14:54:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 14:54:53 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a failing test for the preservation of the shadowstack Message-ID: <20140812125453.AE45C1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1296:93f047b33b57 Date: 2014-08-12 14:54 +0200 http://bitbucket.org/pypy/stmgc/changeset/93f047b33b57/ Log: Add a failing test for the preservation of the shadowstack diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -220,6 +220,49 @@ /************************************************************/ +typedef struct { char foo; } object_t; +struct stm_shadowentry_s { object_t *ss; }; +typedef struct { + struct stm_shadowentry_s *shadowstack; + struct stm_shadowentry_s _inline[99]; +} stm_thread_local_t; +#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) 
+void stm_register_thread_local(stm_thread_local_t *tl) { + tl->shadowstack = tl->_inline; +} +void stm_unregister_thread_local(stm_thread_local_t *tl) { } +static stm_thread_local_t tl; + + +void testTL1(void) +{ + object_t *a1, *a2; + stm_register_thread_local(&tl); + + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf); + + a1 = a2 = (object_t *)123456; + STM_PUSH_ROOT(tl, a1); + + if (rewind_jmp_setjmp(>hread) == 0) { + /* first path */ + STM_POP_ROOT(tl, a2); + assert(a1 == a2); + STM_PUSH_ROOT(tl, NULL); + rewind_jmp_longjmp(>hread); + } + /* second path */ + STM_POP_ROOT(tl, a2); + assert(a1 == a2); + + rewind_jmp_leaveframe(>hread, &buf); + stm_unregister_thread_local(&tl); +} + +/************************************************************/ + int rj_malloc_count = 0; void *rj_malloc(size_t size) @@ -248,6 +291,7 @@ else if (!strcmp(argv[1], "4")) test4(); else if (!strcmp(argv[1], "5")) test5(); else if (!strcmp(argv[1], "6")) test6(); + else if (!strcmp(argv[1], "TL1")) testTL1(); else assert(!"bad argv[1]"); assert(rj_malloc_count == 0); diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py --- a/c7/test/test_rewind.py +++ b/c7/test/test_rewind.py @@ -1,17 +1,17 @@ import os def run_test(opt): - err = os.system("clang -g -O%d -Werror -DRJBUF_CUSTOM_MALLOC -I../stm" - " -o test_rewind_O%d test_rewind.c ../stm/rewind_setjmp.c" + err = os.system("clang -g -O%s -Werror -DRJBUF_CUSTOM_MALLOC -I../stm" + " -o test_rewind_O%s test_rewind.c ../stm/rewind_setjmp.c" % (opt, opt)) if err != 0: raise OSError("clang failed on test_rewind.c") - for testnum in [1, 2, 3, 4, 5, 6]: - print '=== O%d: RUNNING TEST %d ===' % (opt, testnum) - err = os.system("./test_rewind_O%d %d" % (opt, testnum)) + for testnum in [1, 2, 3, 4, 5, 6, "TL1"]: + print '=== O%s: RUNNING TEST %s ===' % (opt, testnum) + err = os.system("./test_rewind_O%s %s" % (opt, testnum)) if err != 0: - raise OSError("'test_rewind_O%d %d' failed" % (opt, testnum)) - os.unlink("./test_rewind_O%d" 
% (opt,)) + raise OSError("'test_rewind_O%s %s' failed" % (opt, testnum)) + os.unlink("./test_rewind_O%s" % (opt,)) def test_O0(): run_test(0) def test_O1(): run_test(1) From noreply at buildbot.pypy.org Tue Aug 12 15:58:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 15:58:27 +0200 (CEST) Subject: [pypy-commit] stmgc default: Save and restore slices of the shadowstack in addition to slices of the Message-ID: <20140812135827.EC9A21C326A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1297:438a6f00fadc Date: 2014-08-12 15:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/438a6f00fadc/ Log: Save and restore slices of the shadowstack in addition to slices of the C stack. diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -7,7 +7,8 @@ struct _rewind_jmp_moved_s { struct _rewind_jmp_moved_s *next; - size_t size; + size_t stack_size; + size_t shadowstack_size; }; #define RJM_HEADER sizeof(struct _rewind_jmp_moved_s) @@ -20,28 +21,41 @@ #endif -static void copy_stack(rewind_jmp_thread *rjthread, char *base) +static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase) { + /* Copy away part of the stack and shadowstack. + The stack is copied between 'base' (lower limit, i.e. newest bytes) + and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes). + The shadowstack is copied between 'ssbase' (upper limit, newest) + and 'rjthread->head->shadowstack_base' (lower limit, oldest). 
+ */ assert(rjthread->head != NULL); char *stop = rjthread->head->frame_base; - assert(stop > base); + assert(stop >= base); + void *ssstop = rjthread->head->shadowstack_base; + assert(ssstop <= ssbase); struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *) - rj_malloc(RJM_HEADER + (stop - base)); + rj_malloc(RJM_HEADER + (stop - base) + (ssbase - ssstop)); assert(next != NULL); /* XXX out of memory */ next->next = rjthread->moved_off; - next->size = stop - base; + next->stack_size = stop - base; + next->shadowstack_size = ssbase - ssstop; memcpy(((char *)next) + RJM_HEADER, base, stop - base); + memcpy(((char *)next) + RJM_HEADER + (stop - base), ssstop, + ssbase - ssstop); rjthread->moved_off_base = stop; + rjthread->moved_off_ssbase = ssstop; rjthread->moved_off = next; } __attribute__((noinline)) -long rewind_jmp_setjmp(rewind_jmp_thread *rjthread) +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss) { if (rjthread->moved_off) { _rewind_jmp_free_stack_slices(rjthread); } + void *volatile ss1 = ss; rewind_jmp_thread *volatile rjthread1 = rjthread; int result; if (__builtin_setjmp(rjthread->jmpbuf) == 0) { @@ -55,7 +69,7 @@ result = rjthread->repeat_count + 1; } rjthread->repeat_count = result; - copy_stack(rjthread, (char *)&rjthread1); + copy_stack(rjthread, (char *)&rjthread1, ss1); return result; } @@ -67,13 +81,20 @@ while (rjthread->moved_off) { struct _rewind_jmp_moved_s *p = rjthread->moved_off; char *target = rjthread->moved_off_base; - target -= p->size; + target -= p->stack_size; if (target < stack_free) { /* need more stack space! 
*/ do_longjmp(rjthread, alloca(stack_free - target)); } - memcpy(target, ((char *)p) + RJM_HEADER, p->size); + memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size); + + char *sstarget = rjthread->moved_off_ssbase; + char *ssend = sstarget + p->shadowstack_size; + memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, + p->shadowstack_size); + rjthread->moved_off_base = target; + rjthread->moved_off_ssbase = ssend; rjthread->moved_off = p->next; rj_free(p); } @@ -95,7 +116,7 @@ return; } assert(rjthread->moved_off_base < (char *)rjthread->head); - copy_stack(rjthread, rjthread->moved_off_base); + copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase); } void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -41,6 +41,7 @@ typedef struct _rewind_jmp_buf { char *frame_base; + char *shadowstack_base; struct _rewind_jmp_buf *prev; } rewind_jmp_buf; @@ -48,30 +49,36 @@ rewind_jmp_buf *head; rewind_jmp_buf *initial_head; char *moved_off_base; + char *moved_off_ssbase; struct _rewind_jmp_moved_s *moved_off; void *jmpbuf[5]; long repeat_count; } rewind_jmp_thread; -#define rewind_jmp_enterframe(rjthread, rjbuf) do { \ - (rjbuf)->frame_base = __builtin_frame_address(0); \ - (rjbuf)->prev = (rjthread)->head; \ - (rjthread)->head = (rjbuf); \ +#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + (rjbuf)->frame_base = __builtin_frame_address(0); \ + (rjbuf)->shadowstack_base = (char *)(ss); \ + (rjbuf)->prev = (rjthread)->head; \ + (rjthread)->head = (rjbuf); \ } while (0) -#define rewind_jmp_leaveframe(rjthread, rjbuf) do { \ - (rjthread)->head = (rjbuf)->prev; \ - if ((rjbuf)->frame_base == (rjthread)->moved_off_base) \ - _rewind_jmp_copy_stack_slice(rjthread); \ +#define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \ + assert((rjbuf)->shadowstack_base == (char *)(ss)); \ + (rjthread)->head = (rjbuf)->prev; \ + 
if ((rjbuf)->frame_base == (rjthread)->moved_off_base) { \ + assert((rjthread)->moved_off_ssbase == (char *)(ss));\ + _rewind_jmp_copy_stack_slice(rjthread); \ + } \ } while (0) -long rewind_jmp_setjmp(rewind_jmp_thread *rjthread); +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss); void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); #define rewind_jmp_forget(rjthread) do { \ if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ (rjthread)->moved_off_base = 0; \ + (rjthread)->moved_off_ssbase = 0; \ } while (0) void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *); diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -43,10 +43,10 @@ void test1(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); test1_x = 0; - rewind_jmp_setjmp(>hread); + rewind_jmp_setjmp(>hread, NULL); test1_x++; f1(test1_x); @@ -59,7 +59,7 @@ rewind_jmp_forget(>hread); assert(!rewind_jmp_armed(>hread)); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); } /************************************************************/ @@ -70,22 +70,22 @@ int f2(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); test2_x = 0; - rewind_jmp_setjmp(>hread); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_setjmp(>hread, NULL); + rewind_jmp_leaveframe(>hread, &buf, NULL); return ++test2_x; } void test2(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); int x = f2(); gevent(x); if (x < 10) rewind_jmp_longjmp(>hread); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; CHECK(expected); } @@ -104,12 +104,12 @@ void test3(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + 
rewind_jmp_enterframe(>hread, &buf, NULL); int x = f3(50); gevent(x); if (x < 10) rewind_jmp_longjmp(>hread); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; CHECK(expected); } @@ -120,25 +120,25 @@ int f4(int rec) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); int res; if (rec > 0) res = f4(rec - 1); else res = f2(); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); return res; } void test4(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); int x = f4(5); gevent(x); if (x < 10) rewind_jmp_longjmp(>hread); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; CHECK(expected); } @@ -148,11 +148,11 @@ void test5(void) { struct { int a; rewind_jmp_buf buf; int b; } sbuf; - rewind_jmp_enterframe(>hread, &sbuf.buf); + rewind_jmp_enterframe(>hread, &sbuf.buf, NULL); sbuf.a = 42; sbuf.b = -42; test2_x = 0; - rewind_jmp_setjmp(>hread); + rewind_jmp_setjmp(>hread, NULL); sbuf.a++; sbuf.b--; gevent(sbuf.a); @@ -163,7 +163,7 @@ } int expected[] = {43, -43, 43, -43}; CHECK(expected); - rewind_jmp_leaveframe(>hread, &sbuf.buf); + rewind_jmp_leaveframe(>hread, &sbuf.buf, NULL); } /************************************************************/ @@ -178,9 +178,9 @@ int a8, int a9, int a10, int a11, int a12, int a13) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); - rewind_jmp_setjmp(>hread); + rewind_jmp_setjmp(>hread, NULL); gevent(a1); gevent(a2); gevent(a3); gevent(a4); gevent(a5); gevent(a6); gevent(a7); gevent(a8); gevent(a9); gevent(a10); gevent(a11); gevent(a12); @@ -201,16 +201,16 @@ foo(&a13); rewind_jmp_longjmp(>hread); } - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, 
NULL); } void test6(void) { rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + rewind_jmp_enterframe(>hread, &buf, NULL); test6_x = 0; f6(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13); - rewind_jmp_leaveframe(>hread, &buf); + rewind_jmp_leaveframe(>hread, &buf, NULL); int expected[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, @@ -220,45 +220,64 @@ /************************************************************/ -typedef struct { char foo; } object_t; -struct stm_shadowentry_s { object_t *ss; }; -typedef struct { - struct stm_shadowentry_s *shadowstack; - struct stm_shadowentry_s _inline[99]; -} stm_thread_local_t; -#define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) -#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) -void stm_register_thread_local(stm_thread_local_t *tl) { - tl->shadowstack = tl->_inline; -} -void stm_unregister_thread_local(stm_thread_local_t *tl) { } -static stm_thread_local_t tl; - +static void *ssarray[99]; void testTL1(void) { - object_t *a1, *a2; - stm_register_thread_local(&tl); + void *a4, *a5; + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf, ssarray+5); - rewind_jmp_buf buf; - rewind_jmp_enterframe(>hread, &buf); + a4 = (void *)444444; + a5 = (void *)555555; + ssarray[4] = a4; + ssarray[5] = a5; - a1 = a2 = (object_t *)123456; - STM_PUSH_ROOT(tl, a1); - - if (rewind_jmp_setjmp(>hread) == 0) { + if (rewind_jmp_setjmp(>hread, ssarray+6) == 0) { /* first path */ - STM_POP_ROOT(tl, a2); - assert(a1 == a2); - STM_PUSH_ROOT(tl, NULL); + assert(ssarray[4] == a4); + assert(ssarray[5] == a5); + ssarray[4] = NULL; + ssarray[5] = NULL; rewind_jmp_longjmp(>hread); } /* second path */ - STM_POP_ROOT(tl, a2); - assert(a1 == a2); + assert(ssarray[4] == NULL); /* was not saved */ + assert(ssarray[5] == a5); /* saved and restored */ - rewind_jmp_leaveframe(>hread, &buf); - stm_unregister_thread_local(&tl); + 
rewind_jmp_leaveframe(>hread, &buf, ssarray+5); +} + +__attribute__((noinline)) +int gtl2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf, ssarray+5); + ssarray[5] = (void *)555555; + + int result = rewind_jmp_setjmp(>hread, ssarray+6); + + assert(ssarray[4] == (void *)444444); + assert(ssarray[5] == (void *)555555); + ssarray[5] = NULL; + + rewind_jmp_leaveframe(>hread, &buf, ssarray+5); + return result; +} + +void testTL2(void) +{ + rewind_jmp_buf buf; + rewind_jmp_enterframe(>hread, &buf, ssarray+4); + + ssarray[4] = (void *)444444; + int result = gtl2(); + ssarray[4] = NULL; + + if (result == 0) + rewind_jmp_longjmp(>hread); + + rewind_jmp_leaveframe(>hread, &buf, ssarray+4); } /************************************************************/ @@ -292,6 +311,7 @@ else if (!strcmp(argv[1], "5")) test5(); else if (!strcmp(argv[1], "6")) test6(); else if (!strcmp(argv[1], "TL1")) testTL1(); + else if (!strcmp(argv[1], "TL2")) testTL2(); else assert(!"bad argv[1]"); assert(rj_malloc_count == 0); diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py --- a/c7/test/test_rewind.py +++ b/c7/test/test_rewind.py @@ -6,7 +6,7 @@ % (opt, opt)) if err != 0: raise OSError("clang failed on test_rewind.c") - for testnum in [1, 2, 3, 4, 5, 6, "TL1"]: + for testnum in [1, 2, 3, 4, 5, 6, "TL1", "TL2"]: print '=== O%s: RUNNING TEST %s ===' % (opt, testnum) err = os.system("./test_rewind_O%s %s" % (opt, testnum)) if err != 0: From noreply at buildbot.pypy.org Tue Aug 12 16:00:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:00:44 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add an assert Message-ID: <20140812140044.542201C326A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1298:c84f87cc1dad Date: 2014-08-12 16:00 +0200 http://bitbucket.org/pypy/stmgc/changeset/c84f87cc1dad/ Log: Add an assert diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ 
b/c7/stm/rewind_setjmp.c @@ -67,6 +67,8 @@ rjthread = rjthread1; rjthread->head = rjthread->initial_head; result = rjthread->repeat_count + 1; + /* check that the shadowstack was correctly restored */ + assert(rjthread->moved_off_ssbase == ss1); } rjthread->repeat_count = result; copy_stack(rjthread, (char *)&rjthread1, ss1); From noreply at buildbot.pypy.org Tue Aug 12 16:05:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:05:16 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix Message-ID: <20140812140516.C533A1C12CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1299:99aae29fc053 Date: 2014-08-12 16:05 +0200 http://bitbucket.org/pypy/stmgc/changeset/99aae29fc053/ Log: Fix diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -55,23 +55,26 @@ if (rjthread->moved_off) { _rewind_jmp_free_stack_slices(rjthread); } - void *volatile ss1 = ss; - rewind_jmp_thread *volatile rjthread1 = rjthread; + /* all locals of this function that need to be saved and restored + across the setjmp() should be stored inside this structure */ + struct { void *ss1; rewind_jmp_thread *rjthread1; } volatile saved = + { ss, rjthread }; + int result; if (__builtin_setjmp(rjthread->jmpbuf) == 0) { - rjthread = rjthread1; + rjthread = saved.rjthread1; rjthread->initial_head = rjthread->head; result = 0; } else { - rjthread = rjthread1; + rjthread = saved.rjthread1; rjthread->head = rjthread->initial_head; result = rjthread->repeat_count + 1; /* check that the shadowstack was correctly restored */ - assert(rjthread->moved_off_ssbase == ss1); + assert(rjthread->moved_off_ssbase == saved.ss1); } rjthread->repeat_count = result; - copy_stack(rjthread, (char *)&rjthread1, ss1); + copy_stack(rjthread, (char *)&saved, saved.ss1); return result; } From noreply at buildbot.pypy.org Tue Aug 12 16:43:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:43:01 
+0200 (CEST) Subject: [pypy-commit] stmgc default: in-progress Message-ID: <20140812144301.0D1181C0547@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1300:2866cee6ce00 Date: 2014-08-12 16:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/2866cee6ce00/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -393,7 +393,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP long repeat_count = 0; /* test/support.py */ #else - long repeat_count = rewind_jmp_setjmp(&tl->rjthread); + long repeat_count = stm_rewind_jmp_setjmp(tl); #endif _stm_start_transaction(tl, false); return repeat_count; @@ -828,7 +828,7 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); + stm_rewind_jmp_forget(STM_SEGMENT->running_thread); /* if a major collection is required, do it here */ if (is_major_collection_requested()) { @@ -983,12 +983,12 @@ reset_modified_from_other_segments(segment_num); _verify_cards_cleared_in_all_lists(pseg); - /* reset the tl->shadowstack and thread_local_obj to their original - value before the transaction start */ + /* reset tl->shadowstack and thread_local_obj to their original + value before the transaction start. Also restore the content + of the shadowstack here. 
*/ stm_thread_local_t *tl = pseg->pub.running_thread; - assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); - pseg->shadowstack_at_abort = tl->shadowstack; - tl->shadowstack = pseg->shadowstack_at_start_of_transaction; + stm_rewind_jmp_restore_shadowstack(tl); + assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; @@ -1063,7 +1063,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP _test_run_abort(tl); #else - rewind_jmp_longjmp(&tl->rjthread); + stm_rewind_jmp_longjmp(tl); #endif } @@ -1078,7 +1078,7 @@ marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; - rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); + stm_rewind_jmp_forget(STM_SEGMENT->running_thread); clear_callbacks_on_abort(); } else { diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -186,7 +186,6 @@ 'thread_local_obj' field. 
*/ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; - struct stm_shadowentry_s *shadowstack_at_abort; /* Already signalled to commit soon: */ bool signalled_to_commit_soon; diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -184,7 +184,7 @@ rewind_jmp_buf rjbuf; stm_rewind_jmp_enterframe(tl, &rjbuf); - if (rewind_jmp_setjmp(&tl->rjthread) == 0) { + if (stm_rewind_jmp_setjmp(tl) == 0) { #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif @@ -193,7 +193,7 @@ strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } - rewind_jmp_forget(&tl->rjthread); + stm_rewind_jmp_forget(tl); stm_rewind_jmp_leaveframe(tl, &rjbuf); } diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -93,13 +93,7 @@ } memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size); - char *sstarget = rjthread->moved_off_ssbase; - char *ssend = sstarget + p->shadowstack_size; - memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, - p->shadowstack_size); - rjthread->moved_off_base = target; - rjthread->moved_off_ssbase = ssend; rjthread->moved_off = p->next; rj_free(p); } @@ -113,6 +107,22 @@ do_longjmp(rjthread, &_rewind_jmp_marker); } +char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread) +{ + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + char *sstarget = rjthread->moved_off_ssbase; + + while (p) { + char *ssend = sstarget + p->shadowstack_size; + memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, + p->shadowstack_size); + sstarget = ssend; + p = p->next; + } + rjthread->moved_off_ssbase = sstarget; + return sstarget; +} + __attribute__((noinline)) void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) { diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -74,6 +74,7 @@ long 
rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss); void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); +char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread); #define rewind_jmp_forget(rjthread) do { \ if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -331,9 +331,18 @@ function with the interpreter's dispatch loop, you need to declare a local variable of type 'rewind_jmp_buf' and call these macros. */ #define stm_rewind_jmp_enterframe(tl, rjbuf) \ - rewind_jmp_enterframe(&(tl)->rjthread, rjbuf) + rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_leaveframe(tl, rjbuf) \ - rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf) + rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) +#define stm_rewind_jmp_setjmp(tl) \ + rewind_jmp_setjmp(&(tl)->rjthread, (tl)->shadowstack) +#define stm_rewind_jmp_longjmp(tl) \ + rewind_jmp_longjmp(&(tl)->rjthread) +#define stm_rewind_jmp_forget(tl) \ + rewind_jmp_forget(&(tl)->rjthread) +#define stm_rewind_jmp_restore_shadowstack(tl) \ + ((tl)->shadowstack = (struct stm_shadowentry_s *) \ + rewind_jmp_restore_shadowstack(&(tl)->rjthread)) /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. 
diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -239,6 +239,7 @@ assert(ssarray[5] == a5); ssarray[4] = NULL; ssarray[5] = NULL; + rewind_jmp_restore_shadowstack(>hread); rewind_jmp_longjmp(>hread); } /* second path */ @@ -274,8 +275,10 @@ int result = gtl2(); ssarray[4] = NULL; - if (result == 0) + if (result == 0) { + rewind_jmp_restore_shadowstack(>hread); rewind_jmp_longjmp(>hread); + } rewind_jmp_leaveframe(>hread, &buf, ssarray+4); } From noreply at buildbot.pypy.org Tue Aug 12 16:43:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:43:02 +0200 (CEST) Subject: [pypy-commit] stmgc default: In tests, we don't save and restore the shadowstack correctly. Message-ID: <20140812144302.3D2381C0547@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1301:e691086e6ef0 Date: 2014-08-12 16:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/e691086e6ef0/ Log: In tests, we don't save and restore the shadowstack correctly. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -987,8 +987,16 @@ value before the transaction start. Also restore the content of the shadowstack here. */ stm_thread_local_t *tl = pseg->pub.running_thread; +#ifdef STM_NO_AUTOMATIC_SETJMP + /* In tests, we don't save and restore the shadowstack correctly. + Be sure to not change items below shadowstack_at_start_of_transaction. + There is no such restrictions in non-Python-based tests. 
*/ + assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); + tl->shadowstack = pseg->shadowstack_at_start_of_transaction; +#else stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); +#endif tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -340,8 +340,8 @@ rewind_jmp_longjmp(&(tl)->rjthread) #define stm_rewind_jmp_forget(tl) \ rewind_jmp_forget(&(tl)->rjthread) -#define stm_rewind_jmp_restore_shadowstack(tl) \ - ((tl)->shadowstack = (struct stm_shadowentry_s *) \ +#define stm_rewind_jmp_restore_shadowstack(tl) \ + ((tl)->shadowstack = (struct stm_shadowentry_s *) \ rewind_jmp_restore_shadowstack(&(tl)->rjthread)) /* Starting and ending transactions. stm_read(), stm_write() and diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -427,6 +427,19 @@ self.abort_transaction() py.test.raises(EmptyStack, self.pop_root) + def test_abort_restores_shadowstack_inv(self): + py.test.skip("the logic to save/restore the shadowstack doesn't " + "work in these tests") + self.push_root(ffi.cast("object_t *", 1234)) + self.start_transaction() + p = self.pop_root() + assert p == ffi.cast("object_t *", 1234) + self.push_root(ffi.cast("object_t *", 5678)) + self.pop_root() + self.abort_transaction() + p = self.pop_root() + assert p == ffi.cast("object_t *", 1234) + def test_check_content_after_commit(self): self.start_transaction() lp1 = stm_allocate(16) From noreply at buildbot.pypy.org Tue Aug 12 16:49:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:49:00 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Rephrase Message-ID: <20140812144900.1E85A1C0793@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r524:a3ef8f99cc06 Date: 2014-08-12 16:49 +0200 
http://bitbucket.org/pypy/pypy.org/changeset/a3ef8f99cc06/ Log: Rephrase diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -51,8 +51,8 @@
      • Speed: thanks to its Just-in-Time compiler, Python programs often run faster on PyPy. (What is a JIT compiler?)
      • -
      • Memory usage: large, memory-hungry Python programs might end up -taking less space than they do in CPython.
      • +
      • Memory usage: memory-hungry Python programs (many hundreds of +MBs and above) might end up taking less space than they do in CPython.
      • Compatibility: PyPy is highly compatible with existing python code. It supports cffi and can run popular python libraries like twisted and django.
      • diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -9,8 +9,8 @@ * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ - * **Memory usage:** large, memory-hungry Python programs might end up - taking `less space`_ than they do in CPython. + * **Memory usage:** memory-hungry Python programs (many hundreds of + MBs and above) might end up taking `less space`_ than they do in CPython. * **Compatibility:** PyPy is `highly compatible`_ with existing python code. It supports `cffi`_ and can run popular python libraries like `twisted`_ From noreply at buildbot.pypy.org Tue Aug 12 16:51:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 16:51:10 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Englishify? Message-ID: <20140812145110.BD3C71C03AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r525:26176c314bdd Date: 2014-08-12 16:51 +0200 http://bitbucket.org/pypy/pypy.org/changeset/26176c314bdd/ Log: Englishify? diff --git a/index.html b/index.html --- a/index.html +++ b/index.html @@ -51,8 +51,8 @@
        • Speed: thanks to its Just-in-Time compiler, Python programs often run faster on PyPy. (What is a JIT compiler?)
        • -
        • Memory usage: memory-hungry Python programs (many hundreds of -MBs and above) might end up taking less space than they do in CPython.
        • +
        • Memory usage: memory-hungry Python programs (several hundreds of +MBs or more) might end up taking less space than they do in CPython.
        • Compatibility: PyPy is highly compatible with existing python code. It supports cffi and can run popular python libraries like twisted and django.
        • diff --git a/source/index.txt b/source/index.txt --- a/source/index.txt +++ b/source/index.txt @@ -9,8 +9,8 @@ * **Speed:** thanks to its Just-in-Time compiler, Python programs often run `faster`_ on PyPy. `(What is a JIT compiler?)`_ - * **Memory usage:** memory-hungry Python programs (many hundreds of - MBs and above) might end up taking `less space`_ than they do in CPython. + * **Memory usage:** memory-hungry Python programs (several hundreds of + MBs or more) might end up taking `less space`_ than they do in CPython. * **Compatibility:** PyPy is `highly compatible`_ with existing python code. It supports `cffi`_ and can run popular python libraries like `twisted`_ From noreply at buildbot.pypy.org Tue Aug 12 17:04:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 17:04:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: debugging... Message-ID: <20140812150423.BBA681C0793@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1302:b1ce5fe3d056 Date: 2014-08-12 17:04 +0200 http://bitbucket.org/pypy/stmgc/changeset/b1ce5fe3d056/ Log: debugging... diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -994,6 +994,9 @@ assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); tl->shadowstack = pseg->shadowstack_at_start_of_transaction; #else + /* NB. careful, this function might be called more than once to + abort a given segment. Make sure that + stm_rewind_jmp_restore_shadowstack() is idempotent. 
*/ stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -70,8 +70,6 @@ rjthread = saved.rjthread1; rjthread->head = rjthread->initial_head; result = rjthread->repeat_count + 1; - /* check that the shadowstack was correctly restored */ - assert(rjthread->moved_off_ssbase == saved.ss1); } rjthread->repeat_count = result; copy_stack(rjthread, (char *)&saved, saved.ss1); @@ -119,7 +117,6 @@ sstarget = ssend; p = p->next; } - rjthread->moved_off_ssbase = sstarget; return sstarget; } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -340,9 +340,11 @@ rewind_jmp_longjmp(&(tl)->rjthread) #define stm_rewind_jmp_forget(tl) \ rewind_jmp_forget(&(tl)->rjthread) -#define stm_rewind_jmp_restore_shadowstack(tl) \ - ((tl)->shadowstack = (struct stm_shadowentry_s *) \ - rewind_jmp_restore_shadowstack(&(tl)->rjthread)) +#define stm_rewind_jmp_restore_shadowstack(tl) do { \ + assert(rewind_jmp_armed(&(tl)->rjthread)); \ + (tl)->shadowstack = (struct stm_shadowentry_s *) \ + rewind_jmp_restore_shadowstack(&(tl)->rjthread); \ +} while (0) /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. From noreply at buildbot.pypy.org Tue Aug 12 17:08:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 17:08:20 +0200 (CEST) Subject: [pypy-commit] stmgc default: Bug fix: now the logic will complain if the shadow stack is not Message-ID: <20140812150820.6DF621C0EE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1303:1815f493a1c5 Date: 2014-08-12 17:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/1815f493a1c5/ Log: Bug fix: now the logic will complain if the shadow stack is not correctly balanced. 
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -208,6 +208,11 @@ printf("setup ok\n"); } +void teardown_list(void) +{ + STM_POP_ROOT_RET(stm_thread_local); +} + static sem_t done; @@ -303,6 +308,7 @@ final_check(); + teardown_list(); stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); unregister_thread_local(); From noreply at buildbot.pypy.org Tue Aug 12 17:50:05 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 12 Aug 2014 17:50:05 +0200 (CEST) Subject: [pypy-commit] stmgc default: clean up shadowstack in demo_random for leaveframe Message-ID: <20140812155005.DA8081C0EE9@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1304:8e8c594bbad1 Date: 2014-08-12 17:51 +0200 http://bitbucket.org/pypy/stmgc/changeset/8e8c594bbad1/ Log: clean up shadowstack in demo_random for leaveframe diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -393,6 +393,16 @@ } } } + push_roots(); + stm_commit_transaction(); + + /* even out the shadow stack before leaveframe: */ + stm_start_inevitable_transaction(&stm_thread_local); + while (td.num_roots > 0) { + td.num_roots--; + objptr_t t; + STM_POP_ROOT(stm_thread_local, t); + } stm_commit_transaction(); stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); From noreply at buildbot.pypy.org Tue Aug 12 18:08:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 18:08:41 +0200 (CEST) Subject: [pypy-commit] stmgc default: Kill STM_STACK_MARKER_{NEW, OLD} and use 'moved_off_ssbase' instead. Message-ID: <20140812160841.111791C0EE9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1305:50d22a5baf26 Date: 2014-08-12 18:08 +0200 http://bitbucket.org/pypy/stmgc/changeset/50d22a5baf26/ Log: Kill STM_STACK_MARKER_{NEW,OLD} and use 'moved_off_ssbase' instead. 
diff --git a/c7/stm/marker.c b/c7/stm/marker.c --- a/c7/stm/marker.c +++ b/c7/stm/marker.c @@ -18,10 +18,9 @@ struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; - /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is - a convenient stopper for the loop below but which shouldn't - be returned. */ - assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + /* The shadowstack_base contains -1, which is a convenient stopper for + the loop below but which shouldn't be returned. */ + assert(base->ss == (object_t *)-1); while (!(((uintptr_t)current->ss) & 1)) { current--; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -156,27 +156,22 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; - struct stm_shadowentry_s *base = tl->shadowstack_base; - while (1) { + struct stm_shadowentry_s *finalbase = tl->shadowstack_base; + struct stm_shadowentry_s *ssbase; + ssbase = (struct stm_shadowentry_s *)tl->rjthread.moved_off_ssbase; + if (ssbase == NULL) + ssbase = finalbase; + else + assert(finalbase <= ssbase && ssbase <= current); + + while (current > ssbase) { --current; - OPT_ASSERT(current >= base); - uintptr_t x = (uintptr_t)current->ss; if ((x & 3) == 0) { /* the stack entry is a regular pointer (possibly NULL) */ minor_trace_if_young(¤t->ss); } - else if (x == STM_STACK_MARKER_NEW) { - /* the marker was not already seen: mark it as seen, - but continue looking more deeply in the shadowstack */ - current->ss = (object_t *)STM_STACK_MARKER_OLD; - } - else if (x == STM_STACK_MARKER_OLD) { - /* the marker was already seen: we can stop the - root stack tracing at this point */ - break; - } else { /* it is an odd-valued marker, ignore */ } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -201,13 +201,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s 
*)start; tl->shadowstack = s; tl->shadowstack_base = s; - STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); + STM_PUSH_ROOT(*tl, -1); } static void _done_shadow_stack(stm_thread_local_t *tl) { assert(tl->shadowstack > tl->shadowstack_base); - assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); + assert(tl->shadowstack_base->ss == (object_t *)-1); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -313,8 +313,6 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW (-41) -#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -12,8 +12,6 @@ #define _STM_FAST_ALLOC ... #define _STM_GCFLAG_WRITE_BARRIER ... #define _STM_CARD_SIZE ... -#define STM_STACK_MARKER_NEW ... -#define STM_STACK_MARKER_OLD ... 
struct stm_shadowentry_s { object_t *ss; @@ -622,7 +620,7 @@ def push_root_no_gc(self): "Pushes an invalid object, to crash in case the GC is called" - self.push_root(ffi.cast("object_t *", -1)) + self.push_root(ffi.cast("object_t *", 8)) def check_char_everywhere(self, obj, expected_content, offset=HDR): for i in range(len(self.tls)): diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -234,7 +234,7 @@ p1 = stm_allocate(600) stm_set_char(p1, 'o') self.push_root(p1) - self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW)) + self.push_root(ffi.cast("object_t *", 123)) p2 = stm_allocate(600) stm_set_char(p2, 't') self.push_root(p2) @@ -243,7 +243,7 @@ # p2 = self.pop_root() m = self.pop_root() - assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + assert m == ffi.cast("object_t *", 123) p1 = self.pop_root() assert stm_get_char(p1) == 'o' assert stm_get_char(p2) == 't' diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -203,7 +203,7 @@ p1 = stm_allocate(600) stm_set_char(p1, 'o') self.push_root(p1) - self.push_root(ffi.cast("object_t *", lib.STM_STACK_MARKER_NEW)) + self.push_root(ffi.cast("object_t *", 123)) p2 = stm_allocate(600) stm_set_char(p2, 't') self.push_root(p2) @@ -212,12 +212,13 @@ # p2 = self.pop_root() m = self.pop_root() - assert m == ffi.cast("object_t *", lib.STM_STACK_MARKER_OLD) + assert m == ffi.cast("object_t *", 123) p1 = self.pop_root() assert stm_get_char(p1) == 'o' assert stm_get_char(p2) == 't' def test_marker_2(self): + py.test.skip("testing this requires working shadowstack saving logic") self.start_transaction() p1 = stm_allocate(600) stm_set_char(p1, 'o') From noreply at buildbot.pypy.org Tue Aug 12 18:15:17 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 12 Aug 2014 18:15:17 +0200 (CEST) Subject: [pypy-commit] stmgc default: add demo_random2 that includes returning 
from frames in normal transactions Message-ID: <20140812161517.D3D921C0157@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1306:d29906f937fc Date: 2014-08-12 18:16 +0200 http://bitbucket.org/pypy/stmgc/changeset/d29906f937fc/ Log: add demo_random2 that includes returning from frames in normal transactions diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_random2.c @@ -0,0 +1,532 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmgc.h" + +#define NUMTHREADS 3 +#define STEPS_PER_THREAD 500 +#define THREAD_STARTS 1000 // how many restarts of threads +#define PREBUILT_ROOTS 3 +#define FORKS 3 + +#define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction + + +// SUPPORT +struct node_s; +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; +int num_forked_children = 0; + +struct node_s { + struct object_s hdr; + int sig; + long my_size; + long my_id; + long my_hash; + nodeptr_t next; +}; + +#define SIGNATURE 0x01234567 + + +static sem_t done; +__thread stm_thread_local_t stm_thread_local; +__thread void *thread_may_fork; + +// global and per-thread-data +time_t default_seed; +objptr_t prebuilt_roots[PREBUILT_ROOTS]; + +struct thread_data { + unsigned int thread_seed; + int steps_left; + objptr_t active_roots_set[ACTIVE_ROOTS_SET_SIZE]; + int active_roots_num; + long roots_on_ss; + long roots_on_ss_at_tr_start; +}; +__thread struct thread_data td; + +struct thread_data *_get_td(void) +{ + return &td; /* for gdb */ +} + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return ((struct node_s*)ob)->my_size; +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + + /* and the same value at the end: */ + /* note, ->next may be the same as last_next */ + nodeptr_t *last_next = (nodeptr_t*)((char*)n + 
n->my_size - sizeof(void*)); + + assert(n->next == *last_next); + + visit((object_t **)&n->next); + visit((object_t **)last_next); + + assert(n->next == *last_next); +} + +void stmcb_commit_soon() {} + +void stmcb_trace_cards(struct object_s *obj, void cb(object_t **), + uintptr_t start, uintptr_t stop) { + abort(); +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) { + abort(); +} + +int get_rand(int max) +{ + if (max == 0) + return 0; + return (int)(rand_r(&td.thread_seed) % (unsigned int)max); +} + +objptr_t get_random_root() +{ + /* get some root from shadowstack or active_root_set or prebuilt_roots */ + int num = get_rand(3); + intptr_t ss_size = td.roots_on_ss; + if (num == 0 && ss_size > 0) { + num = get_rand(ss_size); + /* XXX: impl detail: there is already a "-1" on the SS -> +1 */ + objptr_t r = (objptr_t)stm_thread_local.shadowstack_base[num+1].ss; + assert((((uintptr_t)r) & 3) == 0); + } + + if (num == 1 && td.active_roots_num > 0) { + num = get_rand(td.active_roots_num); + return td.active_roots_set[num]; + } else { + num = get_rand(PREBUILT_ROOTS); + return prebuilt_roots[num]; + } +} + + +long push_roots() +{ + int i; + long to_push = td.active_roots_num; + for (i = to_push - 1; i >= 0; i--) { + STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]); + td.roots_on_ss++; + td.active_roots_num--; + } + return to_push; +} + +void pop_roots(long to_pop) +{ + int i; + for (i = 0; i < to_pop; i++) { + STM_POP_ROOT(stm_thread_local, td.active_roots_set[i]); + td.roots_on_ss--; + td.active_roots_num++; + assert(td.active_roots_num < ACTIVE_ROOTS_SET_SIZE); + } +} + +void del_root(int idx) +{ + int i; + + for (i = idx; i < td.active_roots_num - 1; i++) + td.active_roots_set[i] = td.active_roots_set[i + 1]; + td.active_roots_num--; +} + +void add_root(objptr_t r) +{ + if (r && td.active_roots_num < ACTIVE_ROOTS_SET_SIZE) { + td.active_roots_set[td.active_roots_num++] = r; + } +} + + +void read_barrier(objptr_t p) +{ + 
if (p != NULL) { + stm_read(p); + } +} + +void write_barrier(objptr_t p) +{ + if (p != NULL) { + stm_write(p); + } +} + +void set_next(objptr_t p, objptr_t v) +{ + if (p != NULL) { + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + assert(n->next == *last_next); + n->next = (nodeptr_t)v; + *last_next = (nodeptr_t)v; + } +} + +nodeptr_t get_next(objptr_t p) +{ + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + OPT_ASSERT(n->next == *last_next); + + return n->next; +} + + +objptr_t simple_events(objptr_t p, objptr_t _r) +{ + int k = get_rand(10); + + switch (k) { + case 0: // remove a root + if (td.active_roots_num) { + del_root(get_rand(td.active_roots_num)); + } + break; + case 1: // add 'p' to roots + add_root(p); + break; + case 2: // set 'p' to point to a root + if (_r) + p = _r; + break; + case 3: // allocate fresh 'p' + ; + long pushed = push_roots(); + size_t sizes[4] = {sizeof(struct node_s), + sizeof(struct node_s) + (get_rand(100000) & ~15), + sizeof(struct node_s) + 4096, + sizeof(struct node_s) + 4096*70}; + size_t size = sizes[get_rand(4)]; + p = stm_allocate(size); + ((nodeptr_t)p)->sig = SIGNATURE; + ((nodeptr_t)p)->my_size = size; + ((nodeptr_t)p)->my_id = 0; + ((nodeptr_t)p)->my_hash = 0; + pop_roots(pushed); + break; + case 4: // read and validate 'p' + read_barrier(p); + break; + case 5: // only do a stm_write_barrier + write_barrier(p); + break; + case 6: // follow p->next + if (p) { + read_barrier(p); + p = (objptr_t)(get_next(p)); + } + break; + case 7: // set 'p' as *next in one of the roots + write_barrier(_r); + set_next(_r, p); + break; + case 8: // id checking + if (p) { + nodeptr_t n = (nodeptr_t)p; + if (n->my_id == 0) { + write_barrier(p); + n->my_id = stm_id(p); + } + else { + read_barrier(p); 
+ assert(n->my_id == stm_id(p)); + } + } + break; + case 9: + if (p) { + nodeptr_t n = (nodeptr_t)p; + if (n->my_hash == 0) { + write_barrier(p); + n->my_hash = stm_identityhash(p); + } + else { + read_barrier(p); + assert(n->my_hash == stm_identityhash(p)); + } + } + break; + } + return p; +} + + +void frame_loop(); +objptr_t do_step(objptr_t p) +{ + objptr_t _r; + int k; + + _r = get_random_root(); + k = get_rand(12); + + if (k < 10) { + p = simple_events(p, _r); + } else if (get_rand(20) == 1) { + long pushed = push_roots(); + stm_commit_transaction(); + td.roots_on_ss_at_tr_start = td.roots_on_ss; + + /* if (get_rand(100) < 98) { */ + /* stm_start_transaction(&stm_thread_local); */ + /* } else */{ + stm_start_inevitable_transaction(&stm_thread_local); + } + td.roots_on_ss = td.roots_on_ss_at_tr_start; + td.active_roots_num = 0; + pop_roots(pushed); + return NULL; + } else if (get_rand(10) == 1) { + fprintf(stderr, "R"); + + long pushed = push_roots(); + /* leaving our frame */ + frame_loop(); + /* back in our frame */ + pop_roots(pushed); + + fprintf(stderr, "r"); + return NULL; + } else if (get_rand(20) == 1) { + long pushed = push_roots(); + stm_become_inevitable(&stm_thread_local, "please"); + assert(stm_is_inevitable()); + pop_roots(pushed); + return NULL; + } else if (get_rand(200) == 1) { + return (objptr_t)-1; // possibly fork + } else if (get_rand(240) == 1) { + long pushed = push_roots(); + stm_become_globally_unique_transaction(&stm_thread_local, "really"); + fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); + pop_roots(pushed); + return NULL; + } + return p; +} + +void frame_loop() +{ + objptr_t p = NULL; + rewind_jmp_buf rjbuf; + + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + volatile long roots_on_ss = td.roots_on_ss; + /* "interpreter main loop": this is one "application-frame" */ + while (td.steps_left-->0 && get_rand(10) != 0) { + if (td.steps_left % 8 == 0) + fprintf(stdout, "#"); + + assert(p == NULL || ((nodeptr_t)p)->sig 
== SIGNATURE); + + p = do_step(p); + + if (p == (objptr_t)-1) { + p = NULL; + + /* long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork); */ + /* if (call_fork) { /\* common case *\/ */ + /* push_roots(); */ + /* /\* run a fork() inside the transaction *\/ */ + /* printf("========== FORK =========\n"); */ + /* *(long*)thread_may_fork = 0; */ + /* pid_t child = fork(); */ + /* printf("=== in process %d thread %lx, fork() returned %d\n", */ + /* (int)getpid(), (long)pthread_self(), (int)child); */ + /* if (child == -1) { */ + /* fprintf(stderr, "fork() error: %m\n"); */ + /* abort(); */ + /* } */ + /* if (child != 0) */ + /* num_forked_children++; */ + /* else */ + /* num_forked_children = 0; */ + + /* pop_roots(); */ + /* } */ + } + } + assert(roots_on_ss == td.roots_on_ss); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); +} + + + +void setup_thread() +{ + memset(&td, 0, sizeof(struct thread_data)); + + /* stupid check because gdb shows garbage + in td.roots: */ + int i; + for (i = 0; i < ACTIVE_ROOTS_SET_SIZE; i++) + assert(td.active_roots_set[i] == NULL); + + td.thread_seed = default_seed++; + td.steps_left = STEPS_PER_THREAD; + td.active_roots_num = 0; + td.roots_on_ss = 0; + td.roots_on_ss_at_tr_start = 0; +} + + + +void *demo_random(void *arg) +{ + int status; + rewind_jmp_buf rjbuf; + stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + + setup_thread(); + + td.roots_on_ss_at_tr_start = 0; + stm_start_transaction(&stm_thread_local); + td.roots_on_ss = td.roots_on_ss_at_tr_start; + td.active_roots_num = 0; + + thread_may_fork = arg; + while (td.steps_left-->0) { + frame_loop(); + } + + stm_commit_transaction(); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); + stm_unregister_thread_local(&stm_thread_local); + + status = sem_post(&done); assert(status == 0); + return NULL; +} + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = 
pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + +void setup_globals() +{ + int i; + + struct node_s prebuilt_template = { + .sig = SIGNATURE, + .my_size = sizeof(struct node_s), + .my_id = 0, + .my_hash = 0, + .next = NULL + }; + + stm_start_inevitable_transaction(&stm_thread_local); + for (i = 0; i < PREBUILT_ROOTS; i++) { + void* new_templ = malloc(sizeof(struct node_s)); + memcpy(new_templ, &prebuilt_template, sizeof(struct node_s)); + prebuilt_roots[i] = stm_setup_prebuilt((objptr_t)(long)new_templ); + + if (i % 2 == 0) { + int hash = i + 5; + stm_set_prebuilt_identityhash(prebuilt_roots[i], + hash); + ((nodeptr_t)prebuilt_roots[i])->my_hash = hash; + } + } + stm_commit_transaction(); +} + +int main(void) +{ + int i, status; + rewind_jmp_buf rjbuf; + + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. */ + default_seed = time(NULL); + printf("running with seed=%lld\n", (long long)default_seed); + + status = sem_init(&done, 0, 0); + assert(status == 0); + + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + + setup_globals(); + + int thread_starts = NUMTHREADS * THREAD_STARTS; + for (i = 0; i < NUMTHREADS; i++) { + newthread(demo_random, NULL); + thread_starts--; + } + + for (i=0; i < NUMTHREADS * THREAD_STARTS; i++) { + status = sem_wait(&done); + assert(status == 0); + printf("thread finished\n"); + if (thread_starts) { + long forkbase = NUMTHREADS * THREAD_STARTS / (FORKS + 1); + long _fork = (thread_starts % forkbase) == 0; + thread_starts--; + newthread(demo_random, &_fork); + } + } + + for (i = 0; i < num_forked_children; i++) { + pid_t child = wait(&status); + if (child == -1) + perror("wait"); + printf("From %d: child %d terminated with exit status %d\n", + (int)getpid(), (int)child, status); + if (WIFEXITED(status) && 
WEXITSTATUS(status) == 0) + ; + else { + printf("*** error from the child ***\n"); + return 1; + } + } + + printf("Test OK!\n"); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} From noreply at buildbot.pypy.org Tue Aug 12 18:21:13 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 12 Aug 2014 18:21:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix Message-ID: <20140812162113.D3E3B1C0157@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1307:d70bdefc4757 Date: 2014-08-12 18:22 +0200 http://bitbucket.org/pypy/stmgc/changeset/d70bdefc4757/ Log: fix diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c --- a/c7/demo/demo_random2.c +++ b/c7/demo/demo_random2.c @@ -135,14 +135,15 @@ return to_push; } +void add_root(objptr_t r); void pop_roots(long to_pop) { int i; for (i = 0; i < to_pop; i++) { - STM_POP_ROOT(stm_thread_local, td.active_roots_set[i]); + objptr_t t; + STM_POP_ROOT(stm_thread_local, t); + add_root(t); td.roots_on_ss--; - td.active_roots_num++; - assert(td.active_roots_num < ACTIVE_ROOTS_SET_SIZE); } } @@ -297,9 +298,9 @@ stm_commit_transaction(); td.roots_on_ss_at_tr_start = td.roots_on_ss; - /* if (get_rand(100) < 98) { */ - /* stm_start_transaction(&stm_thread_local); */ - /* } else */{ + if (get_rand(100) < 98) { + stm_start_transaction(&stm_thread_local); + } else{ stm_start_inevitable_transaction(&stm_thread_local); } td.roots_on_ss = td.roots_on_ss_at_tr_start; From noreply at buildbot.pypy.org Tue Aug 12 18:35:12 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 12 Aug 2014 18:35:12 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Fix translation Message-ID: <20140812163512.F32C81C0157@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72766:50441033e543 Date: 2014-08-12 11:34 -0500 http://bitbucket.org/pypy/pypy/changeset/50441033e543/ Log: Fix translation diff --git 
a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -27,11 +27,10 @@ newline = None else: newline = space.unicode_w(w_newline) + newline = newline.bytes - if (newline is not None and len(newline) != 0 and - utf8.NE(newline, Utf8Str('\n')) and - utf8.NE(newline, Utf8Str('\r\n')) and - utf8.NE(newline, Utf8Str('\r'))): + if (newline and newline != '\n' and newline != '\r\n' and + newline != '\r'): # Not using oefmt() because I don't know how to ues it # with unicode raise OperationError(space.w_ValueError, @@ -39,12 +38,12 @@ space.wrap("illegal newline value: %s"), space.wrap(newline) ) ) + if newline is not None: self.readnl = newline - self.readuniversal = newline is None or len(newline) == 0 + self.readuniversal = not newline self.readtranslate = newline is None - if (newline is not None and len(newline) > 0 and - utf8ord(newline) == ord("\r")): + if newline and newline[0] == '\r': self.writenl = newline if self.readuniversal: self.w_decoder = space.call_function( @@ -146,7 +145,8 @@ if self.writenl: w_decoded = space.call_method( - w_decoded, "replace", space.wrap("\n"), space.wrap(self.writenl) + w_decoded, "replace", space.wrap("\n"), + space.wrap(Utf8Str(self.writenl)) ) string = space.unicode_w(w_decoded) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -372,17 +372,14 @@ newline = None else: newline = space.unicode_w(w_newline) + # newline is guaranteed to be either empty or ascii + newline = newline.bytes - if (newline is not None and len(newline) > 0 and - not (utf8.EQ(newline, Utf8Str('\n')) or - utf8.EQ(newline, Utf8Str('\r\n')) or - utf8.EQ(newline, Utf8Str('\r')))): + if (newline and newline != '\n' and newline != '\r\n' and + newline != '\r'): r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( 
"illegal newline value: %s" % (r,))) - elif newline is not None: - # newline is guaranteed to be either empty or ascii - newline = newline.bytes self.line_buffering = line_buffering From noreply at buildbot.pypy.org Tue Aug 12 18:38:50 2014 From: noreply at buildbot.pypy.org (waedt) Date: Tue, 12 Aug 2014 18:38:50 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Copy failing lib-python test Message-ID: <20140812163850.90FA91C0157@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72767:90afe25b63d5 Date: 2014-08-12 11:38 -0500 http://bitbucket.org/pypy/pypy/changeset/90afe25b63d5/ Log: Copy failing lib-python test diff --git a/pypy/module/_io/test/test_textio.py b/pypy/module/_io/test/test_textio.py --- a/pypy/module/_io/test/test_textio.py +++ b/pypy/module/_io/test/test_textio.py @@ -1,5 +1,5 @@ class AppTestTextIO: - spaceconfig = dict(usemodules=['_io', '_locale']) + spaceconfig = dict(usemodules=['_io', '_locale', 'thread', 'time', 'signal']) def test_constructor(self): import _io @@ -281,6 +281,58 @@ t.read() == u'a' + def test_interrupted_write(self): + import _io + import os + import threading + import sys + import signal + import errno + + item = u'xy' + bytes = 'xy' + + signal.signal(signal.SIGALRM, lambda x, y: 1 // 0) + + read_results = [] + def _read(): + s = os.read(r, 1) + read_results.append(s) + t = threading.Thread(target=_read) + t.daemon = True + r, w = os.pipe() + try: + wio = _io.open(w, mode='w', encoding="ascii") + t.start() + signal.alarm(1) + # Fill the pipe enough that the write will be blocking. + # It will be interrupted by the timer armed above. Since the + # other thread has read one byte, the low-level write will + # return with a successful (partial) result rather than an EINTR. + # The buffered IO layer must check for pending signal + # handlers, which in this case will invoke alarm_interrupt(). 
+ + raises(ZeroDivisionError, wio.write, item * (4194305 // len(item) + 1)) + + t.join() + # We got one byte, get another one and check that it isn't a + # repeat of the first one. + read_results.append(os.read(r, 1)) + + assert read_results == [bytes[0:1], bytes[1:2]] + finally: + os.close(w) + os.close(r) + # This is deliberate. If we didn't close the file descriptor + # before closing wio, wio would try to flush its internal + # buffer, and block again. + try: + wio.close() + except IOError as e: + if e.errno != errno.EBADF: + raise + + class AppTestIncrementalNewlineDecoder: def test_newline_decoder(self): import _io From noreply at buildbot.pypy.org Tue Aug 12 18:43:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 18:43:29 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add another unit test (run by test/test_demo.py) for major GC tracking Message-ID: <20140812164329.A6D1F1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1308:38dbf997b57b Date: 2014-08-12 18:43 +0200 http://bitbucket.org/pypy/stmgc/changeset/38dbf997b57b/ Log: Add another unit test (run by test/test_demo.py) for major GC tracking the parts of the shadowstacks that have been moved away diff --git a/c7/demo/test_shadowstack.c b/c7/demo/test_shadowstack.c new file mode 100644 --- /dev/null +++ b/c7/demo/test_shadowstack.c @@ -0,0 +1,68 @@ +#include +#include +#include "stmgc.h" + +stm_thread_local_t stm_thread_local; + +typedef TLPREFIX struct node_s node_t; + +struct node_s { + struct object_s hdr; + long value; +}; + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return sizeof(struct node_s); +} +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ +} +void stmcb_get_card_base_itemsize(struct object_s *obj, + uintptr_t offset_itemsize[2]) +{ + abort(); +} +void stmcb_trace_cards(struct object_s *obj, void visit(object_t **), + uintptr_t start, uintptr_t stop) +{ + abort(); +} +void stmcb_commit_soon() {} + + +int 
main(void) +{ + rewind_jmp_buf rjbuf; + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); + + stm_start_transaction(&stm_thread_local); + node_t *node = (node_t *)stm_allocate(sizeof(struct node_s)); + node->value = 129821; + STM_PUSH_ROOT(stm_thread_local, node); + stm_commit_transaction(); + + /* now in a new transaction, pop the node off the shadowstack, but + then do a major collection. It should still be found by the + tracing logic. */ + stm_start_transaction(&stm_thread_local); + STM_POP_ROOT(stm_thread_local, node); + assert(node->value == 129821); + STM_PUSH_ROOT(stm_thread_local, NULL); + stm_collect(9); + + node_t *node2 = (node_t *)stm_allocate(sizeof(struct node_s)); + assert(node2 != node); + assert(node->value == 129821); + + STM_PUSH_ROOT(stm_thread_local, node2); + stm_collect(0); + STM_POP_ROOT(stm_thread_local, node2); + assert(node2 != node); + assert(node->value == 129821); + + return 0; +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -363,6 +363,16 @@ mark_trace(obj, segment_base); } +static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) +{ + const struct stm_shadowentry_s *p, *end; + p = (const struct stm_shadowentry_s *)slice; + end = (const struct stm_shadowentry_s *)(slice + size); + for (; p < end; p++) + mark_visit_object(p->ss, stm_object_pages); + return NULL; +} + static void mark_visit_from_roots(void) { if (testing_prebuilt_objs != NULL) { @@ -386,6 +396,7 @@ mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); + stm_rewind_jmp_enum_shadowstack(tl, mark_visit_objects_from_ss); tl = tl->next; } while (tl != stm_all_thread_locals); diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -105,21 +105,28 @@ do_longjmp(rjthread, &_rewind_jmp_marker); } -char 
*rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread) + +char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread, + void *callback(void *, const void *, size_t)) { struct _rewind_jmp_moved_s *p = rjthread->moved_off; char *sstarget = rjthread->moved_off_ssbase; while (p) { char *ssend = sstarget + p->shadowstack_size; - memcpy(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, - p->shadowstack_size); + callback(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, + p->shadowstack_size); sstarget = ssend; p = p->next; } return sstarget; } +char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread) +{ + return rewind_jmp_enum_shadowstack(rjthread, memcpy); +} + __attribute__((noinline)) void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) { diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -1,6 +1,8 @@ #ifndef _REWIND_SETJMP_H_ #define _REWIND_SETJMP_H_ +#include + /************************************************************ : : ^^^^^ @@ -75,6 +77,8 @@ long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss); void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread); +char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread, + void *callback(void *, const void *, size_t)); #define rewind_jmp_forget(rjthread) do { \ if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -343,6 +343,8 @@ (tl)->shadowstack = (struct stm_shadowentry_s *) \ rewind_jmp_restore_shadowstack(&(tl)->rjthread); \ } while (0) +#define stm_rewind_jmp_enum_shadowstack(tl, callback) \ + rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback) /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. 
diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py --- a/c7/test/test_demo.py +++ b/c7/test/test_demo.py @@ -13,6 +13,8 @@ self._do("make -C ../demo %s" % target) self._do("../demo/%s 2> /dev/null" % target) + def test_shadowstack(self): self.make_and_run("debug-test_shadowstack") + def test_demo2_debug(self): self.make_and_run("debug-demo2") def test_demo2_build(self): self.make_and_run("build-demo2") def test_demo2_release(self): self.make_and_run("release-demo2") From noreply at buildbot.pypy.org Tue Aug 12 18:49:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 12 Aug 2014 18:49:22 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test and fix Message-ID: <20140812164922.1CA051C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1309:1cb240cc60a2 Date: 2014-08-12 18:49 +0200 http://bitbucket.org/pypy/stmgc/changeset/1cb240cc60a2/ Log: Test and fix diff --git a/c7/demo/test_shadowstack.c b/c7/demo/test_shadowstack.c --- a/c7/demo/test_shadowstack.c +++ b/c7/demo/test_shadowstack.c @@ -43,12 +43,14 @@ node_t *node = (node_t *)stm_allocate(sizeof(struct node_s)); node->value = 129821; STM_PUSH_ROOT(stm_thread_local, node); + STM_PUSH_ROOT(stm_thread_local, 333); /* odd value */ stm_commit_transaction(); /* now in a new transaction, pop the node off the shadowstack, but then do a major collection. It should still be found by the tracing logic. 
*/ stm_start_transaction(&stm_thread_local); + STM_POP_ROOT_RET(stm_thread_local); STM_POP_ROOT(stm_thread_local, node); assert(node->value == 129821); STM_PUSH_ROOT(stm_thread_local, NULL); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -369,7 +369,8 @@ p = (const struct stm_shadowentry_s *)slice; end = (const struct stm_shadowentry_s *)(slice + size); for (; p < end; p++) - mark_visit_object(p->ss, stm_object_pages); + if ((((uintptr_t)p->ss) & 3) == 0) + mark_visit_object(p->ss, stm_object_pages); return NULL; } From noreply at buildbot.pypy.org Tue Aug 12 18:54:25 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 12 Aug 2014 18:54:25 +0200 (CEST) Subject: [pypy-commit] stmgc default: add forking again to demo_random2 Message-ID: <20140812165425.ADE311C0157@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1310:3127164e93bf Date: 2014-08-12 18:55 +0200 http://bitbucket.org/pypy/stmgc/changeset/3127164e93bf/ Log: add forking again to demo_random2 diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c --- a/c7/demo/demo_random2.c +++ b/c7/demo/demo_random2.c @@ -300,7 +300,7 @@ if (get_rand(100) < 98) { stm_start_transaction(&stm_thread_local); - } else{ + } else { stm_start_inevitable_transaction(&stm_thread_local); } td.roots_on_ss = td.roots_on_ss_at_tr_start; @@ -308,15 +308,11 @@ pop_roots(pushed); return NULL; } else if (get_rand(10) == 1) { - fprintf(stderr, "R"); - long pushed = push_roots(); /* leaving our frame */ frame_loop(); /* back in our frame */ pop_roots(pushed); - - fprintf(stderr, "r"); return NULL; } else if (get_rand(20) == 1) { long pushed = push_roots(); @@ -324,9 +320,9 @@ assert(stm_is_inevitable()); pop_roots(pushed); return NULL; - } else if (get_rand(200) == 1) { + } else if (get_rand(20) == 1) { return (objptr_t)-1; // possibly fork - } else if (get_rand(240) == 1) { + } else if (get_rand(20) == 1) { long pushed = push_roots(); 
stm_become_globally_unique_transaction(&stm_thread_local, "really"); fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); @@ -355,26 +351,26 @@ if (p == (objptr_t)-1) { p = NULL; - /* long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork); */ - /* if (call_fork) { /\* common case *\/ */ - /* push_roots(); */ - /* /\* run a fork() inside the transaction *\/ */ - /* printf("========== FORK =========\n"); */ - /* *(long*)thread_may_fork = 0; */ - /* pid_t child = fork(); */ - /* printf("=== in process %d thread %lx, fork() returned %d\n", */ - /* (int)getpid(), (long)pthread_self(), (int)child); */ - /* if (child == -1) { */ - /* fprintf(stderr, "fork() error: %m\n"); */ - /* abort(); */ - /* } */ - /* if (child != 0) */ - /* num_forked_children++; */ - /* else */ - /* num_forked_children = 0; */ + long call_fork = (thread_may_fork != NULL && *(long *)thread_may_fork); + if (call_fork) { /* common case */ + long pushed = push_roots(); + /* run a fork() inside the transaction */ + printf("========== FORK =========\n"); + *(long*)thread_may_fork = 0; + pid_t child = fork(); + printf("=== in process %d thread %lx, fork() returned %d\n", + (int)getpid(), (long)pthread_self(), (int)child); + if (child == -1) { + fprintf(stderr, "fork() error: %m\n"); + abort(); + } + if (child != 0) + num_forked_children++; + else + num_forked_children = 0; - /* pop_roots(); */ - /* } */ + pop_roots(pushed); + } } } assert(roots_on_ss == td.roots_on_ss); From noreply at buildbot.pypy.org Tue Aug 12 20:54:36 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Tue, 12 Aug 2014 20:54:36 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: make test_debugmallocstats cpython only Message-ID: <20140812185436.1CA5B1C0157@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72768:ceebddacd8c1 Date: 2014-08-12 20:41 +0200 http://bitbucket.org/pypy/pypy/changeset/ceebddacd8c1/ Log: make test_debugmallocstats cpython only diff --git 
a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -588,6 +588,7 @@ self.assertEqual(sys.implementation.name, sys.implementation.name.lower()) + @test.support.cpython_only def test_debugmallocstats(self): # Test sys._debugmallocstats() from test.script_helper import assert_python_ok From noreply at buildbot.pypy.org Tue Aug 12 20:54:37 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 12 Aug 2014 20:54:37 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2 (pull request #266) Message-ID: <20140812185437.5E98F1C0157@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: py3.3 Changeset: r72769:8d2fd0582587 Date: 2014-08-12 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8d2fd0582587/ Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #266) make test_debugmallocstats cpython only diff --git a/lib-python/3/test/test_sys.py b/lib-python/3/test/test_sys.py --- a/lib-python/3/test/test_sys.py +++ b/lib-python/3/test/test_sys.py @@ -588,6 +588,7 @@ self.assertEqual(sys.implementation.name, sys.implementation.name.lower()) + @test.support.cpython_only def test_debugmallocstats(self): # Test sys._debugmallocstats() from test.script_helper import assert_python_ok From noreply at buildbot.pypy.org Wed Aug 13 00:01:12 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Aug 2014 00:01:12 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: backed out changeset ca3b82260c3a, signatures are still relevant Message-ID: <20140812220112.129D71C0793@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72770:86f97db12e19 Date: 2014-08-09 21:53 +0300 http://bitbucket.org/pypy/pypy/changeset/86f97db12e19/ Log: backed out changeset ca3b82260c3a, signatures are still relevant diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- 
a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -139,7 +139,7 @@ out_flat = out_array.flat for i in range(in_array.size): out_flat[i] = in_flat[i] * 2 - def double_times2(in_array, out_array): + def double_times2(space, __args__): assert in_array.dtype == float in_flat = in_array.flat out_flat = out_array.flat @@ -147,6 +147,7 @@ out_flat[i] = in_flat[i] * 2 from numpy import frompyfunc, dtype, arange ufunc = frompyfunc([int_times2, double_times2], 1, 1, + signature='()->()', dtypes=[dtype(int), dtype(int), dtype(float), dtype(float) ] diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -482,20 +482,22 @@ class W_UfuncGeneric(W_Ufunc): ''' - Handle a number of python functions, each with a dtypes. - The dtypes can specify the input, output args for the function. - When called, the actual function used will be resolved by examining - the input arg's dtypes. + Handle a number of python functions, each with a signature and dtypes. + The signature can specify how to create the inner loop, i.e. + (i,j),(j,k)->(i,k) for a dot-like matrix multiplication, and the dtypes + can specify the input, output args for the function. When called, the actual + function used will be resolved by examining the input arg's dtypes. 
If dtypes == 'match', only one argument is provided and the output dtypes will match the input dtype (not cpython numpy compatible) ''' _immutable_fields_ = ["funcs", "dtypes", "data"] - def __init__(self, space, funcs, name, identity, nin, nout, dtypes, match_dtypes=False): - # XXX make sure funcs, dtypes, nin, nout are consistent + def __init__(self, space, funcs, name, identity, nin, nout, dtypes, signature, match_dtypes=False): + # XXX make sure funcs, signature, dtypes, nin, nout are consistent - # These don't matter, we use the dtypes for determining output dtype + # These don't matter, we use the signature and dtypes for determining + # output dtype promote_to_largest = promote_to_float = promote_bools = False allow_bool = allow_complex = True int_only = complex_to_float = False @@ -512,6 +514,7 @@ raise oefmt(space.w_ValueError, "generic ufunc with %d functions, %d arguments, but %d dtypes", len(funcs), self.nargs, len(dtypes)) + self.signature = signature def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None, cumulative=False): @@ -542,30 +545,27 @@ new_shape = inargs[0].get_shape() assert isinstance(outargs[0], W_NDimArray) res_dtype = outargs[0].get_dtype() - if not self.match_dtypes: + # XXX handle inner-loop indexing + sign_parts = self.signature.split('->') + if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \ + and sign_parts[1].strip() == '()': + + arglist = space.newlist(inargs + outargs) func = self.funcs[index] space.call_function(func, *(inargs + outargs)) if len(outargs) < 2: return outargs[0] return outargs - # XXX TODO handle more complicated signatures, - # for now, assume (i) -> (i) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, self.funcs[index], res_dtype, inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, self.funcs[index], res_dtype, inargs, outargs) - def type_resolver(self, space, inargs, outargs): + def type_resolver(self, space, index, outargs): # Find a match for 
the inargs.dtype in self.dtypes, like # linear_search_type_resolver in numy ufunc_type_resolutions.c - for i in range(0, len(self.dtypes), self.nargs): - if inargs[0].get_dtype() == self.dtypes[i]: - break - else: - raise oefmt(space.w_TypeError, - 'input dtype %r did not match any known dtypes', inargs[0].get_dtype()) - return i / self.nargs + return 0 def alloc_outargs(self, space, index, inargs, outargs): # Any None outarg should be allocated here @@ -911,12 +911,12 @@ def get(space): return space.fromcache(UfuncState) - at unwrap_spec(nin=int, nout=int, w_identity=WrappedDefault(None), + at unwrap_spec(nin=int, nout=int, signature=str, w_identity=WrappedDefault(None), name=str, doc=str) -def frompyfunc(space, w_func, nin, nout, w_dtypes=None, +def frompyfunc(space, w_func, nin, nout, w_dtypes=None, signature='', w_identity=None, name='', doc=''): ''' frompyfunc(func, nin, nout) #cpython numpy compatible - frompyfunc(func, nin, nout, dtypes=None, + frompyfunc(func, nin, nout, dtypes=None, signature='', identity=None, name='', doc='') Takes an arbitrary Python function and returns a ufunc. @@ -934,6 +934,9 @@ The number of arrays returned by `func`. dtypes: None or [dtype, ...] of the input, output args for each function, or 'match' to force output to exactly match input dtype + signature*: str, default='' + The mapping of input args to output args, defining the + inner-loop indexing identity*: None (default) or int For reduce-type ufuncs, the default value name: str, default='' @@ -948,7 +951,7 @@ Notes ----- - If the signature and dtypes are both missing, the returned ufunc always + If the signature and out_dtype are both missing, the returned ufunc always returns PyObject arrays (cpython numpy compatability). 
Examples @@ -976,7 +979,7 @@ raise oefmt(space.w_TypeError, 'func must be callable') func = [w_func] match_dtypes = False - if space.is_none(w_dtypes): + if space.is_none(w_dtypes) and not signature: raise oefmt(space.w_NotImplementedError, 'object dtype requested but not implemented') elif (space.isinstance_w(w_dtypes, space.w_tuple) or @@ -988,7 +991,9 @@ else: dtypes = [None]*len(_dtypes) for i in range(len(dtypes)): + print 'decoding',_dtypes[i] dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i]) + print 'got',dtypes[i] else: raise oefmt(space.w_ValueError, 'dtypes must be None or a list of dtypes') @@ -999,7 +1004,7 @@ identity = \ descriptor.get_dtype_cache(space).w_longdtype.box(w_identity) - w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, + w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature, match_dtypes=match_dtypes) if doc: w_ret.w_doc = space.wrap(doc) From noreply at buildbot.pypy.org Wed Aug 13 00:01:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Aug 2014 00:01:13 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: make untranslated tests pass Message-ID: <20140812220113.6AFB51C0793@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72771:c12de969c24c Date: 2014-08-09 22:54 +0300 http://bitbucket.org/pypy/pypy/changeset/c12de969c24c/ Log: make untranslated tests pass diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -112,10 +112,11 @@ assert 'object' in str(e) # Use pypy specific extension for out_dtype adder_ufunc0 = frompyfunc(adder, 2, 1, dtypes=['match']) - adder_ufunc1 = frompyfunc([adder, adder], 2, 1, dtypes=['match']) - int_func22 = frompyfunc([int, int], 2, 2, signature='()->()', + adder_ufunc1 = frompyfunc([adder, adder], 2, 1, + dtypes=[int, int, int, float, float, float]) + int_func22 = 
frompyfunc([int, int], 2, 2, signature='(i)->(i)', dtypes=['match']) - int_func12 = frompyfunc([int, int], 1, 2, signature='()->()', + int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i)', dtypes=['match']) retype = dtype(int) assert isinstance(adder_ufunc1, ufunc) @@ -139,7 +140,7 @@ out_flat = out_array.flat for i in range(in_array.size): out_flat[i] = in_flat[i] * 2 - def double_times2(space, __args__): + def double_times2(in_array, out_array): assert in_array.dtype == float in_flat = in_array.flat out_flat = out_array.flat diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -562,10 +562,19 @@ return loop.call_many_to_many(space, new_shape, self.funcs[index], res_dtype, inargs, outargs) - def type_resolver(self, space, index, outargs): - # Find a match for the inargs.dtype in self.dtypes, like - # linear_search_type_resolver in numy ufunc_type_resolutions.c - return 0 + def type_resolver(self, space, inargs, outargs): + # Find a match for the inargs.dtype in self.dtypes, like + # linear_search_type_resolver in numy ufunc_type_resolutions.c + for i in range(0, len(self.dtypes), self.nargs): + if inargs[0].get_dtype() == self.dtypes[i]: + break + else: + if len(self.funcs) < 2: + return 0 + raise oefmt(space.w_TypeError, + 'input dtype %s did not match any known dtypes', + str(inargs[0].get_dtype())) + return i / self.nargs def alloc_outargs(self, space, index, inargs, outargs): # Any None outarg should be allocated here @@ -991,9 +1000,7 @@ else: dtypes = [None]*len(_dtypes) for i in range(len(dtypes)): - print 'decoding',_dtypes[i] dtypes[i] = descriptor.decode_w_dtype(space, _dtypes[i]) - print 'got',dtypes[i] else: raise oefmt(space.w_ValueError, 'dtypes must be None or a list of dtypes') From noreply at buildbot.pypy.org Wed Aug 13 00:01:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Aug 2014 00:01:14 +0200 (CEST) Subject: 
[pypy-commit] pypy ufuncapi: add asserts till translation passes Message-ID: <20140812220114.B47E31C0793@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72772:904129afbc30 Date: 2014-08-10 00:17 +0300 http://bitbucket.org/pypy/pypy/changeset/904129afbc30/ Log: add asserts till translation passes diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -94,8 +94,9 @@ in_iters = [None] * nin in_states = [None] * nin for i in range(nin): - assert isinstance(in_args[i], W_NDimArray) - in_iter, in_state = in_args[i].create_iter(shape) + in_i = in_args[i] + assert isinstance(in_i, W_NDimArray) + in_iter, in_state = in_i.create_iter(shape) in_iters[i] = in_iter in_states[i] = in_state shapelen = len(shape) @@ -130,13 +131,15 @@ out_iters = [None] * nout out_states = [None] * nout for i in range(nin): - assert isinstance(in_args[i], W_NDimArray) - in_iter, in_state = in_args[i].create_iter(shape) + in_i = in_args[i] + assert isinstance(in_i, W_NDimArray) + in_iter, in_state = in_i.create_iter(shape) in_iters[i] = in_iter in_states[i] = in_state for i in range(nout): - assert isinstance(out_args[i], W_NDimArray) - out_iter, out_state = out_args[i].create_iter(shape) + out_i = in_args[i] + assert isinstance(out_i, W_NDimArray) + out_iter, out_state = out_i.create_iter(shape) out_iters[i] = out_iter out_states[i] = out_state shapelen = len(shape) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -541,10 +541,13 @@ 'output arg %d must be an array, not %s', i+self.nin, str(args_w[i+self.nin])) outargs[i] = out index = self.type_resolver(space, inargs, outargs) - self.alloc_outargs(space, index, inargs, outargs) - new_shape = inargs[0].get_shape() - assert isinstance(outargs[0], W_NDimArray) - res_dtype = outargs[0].get_dtype() + outargs = 
self.alloc_outargs(space, index, inargs, outargs) + inargs0 = inargs[0] + outargs0 = outargs[0] + assert isinstance(inargs0, W_NDimArray) + assert isinstance(outargs0, W_NDimArray) + new_shape = inargs0.get_shape() + res_dtype = outargs0.get_dtype() # XXX handle inner-loop indexing sign_parts = self.signature.split('->') if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \ @@ -552,10 +555,11 @@ arglist = space.newlist(inargs + outargs) func = self.funcs[index] - space.call_function(func, *(inargs + outargs)) + arglist = space.newlist(inargs + outargs) + space.call_args(func, Arguments.frompacked(space, arglist)) if len(outargs) < 2: - return outargs[0] - return outargs + return outargs0 + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, self.funcs[index], res_dtype, inargs, outargs[0]) @@ -563,27 +567,34 @@ res_dtype, inargs, outargs) def type_resolver(self, space, inargs, outargs): - # Find a match for the inargs.dtype in self.dtypes, like - # linear_search_type_resolver in numy ufunc_type_resolutions.c + # Find a match for the inargs.dtype in self.dtypes, like + # linear_search_type_resolver in numy ufunc_type_resolutions.c + inargs0 = inargs[0] + assert isinstance(inargs0, W_NDimArray) for i in range(0, len(self.dtypes), self.nargs): - if inargs[0].get_dtype() == self.dtypes[i]: + if inargs0.get_dtype() == self.dtypes[i]: break else: if len(self.funcs) < 2: return 0 raise oefmt(space.w_TypeError, 'input dtype %s did not match any known dtypes', - str(inargs[0].get_dtype())) + str(inargs0.get_dtype())) return i / self.nargs def alloc_outargs(self, space, index, inargs, outargs): # Any None outarg should be allocated here - temp_shape = inargs[0].get_shape() # XXX wrong!!! - dtype = inargs[0].get_dtype() # XXX wrong!!! - order = inargs[0].get_order() + inargs0 = inargs[0] + assert isinstance(inargs0, W_NDimArray) + temp_shape = inargs0.get_shape() # XXX wrong!!! + dtype = inargs0.get_dtype() # XXX wrong!!! 
+ order = inargs0.get_order() for i in range(len(outargs)): if outargs[i] is None: outargs[i] = W_NDimArray.from_shape(space, temp_shape, dtype, order) + for i in range(len(outargs)): + assert isinstance(outargs[i], W_NDimArray) + return outargs def prep_call(self, space, index, inargs, outargs): # Use the index and signature to determine From noreply at buildbot.pypy.org Wed Aug 13 00:01:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Aug 2014 00:01:16 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: start to properly wrap raw ufunc for frompyfunc Message-ID: <20140812220116.0A7CC1C0793@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72773:d706b1f3f1eb Date: 2014-08-12 14:01 +0300 http://bitbucket.org/pypy/pypy/changeset/d706b1f3f1eb/ Log: start to properly wrap raw ufunc for frompyfunc diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -13,6 +13,9 @@ from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy import ufuncs from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app NPY_C_CONTIGUOUS = 0x0001 NPY_F_CONTIGUOUS = 0x0002 @@ -254,6 +257,29 @@ order=order, owning=owning, w_subtype=w_subtype) npy_intpp = rffi.LONGP +class W_GenericUFuncCaller(W_Root): + def __init__(self, func): + self.func = func + + def descr_call(self, space, __args__): + args_w, kwds_w = __args__.unpack() + datap = rffi.CFixedArray(rffi.CCHARP, len(args_w)) + dim_p = rffi.CFixedArray(npy_intpp, len(args_w)) + stepp = rffi.CFixedArray(npy_intpp, len(args_w)) + data = rffi.VOIDP + for i in len(args_w): + arg_i = args[i] + assert isinstance(arg_i, W_NDimArray) + datap[i] = cffi.cast(rffi.CCHARP, args.implementation.storage) + #This assumes we iterate over the 
last dimension? + dim_p[i] = arg_i.get_shape()[0] + stepp[i] = arg_i.get_strides()[0] + space.call_args(self.func, datap, dim_p, stepp, data) + +W_GenericUFuncCaller.typedef = TypeDef("hiddenclass", + __call__ = interp2app(W_GenericUFuncCaller.descr_call), +) + GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, rffi.VOIDP], rffi.VOIDP) gufunctype = lltype.Ptr(GenericUfunc) @@ -265,9 +291,13 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - funcs_w[i] = space.wrap(funcs[i]) - #print 'function',i,'is',funcs[i], hex(rffi.cast(lltype.Signed, funcs[i])) + funcs_w[i] = W_GenericUFuncCaller(funcs[i]) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] - return ufuncs.frompyfunc(space, space.newlist(funcs_w), nin, nout, dtypes_w, - signature, identity, name, doc) + w_funcs = space.newlist(funcs_w) + w_dtypes = space.newlist(dtypes_w) + w_signature = rffi.charp2str(signature) + w_doc = rffi.charp2str(doc) + w_name = rffi.charp2str(name) + return ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, + w_signature, identity, w_name, w_doc) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -1,7 +1,7 @@ +import py from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype - from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -213,14 +213,16 @@ assert res.get_scalar_value().real == 3. assert res.get_scalar_value().imag == 4. - def test_Ufunc_FromFuncAndDataAndSignature(self. 
space, api): + def _test_Ufunc_FromFuncAndDataAndSignature(self, space, api): + py.test.skip('preliminary non-translated test') + ''' PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; ufunc = api._PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, nin, nout, identity, doc, check_return, signature) - + ''' class AppTestNDArray(AppTestCpythonExtensionBase): def test_ndarray_object_c(self): From noreply at buildbot.pypy.org Wed Aug 13 00:01:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 13 Aug 2014 00:01:17 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: c function is called but crashes Message-ID: <20140812220117.5122C1C0793@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72774:4f42ac64e652 Date: 2014-08-13 00:59 +0300 http://bitbucket.org/pypy/pypy/changeset/4f42ac64e652/ Log: c function is called but crashes diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -12,9 +12,12 @@ from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy import ufuncs -from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from rpython.rlib.rawstorage import (RAW_STORAGE_PTR, raw_storage_getitem, raw_storage_setitem, + free_raw_storage, alloc_raw_storage) +from rpython.rlib.rarithmetic import LONG_BIT, _get_bitsize from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.argument import Arguments from pypy.interpreter.gateway import interp2app NPY_C_CONTIGUOUS = 0x0001 @@ -257,31 +260,44 @@ order=order, owning=owning, w_subtype=w_subtype) npy_intpp = rffi.LONGP +LONG_SIZE = LONG_BIT / 8 +CCHARP_SIZE = _get_bitsize('P') / 8 + class 
W_GenericUFuncCaller(W_Root): def __init__(self, func): self.func = func def descr_call(self, space, __args__): args_w, kwds_w = __args__.unpack() - datap = rffi.CFixedArray(rffi.CCHARP, len(args_w)) - dim_p = rffi.CFixedArray(npy_intpp, len(args_w)) - stepp = rffi.CFixedArray(npy_intpp, len(args_w)) - data = rffi.VOIDP - for i in len(args_w): - arg_i = args[i] + dataps = alloc_raw_storage(CCHARP_SIZE * len(args_w), track_allocation=False) + dims = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False) + steps = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False) + user_data = None + for i in range(len(args_w)): + arg_i = args_w[i] assert isinstance(arg_i, W_NDimArray) - datap[i] = cffi.cast(rffi.CCHARP, args.implementation.storage) + raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, arg_i.implementation.storage)) #This assumes we iterate over the last dimension? - dim_p[i] = arg_i.get_shape()[0] - stepp[i] = arg_i.get_strides()[0] - space.call_args(self.func, datap, dim_p, stepp, data) + raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_shape()[0])) + raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.implementation.strides[0])) + try: + import pdb;pdb.set_trace() + self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), + rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data) + except: + import traceback; traceback.print_exc() + raise + finally: + free_raw_storage(dataps, track_allocation=False) + free_raw_storage(dims, track_allocation=False) + free_raw_storage(steps, track_allocation=False) W_GenericUFuncCaller.typedef = TypeDef("hiddenclass", __call__ = interp2app(W_GenericUFuncCaller.descr_call), ) GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, - rffi.VOIDP], rffi.VOIDP) + rffi.VOIDP], lltype.Void) gufunctype = lltype.Ptr(GenericUfunc) @cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, 
Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, @@ -299,5 +315,6 @@ w_signature = rffi.charp2str(signature) w_doc = rffi.charp2str(doc) w_name = rffi.charp2str(name) - return ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, + ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, w_signature, identity, w_name, w_doc) + return ufunc_generic From noreply at buildbot.pypy.org Wed Aug 13 00:20:11 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Wed, 13 Aug 2014 00:20:11 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: sys.exit() should produce a SystemExit with code is None Message-ID: <20140812222011.EF84E1C0157@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72775:4558aef78acc Date: 2014-08-12 21:34 +0200 http://bitbucket.org/pypy/pypy/changeset/4558aef78acc/ Log: sys.exit() should produce a SystemExit with code is None diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -127,6 +127,24 @@ assert SystemExit("x").code == "x" assert SystemExit(1, 2).code == (1, 2) + def test_sys_exit(self): + import sys + + exc = raises(SystemExit, sys.exit) + assert exc.value.code is None + + exc = raises(SystemExit, sys.exit, 0) + assert exc.value.code == 0 + + exc = raises(SystemExit, sys.exit, 1) + assert exc.value.code == 1 + + exc = raises(SystemExit, sys.exit, 2) + assert exc.value.code == 2 + + exc = raises(SystemExit, sys.exit, (1, 2, 3)) + assert exc.value.code == (1, 2, 3) + def test_str_unicode(self): e = ValueError('àèì') assert str(e) == 'àèì' diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -49,7 +49,7 @@ except: return False # got an exception again... 
ignore, report the original -def exit(exitcode=0): +def exit(exitcode=None): """Exit the interpreter by raising SystemExit(exitcode). If the exitcode is omitted or None, it defaults to zero (i.e., success). If the exitcode is numeric, it will be used as the system exit status. From noreply at buildbot.pypy.org Wed Aug 13 00:20:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Aug 2014 00:20:13 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2 (pull request #267) Message-ID: <20140812222013.6270D1C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72776:a52dc76c7d2f Date: 2014-08-12 15:19 -0700 http://bitbucket.org/pypy/pypy/changeset/a52dc76c7d2f/ Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #267) sys.exit() should produce a SystemExit with code is None diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -127,6 +127,24 @@ assert SystemExit("x").code == "x" assert SystemExit(1, 2).code == (1, 2) + def test_sys_exit(self): + import sys + + exc = raises(SystemExit, sys.exit) + assert exc.value.code is None + + exc = raises(SystemExit, sys.exit, 0) + assert exc.value.code == 0 + + exc = raises(SystemExit, sys.exit, 1) + assert exc.value.code == 1 + + exc = raises(SystemExit, sys.exit, 2) + assert exc.value.code == 2 + + exc = raises(SystemExit, sys.exit, (1, 2, 3)) + assert exc.value.code == (1, 2, 3) + def test_str_unicode(self): e = ValueError('àèì') assert str(e) == 'àèì' diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -49,7 +49,7 @@ except: return False # got an exception again... ignore, report the original -def exit(exitcode=0): +def exit(exitcode=None): """Exit the interpreter by raising SystemExit(exitcode). 
If the exitcode is omitted or None, it defaults to zero (i.e., success). If the exitcode is numeric, it will be used as the system exit status. From noreply at buildbot.pypy.org Wed Aug 13 00:39:45 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 13 Aug 2014 00:39:45 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: clean out some unused code and insert zero_gc_ptr after GcArray malloc Message-ID: <20140812223945.7D3351C0547@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72777:c6e682e7221d Date: 2014-08-08 16:04 -0700 http://bitbucket.org/pypy/pypy/changeset/c6e682e7221d/ Log: clean out some unused code and insert zero_gc_ptr after GcArray malloc diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -11,7 +11,6 @@ from rpython.rtyper.rmodel import inputconst from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat -from rpython.rlib.debug import ll_assert from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.sourcetools import func_with_new_name @@ -270,9 +269,6 @@ if need_exc_matching: assert lastblock.exitswitch == c_last_exception if not self.raise_analyzer.can_raise(lastblock.operations[-1]): - #print ("operation %s cannot raise, but has exception" - # " guarding in graph %s" % (lastblock.operations[-1], - # graph)) lastblock.exitswitch = None lastblock.recloseblock(lastblock.exits[0]) lastblock.exits[0].exitcase = None @@ -393,10 +389,6 @@ return newgraph, SpaceOperation("direct_call", [fptr] + callargs, op.result) def gen_exc_check(self, block, returnblock, normalafterblock=None): - #var_exc_occured = Variable() - #var_exc_occured.concretetype = lltype.Bool - 
#block.operations.append(SpaceOperation("safe_call", [self.rpyexc_occured_ptr], var_exc_occured)) - llops = rtyper.LowLevelOpList(None) spaceop = block.operations[-1] @@ -425,9 +417,8 @@ l0.exitcase = l0.llexitcase = True block.recloseblock(l0, l) - insert_zeroing_op = False - if spaceop.opname == 'malloc': + if spaceop.opname in ['malloc','malloc_varsize']: flavor = spaceop.args[1].value['flavor'] if flavor == 'gc': insert_zeroing_op = True From noreply at buildbot.pypy.org Wed Aug 13 00:39:46 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 13 Aug 2014 00:39:46 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add import Message-ID: <20140812223946.B7CFC1C0793@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72778:05756b433478 Date: 2014-08-11 16:28 -0700 http://bitbucket.org/pypy/pypy/changeset/05756b433478/ Log: add import diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -11,6 +11,7 @@ from rpython.rtyper.rmodel import inputconst from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat +from rpython.rlib.debug import ll_assert from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.tool.sourcetools import func_with_new_name From noreply at buildbot.pypy.org Wed Aug 13 00:39:47 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 13 Aug 2014 00:39:47 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add tests Message-ID: <20140812223947.E210F1C0793@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72779:21a70c2f9848 Date: 2014-08-11 18:42 -0700 http://bitbucket.org/pypy/pypy/changeset/21a70c2f9848/ Log: add tests diff --git 
a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -107,7 +107,7 @@ def malloc(self, TYPE, n=None): addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) - #TODO: only zero fields if there is gc filed add something like has_gc_ptr() + if not self.gc.malloc_zero_filled: zero_gc_pointers_inside(obj_ptr, TYPE) return obj_ptr @@ -667,9 +667,25 @@ class TestIncrementalMiniMarkGCFull(DirectGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass - def test_no_cleanup(self): + def test_malloc_fixedsize_no_cleanup(self): p = self.malloc(S) import pytest with pytest.raises(lltype.UninitializedMemoryAccess): x1 = p.x - \ No newline at end of file + assert p.prev == lltype.nullptr(S) + assert p.next == lltype.nullptr(S) + + def test_malloc_varsize_no_cleanup(self): + x = lltype.Signed + VAR1 = lltype.GcArray(x) + p = self.malloc(VAR1,5) + import pytest + with pytest.raises(lltype.UninitializedMemoryAccess): + x1 = p[0] + + def test_malloc_varsize_no_cleanup2(self): + p = self.malloc(VAR,100) + for i in range(100): + assert p[0] == lltype.nullptr(S) + assert False + diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1,6 +1,7 @@ import py import inspect +from rpython.rlib.objectmodel import compute_hash, compute_identity_hash from rpython.translator.c import gc from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import SomePtr @@ -13,6 +14,7 @@ from rpython.conftest import option from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import LONG_BIT +import pdb WORD = LONG_BIT // 8 @@ -154,7 +156,6 @@ class GenericGCTests(GCTest): GC_CAN_SHRINK_ARRAY = False - def 
define_instances(cls): class A(object): pass @@ -709,7 +710,6 @@ GC_CAN_MOVE = True GC_CAN_MALLOC_NONMOVABLE = False GC_CAN_TEST_ID = False - def define_many_ids(cls): class A(object): pass @@ -1118,6 +1118,7 @@ def test_adr_of_nursery(self): run = self.runner("adr_of_nursery") res = run([]) + class TestGenerationalNoFullCollectGC(GCTest): # test that nursery is doing its job and that no full collection @@ -1178,7 +1179,7 @@ 'large_object': 8*WORD, 'translated_to_c': False} root_stack_depth = 200 - + def define_ref_from_rawmalloced_to_regular(cls): import gc S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -1232,8 +1233,7 @@ def test_malloc_nonmovable_fixsize(self): py.test.skip("not supported") - - + class TestMiniMarkGC(TestHybridGC): gcname = "minimark" GC_CAN_TEST_ID = True @@ -1250,7 +1250,7 @@ 'translated_to_c': False, } root_stack_depth = 200 - + def define_no_clean_setarrayitems(cls): # The optimization find_clean_setarrayitems() in # gctransformer/framework.py does not work with card marking. 
@@ -1275,6 +1275,29 @@ run = self.runner("no_clean_setarrayitems") res = run([]) assert res == 123 + + def define_nursery_hash_base(cls): + class A: + pass + def fn(): + objects = [] + hashes = [] + for i in range(200): + rgc.collect(0) # nursery-only collection, if possible + obj = A() + objects.append(obj) + hashes.append(compute_identity_hash(obj)) + unique = {} + for i in range(len(objects)): + assert compute_identity_hash(objects[i]) == hashes[i] + unique[hashes[i]] = None + return len(unique) + return fn + + def test_nursery_hash_base(self): + res = self.runner('nursery_hash_base') + assert res >= 195 + assert False class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" @@ -1292,8 +1315,58 @@ 'translated_to_c': False, } root_stack_depth = 200 + + def define_malloc_array_of_gcptr(self): + S = lltype.GcStruct('S', ('x', lltype.Signed)) + A = lltype.GcArray(lltype.Ptr(S)) + def f(): + lst = lltype.malloc(A, 5, zero= False) + return (lst[0] == lltype.nullptr(S) + and lst[1] == lltype.nullptr(S) + and lst[2] == lltype.nullptr(S) + and lst[3] == lltype.nullptr(S) + and lst[4] == lltype.nullptr(S)) + return f + + def test_malloc_array_of_gcptr(self): + run = self.runner('malloc_array_of_gcptr') + res = run([]) + assert not res + ''' + def define_malloc_struct_of_gcptr(cls): + S1 = lltype.GcStruct('S', ('x', lltype.Signed)) + S = lltype.GcStruct('S', + ('x', lltype.Signed), + ('filed1', lltype.Ptr(S1)), + ('filed2', lltype.Ptr(S1))) + s0 = lltype.malloc(S) + def f(): + return (s0.filed1 == lltype.nullptr(S1) and s0.filed2 == lltype.nullptr(S1)) + return f + def test_malloc_struct_of_gcptr(self): + run = self.runner("malloc_struct_of_gcptr") + res = run([]) + assert res + ''' + ''' + def define_malloc_struct_of_gcptr(cls): + S = lltype.GcForwardReference() + S.become(lltype.GcStruct('S', + ('x', lltype.Signed), + ('prev', lltype.Ptr(S)), + ('next', lltype.Ptr(S)))) + s0 = lltype.malloc(S,zero = False) + def f(): + return s0.next == lltype.nullptr(S) + 
return f + def test_malloc_struct_of_gcptr(self): + run = self.runner("malloc_struct_of_gcptr") + pdb.set_trace() + res = run([]) + assert res + ''' # ________________________________________________________________ # tagged pointers From noreply at buildbot.pypy.org Wed Aug 13 00:39:49 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Wed, 13 Aug 2014 00:39:49 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: add test Message-ID: <20140812223949.29E451C0793@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72780:44d322ba289f Date: 2014-08-12 15:37 -0700 http://bitbucket.org/pypy/pypy/changeset/44d322ba289f/ Log: add test diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -12,6 +12,7 @@ from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int from rpython.memory.gc import minimark, incminimark from rpython.memory.gctypelayout import zero_gc_pointers_inside +from rpython.rlib.debug import debug_print WORD = LONG_BIT // 8 ADDR_ARRAY = lltype.Array(llmemory.Address) @@ -670,8 +671,10 @@ def test_malloc_fixedsize_no_cleanup(self): p = self.malloc(S) import pytest + #ensure the memory is uninitialized with pytest.raises(lltype.UninitializedMemoryAccess): x1 = p.x + #ensure all the ptr fields are zeroed assert p.prev == lltype.nullptr(S) assert p.next == lltype.nullptr(S) @@ -686,6 +689,52 @@ def test_malloc_varsize_no_cleanup2(self): p = self.malloc(VAR,100) for i in range(100): + print type(p[0]) assert p[0] == lltype.nullptr(S) - assert False + def test_malloc_struct_of_ptr_arr(self): + S2 = lltype.GcForwardReference() + S2.become(lltype.GcStruct('S2', + ('gcptr_arr', VAR))) + s2 = self.malloc(S2) + s2.gcptr_arr = self.malloc(VAR,100) + for i in range(100): + assert s2.gcptr_arr[i] == lltype.nullptr(S) + + def test_malloc_struct_of_ptr_struct(self): + S3 = 
lltype.GcForwardReference() + S3.become(lltype.GcStruct('S3', + ('gcptr_struct', S), + ('prev', lltype.Ptr(S)), + ('next', lltype.Ptr(S)))) + s3 = self.malloc(S3) + assert s3.gcptr_struct.prev == lltype.nullptr(S) + assert s3.gcptr_struct.next == lltype.nullptr(S) + + def test_malloc_array_of_ptr_struct(self): + ARR_OF_PTR_STRUCT = lltype.GcArray(lltype.Ptr(S)) + arr_of_ptr_struct = self.malloc(ARR_OF_PTR_STRUCT,5) + for i in range(5): + assert arr_of_ptr_struct[i] == lltype.nullptr(S) + assert arr_of_ptr_struct[i] == lltype.nullptr(S) + arr_of_ptr_struct[i] = self.malloc(S) + assert arr_of_ptr_struct[i].prev == lltype.nullptr(S) + assert arr_of_ptr_struct[i].next == lltype.nullptr(S) + + + def test_malloc_array_of_ptr_arr(self): + ARR_OF_PTR_ARR = lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S)))) + arr_of_ptr_arr = lltype.malloc(ARR_OF_PTR_ARR, 10) + for i in range(10): + assert arr_of_ptr_arr[i] == lltype.nullptr(lltype.GcArray(lltype.Ptr(S))) + for i in range(10): + arr_of_ptr_arr[i] = self.malloc(lltype.GcArray(lltype.Ptr(S)), i) + debug_print (arr_of_ptr_arr[i]) + for elem in arr_of_ptr_arr[i]: + debug_print(elem) + assert elem == lltype.nullptr(S) + elem = self.malloc(S) + #assert elem.prev == lltype.nullptr(S) + #assert elem.next == lltype.nullptr(S) + + \ No newline at end of file From noreply at buildbot.pypy.org Wed Aug 13 01:44:58 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Aug 2014 01:44:58 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140812234458.811281C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72781:92b4b658ae4b Date: 2014-08-12 16:43 -0700 http://bitbucket.org/pypy/pypy/changeset/92b4b658ae4b/ Log: merge default diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -30,6 +30,10 @@ return TkApp(screenName, className, interactive, wantobjects, wantTk, sync, use) 
+def dooneevent(flags=0): + return tklib.Tcl_DoOneEvent(flags) + + def _flatten(item): def _flatten1(output, item, depth): if depth > 1000: diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -1,8 +1,9 @@ +import sys from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, - PASS_ON_MY_FRAME) + PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -15,6 +16,8 @@ # Same for gcc 4.5.0, better safe than sorry CALL_ALIGN = 16 // WORD +stdcall_or_cdecl = sys.platform == "win32" + def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) @@ -44,11 +47,6 @@ self.stack_max = PASS_ON_MY_FRAME - asmgcroot.JIT_USE_WORDS assert self.stack_max >= 3 - def emit_raw_call(self): - self.mc.CALL(self.fnloc) - if self.callconv != FFI_DEFAULT_ABI: - self.current_esp += self._fix_stdcall(self.callconv) - def subtract_esp_aligned(self, count): if count > 0: align = align_stack_words(count) @@ -246,6 +244,28 @@ self.fnloc = RawEspLoc(p - WORD, INT) + def emit_raw_call(self): + if stdcall_or_cdecl and self.is_call_release_gil: + # Dynamically accept both stdcall and cdecl functions. + # We could try to detect from pyjitpl which calling + # convention this particular function takes, which would + # avoid these two extra MOVs... but later. The ebp register + # is unused here: it will be reloaded from the shadowstack. + # (This doesn't work during testing, though. Hack hack hack.) 
+ save_ebp = not self.asm.cpu.gc_ll_descr.is_shadow_stack() + ofs = WORD * (FRAME_FIXED_SIZE - 1) + if save_ebp: # only for testing (or with Boehm) + self.mc.MOV_sr(ofs, ebp.value) + self.mc.MOV(ebp, esp) + self.mc.CALL(self.fnloc) + self.mc.MOV(esp, ebp) + if save_ebp: # only for testing (or with Boehm) + self.mc.MOV_rs(ebp.value, ofs) + else: + self.mc.CALL(self.fnloc) + if self.callconv != FFI_DEFAULT_ABI: + self.current_esp += self._fix_stdcall(self.callconv) + def _fix_stdcall(self, callconv): from rpython.rlib.clibffi import FFI_STDCALL assert callconv == FFI_STDCALL @@ -417,8 +437,9 @@ remap_frame_layout(self.asm, src_locs, dst_locs, X86_64_SCRATCH_REG) - def _fix_stdcall(self, callconv): - assert 0 # should not occur on 64-bit + def emit_raw_call(self): + assert self.callconv == FFI_DEFAULT_ABI + self.mc.CALL(self.fnloc) def load_result(self): if self.restype == 'S': diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -438,20 +438,26 @@ if WORD != 4: py.test.skip("32-bit only test") from rpython.jit.backend.x86.regloc import eax, edx - from rpython.jit.backend.x86 import codebuf + from rpython.jit.backend.x86 import codebuf, callbuilder from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib.libffi import types, clibffi had_stdcall = hasattr(clibffi, 'FFI_STDCALL') if not had_stdcall: # not running on Windows, but we can still test monkeypatch.setattr(clibffi, 'FFI_STDCALL', 12345, raising=False) + monkeypatch.setattr(callbuilder, 'stdcall_or_cdecl', True) + else: + assert callbuilder.stdcall_or_cdecl # - for ffi in [clibffi.FFI_DEFAULT_ABI, clibffi.FFI_STDCALL]: + for real_ffi, reported_ffi in [ + (clibffi.FFI_DEFAULT_ABI, clibffi.FFI_DEFAULT_ABI), + (clibffi.FFI_STDCALL, clibffi.FFI_DEFAULT_ABI), + (clibffi.FFI_STDCALL, clibffi.FFI_STDCALL)]: cpu = self.cpu mc = 
codebuf.MachineCodeBlockWrapper() mc.MOV_rs(eax.value, 4) # argument 1 mc.MOV_rs(edx.value, 40) # argument 10 mc.SUB_rr(eax.value, edx.value) # return arg1 - arg10 - if ffi == clibffi.FFI_DEFAULT_ABI: + if real_ffi == clibffi.FFI_DEFAULT_ABI: mc.RET() else: mc.RET16_i(40) @@ -459,7 +465,7 @@ # calldescr = cpu._calldescr_dynamic_for_tests([types.slong] * 10, types.slong) - calldescr.get_call_conv = lambda: ffi # <==== hack + calldescr.get_call_conv = lambda: reported_ffi # <==== hack # ^^^ we patch get_call_conv() so that the test also makes sense # on Linux, because clibffi.get_call_conv() would always # return FFI_DEFAULT_ABI on non-Windows platforms. From noreply at buildbot.pypy.org Wed Aug 13 01:45:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Aug 2014 01:45:01 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140812234501.8C2B61C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72782:ed05d05aefb3 Date: 2014-08-12 16:44 -0700 http://bitbucket.org/pypy/pypy/changeset/ed05d05aefb3/ Log: merge py3k diff too long, truncating to 2000 out of 11009 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). 
+""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Wed Aug 13 01:55:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 13 Aug 2014 01:55:21 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140812235521.ABCDC1C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r526:44c9384d8110 Date: 2014-08-13 01:55 +0200 http://bitbucket.org/pypy/pypy.org/changeset/44c9384d8110/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $52294 of $105000 (49.8%) + $52304 of $105000 (49.8%)
          diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $13478 of $80000 (16.8%) + $13914 of $80000 (17.4%)
          From noreply at buildbot.pypy.org Wed Aug 13 02:17:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Aug 2014 02:17:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix another use of _mixin_ Message-ID: <20140813001703.79A2E1C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72783:9e37694ae6d3 Date: 2014-08-12 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/9e37694ae6d3/ Log: fix another use of _mixin_ diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer +from rpython.rlib.objectmodel import import_from_mixin from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef @@ -13,7 +14,6 @@ return 1 class CBufferMixin(object): - _mixin_ = True def __init__(self, space, c_buf, c_len, w_obj): self.space = space @@ -35,7 +35,8 @@ return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf), self.c_len) -class CBuffer(CBufferMixin, buffer.Buffer): +class CBuffer(buffer.Buffer): + import_from_mixin(CBufferMixin) _immutable_ = True def __del__(self): From noreply at buildbot.pypy.org Wed Aug 13 02:17:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 13 Aug 2014 02:17:04 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140813001704.C59A91C0547@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72784:d121572ebfcd Date: 2014-08-12 17:16 -0700 http://bitbucket.org/pypy/pypy/changeset/d121572ebfcd/ Log: merge py3k diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer +from rpython.rlib.objectmodel import 
import_from_mixin from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef @@ -13,7 +14,6 @@ return 1 class CBufferMixin(object): - _mixin_ = True def __init__(self, space, c_buf, c_len, w_obj): self.space = space @@ -35,7 +35,8 @@ return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf), self.c_len) -class CBuffer(CBufferMixin, buffer.Buffer): +class CBuffer(buffer.Buffer): + import_from_mixin(CBufferMixin) _immutable_ = True def __del__(self): From noreply at buildbot.pypy.org Wed Aug 13 18:11:33 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 13 Aug 2014 18:11:33 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix a race between doing shadow stack snapshots and a concurrently running major Message-ID: <20140813161133.E9C1D1C03AC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1311:b7f132d1afba Date: 2014-08-13 18:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/b7f132d1afba/ Log: fix a race between doing shadow stack snapshots and a concurrently running major collection. 
Also add more comments diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c --- a/c7/demo/demo_random2.c +++ b/c7/demo/demo_random2.c @@ -16,7 +16,7 @@ #define FORKS 3 #define ACTIVE_ROOTS_SET_SIZE 100 // max num of roots created/alive in one transaction - +#define MAX_ROOTS_ON_SS 1000 // max on shadow stack // SUPPORT struct node_s; @@ -127,12 +127,17 @@ { int i; long to_push = td.active_roots_num; + long not_pushed = 0; for (i = to_push - 1; i >= 0; i--) { - STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]); - td.roots_on_ss++; td.active_roots_num--; + if (td.roots_on_ss < MAX_ROOTS_ON_SS) { + STM_PUSH_ROOT(stm_thread_local, td.active_roots_set[i]); + td.roots_on_ss++; + } else { + not_pushed++; + } } - return to_push; + return to_push - not_pushed; } void add_root(objptr_t r); @@ -206,6 +211,7 @@ objptr_t simple_events(objptr_t p, objptr_t _r) { int k = get_rand(10); + long pushed; switch (k) { case 0: // remove a root @@ -221,8 +227,7 @@ p = _r; break; case 3: // allocate fresh 'p' - ; - long pushed = push_roots(); + pushed = push_roots(); size_t sizes[4] = {sizeof(struct node_s), sizeof(struct node_s) + (get_rand(100000) & ~15), sizeof(struct node_s) + 4096, @@ -281,7 +286,6 @@ return p; } - void frame_loop(); objptr_t do_step(objptr_t p) { @@ -306,28 +310,28 @@ td.roots_on_ss = td.roots_on_ss_at_tr_start; td.active_roots_num = 0; pop_roots(pushed); - return NULL; + p = NULL; } else if (get_rand(10) == 1) { long pushed = push_roots(); /* leaving our frame */ frame_loop(); /* back in our frame */ pop_roots(pushed); - return NULL; + p = NULL; } else if (get_rand(20) == 1) { long pushed = push_roots(); stm_become_inevitable(&stm_thread_local, "please"); assert(stm_is_inevitable()); pop_roots(pushed); - return NULL; + p= NULL; } else if (get_rand(20) == 1) { - return (objptr_t)-1; // possibly fork + p = (objptr_t)-1; // possibly fork } else if (get_rand(20) == 1) { long pushed = push_roots(); stm_become_globally_unique_transaction(&stm_thread_local, 
"really"); fprintf(stderr, "[GUT/%d]", (int)STM_SEGMENT->segment_num); pop_roots(pushed); - return NULL; + p = NULL; } return p; } @@ -338,7 +342,9 @@ rewind_jmp_buf rjbuf; stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - volatile long roots_on_ss = td.roots_on_ss; + //fprintf(stderr,"%p F: %p\n", STM_SEGMENT->running_thread, __builtin_frame_address(0)); + + long roots_on_ss = td.roots_on_ss; /* "interpreter main loop": this is one "application-frame" */ while (td.steps_left-->0 && get_rand(10) != 0) { if (td.steps_left % 8 == 0) @@ -348,6 +354,7 @@ p = do_step(p); + if (p == (objptr_t)-1) { p = NULL; diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -997,6 +997,9 @@ /* NB. careful, this function might be called more than once to abort a given segment. Make sure that stm_rewind_jmp_restore_shadowstack() is idempotent. */ + /* we need to do this here and not directly in rewind_longjmp() because + that is called when we already released everything (safe point) + and a concurrent major GC could mess things up. */ stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -4,6 +4,12 @@ #include #include +#ifndef _STM_CORE_H_ +long _has_mutex() {return 1;} +void s_mutex_lock() {} +void s_mutex_unlock() {} +#endif + struct _rewind_jmp_moved_s { struct _rewind_jmp_moved_s *next; @@ -23,26 +29,39 @@ static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase) { - /* Copy away part of the stack and shadowstack. + /* Copy away part of the stack and shadowstack. Sets moved_off_base to + the current frame_base. + The stack is copied between 'base' (lower limit, i.e. newest bytes) and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes). 
The shadowstack is copied between 'ssbase' (upper limit, newest) and 'rjthread->head->shadowstack_base' (lower limit, oldest). */ + struct _rewind_jmp_moved_s *next; + char *stop; + void *ssstop; + size_t stack_size, ssstack_size; + + assert(_has_mutex()); + assert(rjthread->head != NULL); - char *stop = rjthread->head->frame_base; + stop = rjthread->head->frame_base; + ssstop = rjthread->head->shadowstack_base; assert(stop >= base); - void *ssstop = rjthread->head->shadowstack_base; assert(ssstop <= ssbase); - struct _rewind_jmp_moved_s *next = (struct _rewind_jmp_moved_s *) - rj_malloc(RJM_HEADER + (stop - base) + (ssbase - ssstop)); + stack_size = stop - base; + ssstack_size = ssbase - ssstop; + + next = (struct _rewind_jmp_moved_s *) + rj_malloc(RJM_HEADER + stack_size + ssstack_size); assert(next != NULL); /* XXX out of memory */ next->next = rjthread->moved_off; - next->stack_size = stop - base; - next->shadowstack_size = ssbase - ssstop; - memcpy(((char *)next) + RJM_HEADER, base, stop - base); - memcpy(((char *)next) + RJM_HEADER + (stop - base), ssstop, - ssbase - ssstop); + next->stack_size = stack_size; + next->shadowstack_size = ssstack_size; + + memcpy(((char *)next) + RJM_HEADER, base, stack_size); + memcpy(((char *)next) + RJM_HEADER + stack_size, ssstop, + ssstack_size); rjthread->moved_off_base = stop; rjthread->moved_off_ssbase = ssstop; @@ -52,7 +71,12 @@ __attribute__((noinline)) long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss) { + /* saves the current stack frame to the list of slices and + calls setjmp(). 
It returns the number of times a longjmp() + jumped back to this setjmp() */ if (rjthread->moved_off) { + /* old stack slices are not needed anymore (next longjmp() + will restore only to this setjmp()) */ _rewind_jmp_free_stack_slices(rjthread); } /* all locals of this function that need to be saved and restored @@ -72,22 +96,36 @@ result = rjthread->repeat_count + 1; } rjthread->repeat_count = result; + + /* snapshot of top frame: needed every time because longjmp() frees + the previous one. Need to have mutex locked otherwise a concurrent + GC may get garbage while saving shadow stack */ + s_mutex_lock(); copy_stack(rjthread, (char *)&saved, saved.ss1); + s_mutex_unlock(); + return result; } __attribute__((noinline, noreturn)) static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free) { + /* go through list of copied stack-slices and copy them back to the + current stack, expanding it if necessary. The shadowstack should + already be restored at this point (restore_shadowstack()) */ assert(rjthread->moved_off_base != NULL); + s_mutex_lock(); while (rjthread->moved_off) { struct _rewind_jmp_moved_s *p = rjthread->moved_off; char *target = rjthread->moved_off_base; + /* CPU stack grows downwards: */ target -= p->stack_size; if (target < stack_free) { /* need more stack space! 
*/ + s_mutex_unlock(); do_longjmp(rjthread, alloca(stack_free - target)); + abort(); /* unreachable */ } memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size); @@ -95,6 +133,8 @@ rjthread->moved_off = p->next; rj_free(p); } + + s_mutex_unlock(); __builtin_longjmp(rjthread->jmpbuf, 1); } @@ -109,19 +149,25 @@ char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread, void *callback(void *, const void *, size_t)) { + /* enumerate all saved shadow-stack slices */ struct _rewind_jmp_moved_s *p = rjthread->moved_off; char *sstarget = rjthread->moved_off_ssbase; + assert(_has_mutex()); + while (p) { - char *ssend = sstarget + p->shadowstack_size; - callback(sstarget, ((char *)p) + RJM_HEADER + p->stack_size, - p->shadowstack_size); - sstarget = ssend; + if (p->shadowstack_size) { + void *ss_slice = ((char *)p) + RJM_HEADER + p->stack_size; + callback(sstarget, ss_slice, p->shadowstack_size); + + sstarget += p->shadowstack_size; + } p = p->next; } return sstarget; } + char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread) { return rewind_jmp_enum_shadowstack(rjthread, memcpy); @@ -130,16 +176,23 @@ __attribute__((noinline)) void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) { + /* called when leaving a frame. 
copies the now-current frame + to the list of stack-slices */ + s_mutex_lock(); if (rjthread->head == NULL) { _rewind_jmp_free_stack_slices(rjthread); + s_mutex_unlock(); return; } assert(rjthread->moved_off_base < (char *)rjthread->head); copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase); + s_mutex_unlock(); } void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) { + /* frees all saved stack copies */ + assert(_has_mutex()); struct _rewind_jmp_moved_s *p = rjthread->moved_off; struct _rewind_jmp_moved_s *pnext; while (p) { diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -1,9 +1,20 @@ #ifndef _REWIND_SETJMP_H_ #define _REWIND_SETJMP_H_ + #include /************************************************************ +There is a singly-linked list of frames in each thread +rjthread->head->prev->prev->prev + +Another singly-linked list is the list of copied stack-slices. +When doing a setjmp(), we copy the top-frame, free all old +stack-slices, and link it to the top-frame->moved_off. +When returning from the top-frame while moved_off still points +to a slice, we also need to copy the top-frame->prev frame/slice +and add it to this list (pointed to by moved_off). +-------------------------------------------------------------- : : ^^^^^ |-------------------| older frames in the stack @@ -58,6 +69,7 @@ } rewind_jmp_thread; +/* remember the current stack and ss_stack positions */ #define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ (rjbuf)->frame_base = __builtin_frame_address(0); \ (rjbuf)->shadowstack_base = (char *)(ss); \ @@ -65,6 +77,8 @@ (rjthread)->head = (rjbuf); \ } while (0) +/* go up one frame. 
if there was a setjmp call in this frame, + */ #define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \ assert((rjbuf)->shadowstack_base == (char *)(ss)); \ (rjthread)->head = (rjbuf)->prev; \ From noreply at buildbot.pypy.org Thu Aug 14 08:17:53 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 14 Aug 2014 08:17:53 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Using byte-strings 8f5d79d24198 causes wierd rtyper errors. Try something else. Message-ID: <20140814061753.525321C0323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72785:0016d703c625 Date: 2014-08-13 02:28 -0500 http://bitbucket.org/pypy/pypy/changeset/0016d703c625/ Log: Using byte-strings 8f5d79d24198 causes wierd rtyper errors. Try something else. diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -27,23 +27,24 @@ newline = None else: newline = space.unicode_w(w_newline) - newline = newline.bytes - if (newline and newline != '\n' and newline != '\r\n' and - newline != '\r'): - # Not using oefmt() because I don't know how to ues it + if (newline is not None and len(newline) and + not (utf8.EQ(newline, Utf8Str('\n')) or + utf8.EQ(newline, Utf8Str('\r\n')) or + utf8.EQ(newline, Utf8Str('\r')))): + # Not using oefmt() because I don't know how to use it # with unicode raise OperationError(space.w_ValueError, space.mod( space.wrap("illegal newline value: %s"), space.wrap(newline) ) ) - if newline is not None: self.readnl = newline - self.readuniversal = not newline + self.readuniversal = newline is None or not len(newline) self.readtranslate = newline is None - if newline and newline[0] == '\r': + if (newline is not None and len(newline) and + utf8ord(newline) == ord("\r")): self.writenl = newline if self.readuniversal: self.w_decoder = space.call_function( @@ -143,10 +144,9 @@ else: w_decoded = w_obj - if self.writenl: + if self.writenl 
is not None and len(self.writenl): w_decoded = space.call_method( - w_decoded, "replace", space.wrap("\n"), - space.wrap(Utf8Str(self.writenl)) + w_decoded, "replace", space.wrap("\n"), space.wrap(self.writenl) ) string = space.unicode_w(w_decoded) diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py --- a/pypy/module/_io/interp_textio.py +++ b/pypy/module/_io/interp_textio.py @@ -372,28 +372,28 @@ newline = None else: newline = space.unicode_w(w_newline) - # newline is guaranteed to be either empty or ascii - newline = newline.bytes - if (newline and newline != '\n' and newline != '\r\n' and - newline != '\r'): + if (newline is not None and len(newline) and + not (utf8.EQ(newline, Utf8Str('\n')) or + utf8.EQ(newline, Utf8Str('\r\n')) or + utf8.EQ(newline, Utf8Str('\r')))): r = space.str_w(space.repr(w_newline)) raise OperationError(space.w_ValueError, space.wrap( "illegal newline value: %s" % (r,))) self.line_buffering = line_buffering - self.readuniversal = not newline + self.readuniversal = newline is None or not len(newline) self.readtranslate = newline is None self.readnl = newline - self.writetranslate = (newline is not None and newline != '') + self.writetranslate = (newline is not None and len(newline)) if not self.readuniversal: self.writenl = self.readnl - if self.writenl == '\n': + if utf8.EQ(self.writenl, Utf8Str('\n')): self.writenl = None elif _WINDOWS: - self.writenl = "\r\n" + self.writenl = Utf8Str("\r\n") else: self.writenl = None @@ -663,7 +663,7 @@ start = endpos = offset_to_buffer = 0 break - if not remaining: + if remaining is None or not len(remaining): line = self.decoded_chars start = self.decoded_chars_used offset_to_buffer = 0 @@ -705,22 +705,22 @@ # We have consumed the buffer self._set_decoded_chars(None) - if line: + if line is not None and len(line): # Our line ends in the current buffer decoded_chars_used = endpos - offset_to_buffer assert decoded_chars_used >= 0 self.decoded_chars_used = 
decoded_chars_used if start > 0 or endpos < len(line): line = line[start:endpos] - if remaining: + if remaining is not None and len(remaining): chunks.append(remaining) remaining = None if chunks: - if line: + if line is not None and len(line): chunks.append(line) line = Utf8Str('').join(chunks) - if line: + if line is not None and len(line): return space.wrap(line) else: return space.wrap(Utf8Str('')) @@ -743,10 +743,12 @@ textlen = len(text) haslf = False - if (self.writetranslate and self.writenl) or self.line_buffering: + if (self.writetranslate and self.writenl is not None and + len(self.writenl)) or self.line_buffering: if text.find('\n') >= 0: haslf = True - if haslf and self.writetranslate and self.writenl: + if (haslf and self.writetranslate and + self.writenl is not None and len(self.writenl)): w_text = space.call_method(w_text, "replace", space.wrap(Utf8Str('\n')), space.wrap(self.writenl)) From noreply at buildbot.pypy.org Thu Aug 14 08:17:54 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 14 Aug 2014 08:17:54 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: More untranslated vs translated bools Message-ID: <20140814061754.8B00B1C0323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72786:c5c9087c291c Date: 2014-08-13 02:29 -0500 http://bitbucket.org/pypy/pypy/changeset/c5c9087c291c/ Log: More untranslated vs translated bools diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -462,7 +462,7 @@ self._precision = -1 spec = self.spec - if not spec: + if (spec is None or not len(spec)): return True length = len(spec) @@ -656,7 +656,7 @@ if self._fill_char == ord("0") and self._align == ord("="): spec.n_min_width = self._width - extra_length - if self._loc_thousands: + if self._loc_thousands is not None and len(self._loc_thousands): self._group_digits(spec, digits[to_number:]) n_grouped_digits = 
len(self._grouped_digits) else: @@ -774,7 +774,7 @@ out.append_multiple_char(chr(fill_char), spec.n_spadding) if spec.n_digits != 0: - if self._loc_thousands: + if self._loc_thousands is not None and len(self._loc_thousands): if grouped_digits is not None: digits = grouped_digits else: diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -447,7 +447,7 @@ sb = self._builder(prealloc_size) for i in range(size): - if value and i != 0: + if (value is not None and len(value)) and i != 0: sb.append(value) sb.append(unwrapped[i]) return self._new(sb.build()) From noreply at buildbot.pypy.org Thu Aug 14 08:17:55 2014 From: noreply at buildbot.pypy.org (waedt) Date: Thu, 14 Aug 2014 08:17:55 +0200 (CEST) Subject: [pypy-commit] pypy utf8-unicode2: Simplify iterators. Use iterators consistently when encoding unicode strings Message-ID: <20140814061755.CDCC21C0323@cobra.cs.uni-duesseldorf.de> Author: Tyler Wade Branch: utf8-unicode2 Changeset: r72787:9f7fc269657f Date: 2014-08-14 01:02 -0500 http://bitbucket.org/pypy/pypy/changeset/9f7fc269657f/ Log: Simplify iterators. 
Use iterators consistently when encoding unicode strings diff --git a/pypy/interpreter/test/test_utf8.py b/pypy/interpreter/test/test_utf8.py --- a/pypy/interpreter/test/test_utf8.py +++ b/pypy/interpreter/test/test_utf8.py @@ -2,8 +2,7 @@ import py import sys -from pypy.interpreter.utf8 import ( - Utf8Str, Utf8Builder, utf8chr, utf8ord) +from pypy.interpreter.utf8 import Utf8Str, Utf8Builder, utf8chr, utf8ord from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.test.test_llinterp import interpret @@ -29,24 +28,8 @@ def test_iterator(): s = build_utf8str() iter = s.codepoint_iter() - assert iter.peek_next() == 0x41 assert list(iter) == [0x41, 0x10F, 0x20AC, 0x1F63D] - for i in range(1, 5): - iter = s.codepoint_iter() - iter.move(i) - if i != 4: - assert iter.peek_next() == [0x41, 0x10F, 0x20AC, 0x1F63D][i] - l = list(iter) - assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][i:] - - for i in range(1, 5): - iter = s.codepoint_iter() - list(iter) # move the iterator to the end - iter.move(-i) - l = list(iter) - assert l == [0x41, 0x10F, 0x20AC, 0x1F63D][4-i:] - iter = s.char_iter() l = [s.bytes.decode('utf8') for s in list(iter)] if sys.maxunicode < 65536: @@ -54,26 +37,17 @@ else: assert l == [u'A', u'\u010F', u'\u20AC', u'\U0001F63D'] -def test_reverse_iterator(): +def test_new_iterator(): s = build_utf8str() - iter = s.reverse_codepoint_iter() - assert iter.peek_next() == 0x1F63D - assert list(iter) == [0x1F63D, 0x20AC, 0x10F, 0x41] + i = s.iter() + while not i.finished(): + assert utf8ord(s, i.pos()) == i.current() + i.move(1) - for i in range(1, 5): - iter = s.reverse_codepoint_iter() - iter.move(i) - if i != 4: - assert iter.peek_next() == [0x1F63D, 0x20AC, 0x10F, 0x41][i] - l = list(iter) - assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][i:] - - for i in range(1, 5): - iter = s.reverse_codepoint_iter() - list(iter) # move the iterator to the end - iter.move(-i) - l = list(iter) - assert l == [0x1F63D, 0x20AC, 0x10F, 0x41][4-i:] + i = s.iter(len(s) - 1) + while 
i.pos() >= 0: + assert utf8ord(s, i.pos()) == i.current() + i.move(-1) def test_builder_append_slice(): builder = Utf8Builder() @@ -146,7 +120,6 @@ s = Utf8Str(' ') assert s.join([]) == u'' - assert s.join([Utf8Str('one')]) == u'one' assert s.join([Utf8Str('one'), Utf8Str('two')]) == u'one two' diff --git a/pypy/interpreter/utf8.py b/pypy/interpreter/utf8.py --- a/pypy/interpreter/utf8.py +++ b/pypy/interpreter/utf8.py @@ -309,18 +309,15 @@ def __unicode__(self): return unicode(self.bytes, 'utf8') + def iter(self, start=0): + return Utf8Iterator(self, start) + def char_iter(self): return Utf8CharacterIter(self) - def reverse_char_iter(self): - return Utf8ReverseCharacterIter(self) - def codepoint_iter(self): return Utf8CodePointIter(self) - def reverse_codepoint_iter(self): - return Utf8ReverseCodePointIter(self) - @specialize.argtype(1, 2) def _bound_check(self, start, end): if start is None: @@ -432,7 +429,7 @@ else: break - start_byte = iter.byte_pos + start_byte = iter._byte_pos assert start_byte >= 0 if maxsplit == 0: @@ -449,7 +446,7 @@ self._is_ascii)) break - end = iter.byte_pos + end = iter._byte_pos assert end >= 0 res.append(Utf8Str(self.bytes[start_byte:end], self._is_ascii)) maxsplit -= 1 @@ -466,32 +463,32 @@ other_bytes = other.bytes return [Utf8Str(s) for s in self.bytes.rsplit(other_bytes, maxsplit)] + if len(self) == 0: + return [] + res = [] - iter = self.reverse_codepoint_iter() + iter = self.iter(len(self) - 1) while True: # Find the start of the next word - for cd in iter: - if not unicodedb.isspace(cd): - break - else: + while iter.pos() >= 0 and unicodedb.isspace(iter.current()): + iter.move(-1) + if iter.pos() < 0: break - start_byte = self.next_char(iter.byte_pos) - + start_byte = self.next_char(iter.byte_pos()) if maxsplit == 0: res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii)) break # Find the end of the word - for cd in iter: - if unicodedb.isspace(cd): - break - else: + while iter.pos() >= 0 and not 
unicodedb.isspace(iter.current()): + iter.move(-1) + if iter.pos() < 0: # We hit the end of the string res.append(Utf8Str(self.bytes[0:start_byte], self._is_ascii)) break - end_byte = self.next_char(iter.byte_pos) + end_byte = self.next_char(iter.byte_pos()) res.append(Utf8Str(self.bytes[end_byte:start_byte], self._is_ascii)) maxsplit -= 1 @@ -756,117 +753,27 @@ # _______________________________________________ -# iter.current is the current (ie the last returned) element -# iter.pos isthe position of the current element -# iter.byte_pos isthe byte position of the current element -# In the before-the-start state, for foward iterators iter.pos and -# iter.byte_pos are -1. For reverse iterators, they are len(ustr) and -# len(ustr.bytes) respectively. - class ForwardIterBase(object): def __init__(self, ustr): - self.ustr = ustr - self.pos = -1 - - self._byte_pos = 0 - self.byte_pos = -1 - self.current = self._default + self._str = ustr + self._byte_pos = -1 def __iter__(self): return self def next(self): - if self.pos + 1 == len(self.ustr): + if self._byte_pos == -1: + if len(self._str) == 0: + raise StopIteration() + self._byte_pos = 0 + return self._value(0) + + self._byte_pos = self._str.next_char(self._byte_pos) + if self._byte_pos == len(self._str.bytes): raise StopIteration() - self.pos += 1 - self.byte_pos = self._byte_pos - - self.current = self._value(self.byte_pos) - - self._byte_pos = self.ustr.next_char(self._byte_pos) - return self.current - - def peek_next(self): return self._value(self._byte_pos) - def peek_prev(self): - return self._value(self._move_backward(self.byte_pos)) - - def move(self, count): - if count > 0: - self.pos += count - - while count != 1: - self._byte_pos = self.ustr.next_char(self._byte_pos) - count -= 1 - self.byte_pos = self._byte_pos - self._byte_pos = self.ustr.next_char(self._byte_pos) - self.current = self._value(self.byte_pos) - - elif count < 0: - self.pos += count - while count < -1: - self.byte_pos = 
self.ustr.prev_char(self.byte_pos) - count += 1 - self._byte_pos = self.byte_pos - self.byte_pos = self.ustr.prev_char(self.byte_pos) - self.current = self._value(self.byte_pos) - - def copy(self): - iter = self.__class__(self.ustr) - iter.pos = self.pos - iter.byte_pos = self.byte_pos - iter._byte_pos = self._byte_pos - iter.current = self.current - return iter - -class ReverseIterBase(object): - def __init__(self, ustr): - self.ustr = ustr - self.pos = len(ustr) - self.byte_pos = len(ustr.bytes) - self.current = self._default - - def __iter__(self): - return self - - def next(self): - if self.pos == 0: - raise StopIteration() - - self.pos -= 1 - self.byte_pos = self.ustr.prev_char(self.byte_pos) - self.current = self._value(self.byte_pos) - return self.current - - def peek_next(self): - return self._value(self.ustr.prev_char(self.byte_pos)) - - def peek_prev(self): - return self._value(self.ustr.next_char(self.byte_pos)) - - def move(self, count): - if count > 0: - self.pos -= count - while count != 0: - self.byte_pos = self.ustr.prev_char(self.byte_pos) - count -= 1 - self.current = self._value(self.byte_pos) - elif count < 0: - self.pos -= count - while count != 0: - self.byte_pos = self.ustr.next_char(self.byte_pos) - count += 1 - self.current = self._value(self.byte_pos) - - def copy(self): - iter = self.__class__(self.ustr) - iter.pos = self.pos - iter.byte_pos = self.byte_pos - iter.current = self.current - return iter - def make_iterator(name, base, calc_value, default): class C(object): import_from_mixin(base, ['__init__', '__iter__']) @@ -876,32 +783,91 @@ return C def codepoint_calc_value(self, byte_pos): - if byte_pos == -1 or byte_pos == len(self.ustr.bytes): + if byte_pos == -1 or byte_pos == len(self._str.bytes): return -1 - return utf8ord_bytes(self.ustr.bytes, byte_pos) + return utf8ord_bytes(self._str.bytes, byte_pos) def character_calc_value(self, byte_pos): - if byte_pos == -1 or byte_pos == len(self.ustr.bytes): + if byte_pos == -1 or byte_pos 
== len(self._str.bytes): return None - length = utf8_code_length[ord(self.ustr.bytes[self.byte_pos])] - return Utf8Str(''.join([self.ustr.bytes[i] - for i in range(self.byte_pos, self.byte_pos + length)]), + length = utf8_code_length[ord(self._str.bytes[self._byte_pos])] + return Utf8Str(''.join([self._str.bytes[i] + for i in range(self._byte_pos, self._byte_pos + length)]), length == 1) Utf8CodePointIter = make_iterator("Utf8CodePointIter", ForwardIterBase, codepoint_calc_value, -1) Utf8CharacterIter = make_iterator("Utf8CharacterIter", ForwardIterBase, character_calc_value, None) -Utf8ReverseCodePointIter = make_iterator( - "Utf8ReverseCodePointIter", ReverseIterBase, codepoint_calc_value, -1) -Utf8ReverseCharacterIter = make_iterator( - "Utf8ReverseCharacterIter", ReverseIterBase, character_calc_value, None) del make_iterator del codepoint_calc_value del character_calc_value del ForwardIterBase -del ReverseIterBase +# _______________________________________________ + +class Utf8Iterator(object): + def __init__(self, str, start=0): + self._str = str + + self._pos = start + self._byte_pos = str.index_of_char(start) + + self._calc_current() + + def _calc_current(self): + if self._pos >= len(self._str) or self._pos < 0: + raise IndexError() + else: + self._current = utf8ord_bytes(self._str.bytes, self._byte_pos) + + def current(self): + if self._current == -1: + self._calc_current() + return self._current + + def pos(self): + return self._pos + + def byte_pos(self): + return self._byte_pos + + def move(self, count): + # TODO: As an optimization, we could delay moving byte_pos until we + # _calc_current + if count > 0: + self._pos += count + + if self._pos < 0: + self._byte_pos = 0 + else: + while count != 0: + self._byte_pos = self._str.next_char(self._byte_pos) + count -= 1 + self._current = -1 + + elif count < 0: + self._pos += count + + if self._pos < 0: + self._byte_pos = 0 + else: + while count < 0: + self._byte_pos = self._str.prev_char(self._byte_pos) + count 
+= 1 + self._current = -1 + + def finished(self): + return self._pos >= len(self._str) + + def copy(self): + i = Utf8Iterator(self._str) + i._pos = self._pos + i._byte_pos = self._byte_pos + i._current = self._current + return i + + diff --git a/pypy/interpreter/utf8_codecs.py b/pypy/interpreter/utf8_codecs.py --- a/pypy/interpreter/utf8_codecs.py +++ b/pypy/interpreter/utf8_codecs.py @@ -327,15 +327,15 @@ if size == 0: return '' result = StringBuilder(size) - pos = 0 - while pos < size: - oc = utf8ord(s, pos) - + iter = s.iter() + while not iter.finished(): + oc = iter.current() if oc < 0x100: result.append(chr(oc)) else: raw_unicode_escape_helper(result, oc) - pos += 1 + + iter.move(1) return result.build() @@ -397,28 +397,29 @@ if size == 0: return '' result = StringBuilder(size) - pos = 0 - while pos < size: - od = utf8ord(p, pos) + iter = p.iter() + while not iter.finished(): + od = iter.current() if od < limit: result.append(chr(od)) - pos += 1 + iter.move(1) else: - # startpos for collecting unencodable chars - collstart = pos - collend = pos+1 - while collend < len(p) and utf8ord(p, collend) >= limit: - collend += 1 + coll = iter.copy() + while not coll.finished() and coll.current() >= limit: + coll.move(1) + collstart = iter.pos() + collend = coll.pos() + ru, rs, pos = errorhandler(errors, encoding, reason, p, collstart, collend) + iter.move(pos - iter.pos()) if rs is not None: # py3k only result.append(rs) continue - for ch in ru: - cd = utf8ord(ch, 0) + for cd in ru.codepoint_iter(): if cd < limit: result.append(chr(cd)) else: @@ -452,41 +453,48 @@ allow_surrogates) def unicode_encode_utf_8_impl(s, size, errors, errorhandler, allow_surrogates): - iter = s.codepoint_iter() - for oc in iter: + iter = s.iter() + + while not iter.finished(): + oc = iter.current() if oc >= 0xD800 and oc <= 0xDFFF: break - if iter.pos == size: - return s.bytes - else: + iter.move(1) + if iter.finished(): return s.bytes result = Utf8Builder(len(s.bytes)) - 
result.append_slice(s.bytes, 0, iter.byte_pos) + result.append_slice(s.bytes, 0, iter.byte_pos()) - iter.move(-1) - for oc in iter: + while not iter.finished(): + oc = iter.current() + iter.move(1) + if oc >= 0xD800 and oc <= 0xDFFF: # Check the next character to see if this is a surrogate pair - if (iter.pos != len(s) and oc <= 0xDBFF and - 0xDC00 <= iter.peek_next() <= 0xDFFF): - oc2 = iter.next() + if (not iter.finished() and oc <= 0xDBFF and + 0xDC00 <= iter.current() <= 0xDFFF): + + oc2 = iter.current() result.append_codepoint( ((oc - 0xD800) << 10 | (oc2 - 0xDC00)) + 0x10000) + iter.move(1) + elif allow_surrogates: result.append_codepoint(oc) else: ru, rs, pos = errorhandler(errors, 'utf8', 'surrogates not allowed', s, - iter.pos-1, iter.pos) - iter.move(pos - iter.pos) + iter.pos()-2, iter.pos()-1) + iter.move(pos - iter.pos()) if rs is not None: # py3k only result.append_utf8(rs) + iter.move(1) continue - for ch in ru: - if ord(ch) < 0x80: - result.append_ascii(ch) + for ch in ru.codepoint_iter(): + if ch < 0x80: + result.append_ascii(chr(ch)) else: errorhandler('strict', 'utf8', 'surrogates not allowed', @@ -809,10 +817,10 @@ _STORECHAR(result, 0xFEFF, BYTEORDER) byteorder = BYTEORDER - i = 0 - while i < size: - ch = utf8ord(s, i) - i += 1 + iter = s.iter() + while not iter.finished(): + ch = iter.current() + iter.move(1) ch, ch2 = create_surrogate_pair(ch) _STORECHAR(result, ch, byteorder) @@ -980,16 +988,16 @@ _STORECHAR32(result, 0xFEFF, BYTEORDER) byteorder = BYTEORDER - i = 0 - while i < size: - ch = utf8ord(s, i) - i += 1 + iter = s.iter() + while not iter.finished(): + ch = iter.current() + iter.move(1) ch2 = 0 if MAXUNICODE < 65536 and 0xD800 <= ch <= 0xDBFF and i < size: - ch2 = ord(s[i]) + ch2 = iter.current() if 0xDC00 <= ch2 <= 0xDFFF: ch = (((ch & 0x3FF)<<10) | (ch2 & 0x3FF)) + 0x10000; - i += 1 + iter.move(1) _STORECHAR32(result, ch, byteorder) return result.build() @@ -1228,10 +1236,9 @@ base64bits = 0 base64buffer = 0 - # TODO: Looping like 
this is worse than O(n) - pos = 0 - while pos < size: - oc = utf8ord(s, pos) + iter = s.iter() + while not iter.finished(): + oc = iter.current() if not inShift: if oc == ord('+'): result.append('+-') @@ -1260,7 +1267,7 @@ else: base64bits, base64buffer = _utf7_ENCODE_CHAR( result, oc, base64bits, base64buffer) - pos += 1 + iter.move(1) if base64bits: result.append(_utf7_TO_BASE64(base64buffer << (6 - base64bits))) @@ -1318,15 +1325,17 @@ if size == 0: return '' result = StringBuilder(size) - pos = 0 - while pos < size: - ch = s[pos] + + iter = s.iter() + while not iter.finished(): + ch = utf8chr(iter.current()) c = mapping.get(ch, '') if len(c) == 0: ru, rs, pos = errorhandler(errors, "charmap", "character maps to ", - s, pos, pos + 1) + s, iter.pos(), iter.pos() + 1) + iter.move(pos - iter.pos()) if rs is not None: # py3k only result.append(rs) @@ -1337,11 +1346,11 @@ errorhandler( "strict", "charmap", "character maps to ", - s, pos, pos + 1) + s, iter.pos(), iter.pos() + 1) result.append(c2) continue result.append(c) - pos += 1 + iter.move(1) return result.build() # }}} @@ -1367,9 +1376,9 @@ errorhandler = default_unicode_error_decode if BYTEORDER == 'little': - iorder = [0, 1, 2, 3] + iorder = (0, 1, 2, 3) else: - iorder = [3, 2, 1, 0] + iorder = (3, 2, 1, 0) if size == 0: return Utf8Str(''), 0 @@ -1542,30 +1551,35 @@ if size == 0: return '' result = StringBuilder(size) - pos = 0 - while pos < size: - ch = utf8ord(s, pos) + + iter = s.iter() + while not iter.finished(): + ch = iter.current() + if unicodedb.isspace(ch): result.append(' ') - pos += 1 + iter.move(1) continue + try: decimal = unicodedb.decimal(ch) except KeyError: pass else: result.append(chr(48 + decimal)) - pos += 1 + iter.move(1) continue + if 0 < ch < 256: result.append(chr(ch)) - pos += 1 + iter.move(1) continue + # All other characters are considered unencodable - collstart = pos - collend = collstart + 1 - while collend < size: - ch = utf8ord(s, collend) + colliter = iter.copy() + 
colliter.move(1) + while not colliter.finished(): + ch = colliter.current() try: if (0 < ch < 256 or unicodedb.isspace(ch) or @@ -1574,15 +1588,19 @@ except KeyError: # not a decimal pass - collend += 1 + colliter.move(1) + + collstart = iter.pos() + collend = colliter.pos() + msg = "invalid decimal Unicode string" ru, rs, pos = errorhandler(errors, 'decimal', msg, s, collstart, collend) + iter.move(pos - iter.pos()) if rs is not None: # py3k only errorhandler('strict', 'decimal', msg, s, collstart, collend) - for i in range(len(ru)): - ch = utf8.ORD(ru, i) + for ch in ru.codepoint_iter(): if unicodedb.isspace(ch): result.append(' ') continue From noreply at buildbot.pypy.org Thu Aug 14 10:35:33 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:33 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: * _is_pinned takes 'pinned_objects' into account. Message-ID: <20140814083533.36EDB1C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72788:28a9b76f8eef Date: 2014-08-13 14:12 +0200 http://bitbucket.org/pypy/pypy/changeset/28a9b76f8eef/ Log: * _is_pinned takes 'pinned_objects' into account. * removed 'we_are_translated' from pin()/unpin() as they are always not translated (rgc.py version). * added some comments for clarification diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -46,11 +46,8 @@ Note further that pinning an object does not prevent it from being collected if it is not used anymore. """ - if we_are_translated(): - return False - else: - pinned_objects.append(obj) - return True + pinned_objects.append(obj) + return True class PinEntry(ExtRegistryEntry): @@ -68,11 +65,7 @@ """Unpin 'obj', allowing it to move again. Must only be called after a call to pin(obj) returned True. 
""" - if we_are_translated(): - raise AssertionError("pin() always returns False, " - "so unpin() should not be called") - else: - pinned_objects.remove(obj) + pinned_objects.remove(obj) class UnpinEntry(ExtRegistryEntry): @@ -87,7 +80,7 @@ def _is_pinned(obj): """Method to check if 'obj' is pinned.""" - return False + return obj in pinned_objects class IsPinnedEntry(ExtRegistryEntry): _about_ = _is_pinned @@ -160,9 +153,12 @@ on objects that are already a bit old, so have a chance to be already non-movable.""" if not we_are_translated(): - return p not in pinned_objects + # for testing purpose + return not _is_pinned(p) # if _is_pinned(p): + # although a pinned object can't move we must return 'False'. A pinned + # object can be unpinned any time and becomes movable. return False i = 0 while can_move(p): From noreply at buildbot.pypy.org Thu Aug 14 10:35:34 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:34 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: renamed parameters for consistency Message-ID: <20140814083534.805601C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72789:9a727746d96f Date: 2014-08-13 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/9a727746d96f/ Log: renamed parameters for consistency diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -53,7 +53,7 @@ class PinEntry(ExtRegistryEntry): _about_ = pin - def compute_result_annotation(self, s_p): + def compute_result_annotation(self, s_obj): from rpython.annotator import model as annmodel return annmodel.SomeBool() @@ -71,7 +71,7 @@ class UnpinEntry(ExtRegistryEntry): _about_ = unpin - def compute_result_annotation(self, s_p): + def compute_result_annotation(self, s_obj): pass def specialize_call(self, hop): @@ -85,7 +85,7 @@ class IsPinnedEntry(ExtRegistryEntry): _about_ = _is_pinned - def compute_result_annotation(self, s_p): + def 
compute_result_annotation(self, s_obj): from rpython.annotator import model as annmodel return annmodel.SomeBool() From noreply at buildbot.pypy.org Thu Aug 14 10:35:35 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:35 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: as discussed with fijal (irc, 2014-08-13) should be correct. Message-ID: <20140814083535.B01551C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72790:7243202f2115 Date: 2014-08-13 16:54 +0200 http://bitbucket.org/pypy/pypy/changeset/7243202f2115/ Log: as discussed with fijal (irc, 2014-08-13) should be correct. diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -480,9 +480,9 @@ 'gc_writebarrier': LLOp(canrun=True), 'gc_writebarrier_before_copy': LLOp(canrun=True), 'gc_heap_stats' : LLOp(canmallocgc=True), - 'gc_pin' : LLOp(canrun=True), # XXX understand this, correct? (groggi) - 'gc_unpin' : LLOp(canrun=True), # XXX understand this, correct? (groggi) - 'gc__is_pinned' : LLOp(canrun=True), # XXX understand this, correct? 
(groggi) + 'gc_pin' : LLOp(canrun=True), + 'gc_unpin' : LLOp(canrun=True), + 'gc__is_pinned' : LLOp(canrun=True), 'gc_get_rpy_roots' : LLOp(), 'gc_get_rpy_referents': LLOp(), From noreply at buildbot.pypy.org Thu Aug 14 10:35:36 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:36 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: comments edited Message-ID: <20140814083536.DDB581C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72791:2a4ed31fc381 Date: 2014-08-13 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/2a4ed31fc381/ Log: comments edited diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1225,13 +1225,11 @@ # but this flag is progressively removed in the sweeping phase. # All objects should have this flag, except if they - # don't have any GC pointer + # don't have any GC pointer or are pinned objects typeid = self.get_type_id(obj) - if not self._is_pinned(obj): - # XXX do we need checks if the object is actually pinned? (groggi) - if self.has_gcptr(typeid): - ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, - "missing GCFLAG_TRACK_YOUNG_PTRS") + if self.has_gcptr(typeid) and not self._is_pinned(obj): + ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0, + "missing GCFLAG_TRACK_YOUNG_PTRS") # the GCFLAG_FINALIZATION_ORDERING should not be set between coll. ll_assert(self.header(obj).tid & GCFLAG_FINALIZATION_ORDERING == 0, "unexpected GCFLAG_FINALIZATION_ORDERING") @@ -1530,15 +1528,11 @@ # # Keeps track of surviving pinned objects. See also '_trace_drag_out()' # where this stack is filled. Pinning an object only prevents it from - # being move, not from being collected if it is not used anymore. + # being moved, not from being collected if it is not reachable anymore. 
self.surviving_pinned_objects = self.AddressStack() # # The following counter keeps track of the amount of alive and pinned - # objects inside the nursery. The counter is reset, as we have to - # check which pinned objects are actually still alive. Pinning an - # object does not prevent the removal of an object, if it's not used - # anymore. - # XXX is this true? does it make sense? (groggi) + # objects inside the nursery. self.pinned_objects_in_nursery = 0 # # Before everything else, remove from 'old_objects_pointing_to_young' From noreply at buildbot.pypy.org Thu Aug 14 10:35:38 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:38 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: added tests for prebuilt objects and pinning Message-ID: <20140814083538.15E6C1C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72792:d28c2f266a13 Date: 2014-08-13 17:54 +0200 http://bitbucket.org/pypy/pypy/changeset/d28c2f266a13/ Log: added tests for prebuilt objects and pinning diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -40,6 +40,13 @@ self.gc.unpin(adr) assert not self.gc._is_pinned(adr) + def test_prebuilt_not_pinnable(self): + ptr = lltype.malloc(S, immortal=True) + self.consider_constant(ptr) + assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr)) + self.gc.collect() + assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr)) + # XXX test with multiple mallocs, and only part of them is pinned @@ -445,6 +452,41 @@ self.pin_referenced_from_young_in_stackroots(self.gc.collect) + def pin_referenced_from_prebuilt(self, collect_func): + # scenario: a prebuilt object points to a pinned object. Check if the + # pinned object doesn't move and is still accessible. 
+ # + prebuilt_ptr = lltype.malloc(S, immortal=True) + prebuilt_ptr.someInt = 900 + self.consider_constant(prebuilt_ptr) + prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr) + collect_func() + # + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + self.write(prebuilt_ptr, 'next', pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + # + # check if everything is as expected + assert not self.gc.is_in_nursery(prebuilt_adr) + assert self.gc.is_in_nursery(pinned_adr) + assert pinned_ptr == prebuilt_ptr.next + assert pinned_ptr.someInt == 100 + # + # do a collection and check again + collect_func() + assert self.gc.is_in_nursery(pinned_adr) + assert pinned_ptr == prebuilt_ptr.next + assert pinned_ptr.someInt == 100 + + def test_pin_referenced_from_prebuilt_minor_collection(self): + self.pin_referenced_from_prebuilt(self.gc.minor_collection) + + def test_pin_referenced_from_prebuilt_major_collection(self): + self.pin_referenced_from_prebuilt(self.gc.collect) + + def pin_shadow_1(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) From noreply at buildbot.pypy.org Thu Aug 14 10:35:39 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 10:35:39 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: assert for case that should never happen Message-ID: <20140814083539.3B34E1C0323@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72793:d6f675594c58 Date: 2014-08-13 20:04 +0200 http://bitbucket.org/pypy/pypy/changeset/d6f675594c58/ Log: assert for case that should never happen diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -135,8 +135,7 @@ # if op.is_guard() or op.getopnum() == rop.FINISH: llref = cast_instance_to_gcref(op.getdescr()) - if not rgc._make_sure_does_not_move(llref): - raise 
NotImplementedError("blub") # XXX handle (groggi) + assert rgc._make_sure_does_not_move(llref) gcrefs_output_list.append(llref) newops.append(op) return newops From noreply at buildbot.pypy.org Thu Aug 14 11:47:14 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 11:47:14 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: size of array for movable objects is now as large as it needs to be. Message-ID: <20140814094714.85E001C06C9@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72794:5dcc35cb8954 Date: 2014-08-14 11:30 +0200 http://bitbucket.org/pypy/pypy/changeset/5dcc35cb8954/ Log: size of array for movable objects is now as large as it needs to be. there is still some optimization possible: if only one object is pinned but used in six operations, the array will contain six pointers pointing to the same pinned object. diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -112,8 +112,8 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, pinned_obj_tracker): - newops = [] + def _record_constptrs(self, op, gcrefs_output_list, moving_output_list): + moving_output_list[op] = [] for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -121,22 +121,34 @@ if rgc._make_sure_does_not_move(p): gcrefs_output_list.append(p) else: - # encountered a pointer that points to a possibly moving object. - # Solve the problem by double loading the address to the object - # each run of the JITed code. 
- result_ptr = BoxPtr() - array_index = pinned_obj_tracker.add_ref(p) - load_op = ResOperation(rop.GETARRAYITEM_GC, - [ConstPtr(pinned_obj_tracker.ref_array_gcref), ConstInt(array_index)], - result_ptr, - descr=pinned_obj_tracker.ref_array_descr) - newops.append(load_op) - op.setarg(i, result_ptr) + moving_output_list[op].append(i) # if op.is_guard() or op.getopnum() == rop.FINISH: llref = cast_instance_to_gcref(op.getdescr()) assert rgc._make_sure_does_not_move(llref) gcrefs_output_list.append(llref) + # + if len(moving_output_list[op]) == 0: + del moving_output_list[op] + + def _rewrite_constptrs(self, op, moving_output_list, pinned_obj_tracker): + newops = [] + for arg_i in moving_output_list[op]: + v = op.getarg(arg_i) + # assert to make sure we got what we expected + assert isinstance(v, ConstPtr) + assert bool(v.value) + p = v.value + result_ptr = BoxPtr() + array_index = pinned_obj_tracker.add_ref(p) + load_op = ResOperation(rop.GETARRAYITEM_GC, + [ConstPtr(pinned_obj_tracker.ref_array_gcref), + ConstInt(array_index)], + result_ptr, + descr=pinned_obj_tracker.ref_array_descr) + newops.append(load_op) + op.setarg(arg_i, result_ptr) + # newops.append(op) return newops @@ -147,18 +159,27 @@ # keep them alive if they end up as constants in the assembler # XXX add comment (groggi) - # XXX handle size in a not constant way? Get it from the GC? (groggi) - pinned_obj_tracker = PinnedObjectTracker(cpu, 100) - if not we_are_translated(): - self.last_pinned_object_tracker = pinned_obj_tracker - gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref) - rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref) - + newnewops = [] # XXX better name... 
(groggi) + moving_output_list = {} for op in newops: - ops = self._record_constptrs(op, gcrefs_output_list, pinned_obj_tracker) - newnewops.extend(ops) + self._record_constptrs(op, gcrefs_output_list, moving_output_list) + # + if len(moving_output_list) > 0: + pinned_obj_tracker = PinnedObjectTracker(cpu, len(moving_output_list)) + if not we_are_translated(): + self.last_pinned_object_tracker = pinned_obj_tracker + gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref) + rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref) + + for op in newops: + if op in moving_output_list: + reops = self._rewrite_constptrs(op, moving_output_list, + pinned_obj_tracker) + newnewops.extend(reops) + else: + newnewops.append(op) return newnewops @specialize.memo() From noreply at buildbot.pypy.org Thu Aug 14 12:42:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 12:42:46 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140814104246.68DF61C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r527:393ae5365189 Date: 2014-08-14 12:42 +0200 http://bitbucket.org/pypy/pypy.org/changeset/393ae5365189/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $52304 of $105000 (49.8%) + $52313 of $105000 (49.8%)
          From noreply at buildbot.pypy.org Thu Aug 14 13:02:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 13:02:16 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix. Message-ID: <20140814110216.DA3C81D2320@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1312:43bf7ea2e593 Date: 2014-08-14 13:02 +0200 http://bitbucket.org/pypy/stmgc/changeset/43bf7ea2e593/ Log: Fix. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -397,17 +397,20 @@ mark_visit_object(current->ss, segment_base); } mark_visit_object(tl->thread_local_obj, segment_base); - stm_rewind_jmp_enum_shadowstack(tl, mark_visit_objects_from_ss); tl = tl->next; } while (tl != stm_all_thread_locals); long i; for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state != TS_NONE) + if (get_priv_segment(i)->transaction_state != TS_NONE) { mark_visit_object( get_priv_segment(i)->threadlocal_at_start_of_transaction, get_segment_base(i)); + stm_rewind_jmp_enum_shadowstack( + get_segment(i)->running_thread, + mark_visit_objects_from_ss); + } } } diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -194,12 +194,12 @@ /* frees all saved stack copies */ assert(_has_mutex()); struct _rewind_jmp_moved_s *p = rjthread->moved_off; - struct _rewind_jmp_moved_s *pnext; while (p) { - pnext = p->next; + struct _rewind_jmp_moved_s *pnext = p->next; rj_free(p); p = pnext; } rjthread->moved_off = NULL; rjthread->moved_off_base = NULL; + rjthread->moved_off_ssbase = NULL; } From noreply at buildbot.pypy.org Thu Aug 14 15:02:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:02:55 +0200 (CEST) Subject: [pypy-commit] stmgc default: Move the mutex locking outside rewind_setjmp.c, and remove some of Message-ID: <20140814130255.056FD1C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1313:1e16b1651dd4 
Date: 2014-08-14 14:46 +0200 http://bitbucket.org/pypy/stmgc/changeset/1e16b1651dd4/ Log: Move the mutex locking outside rewind_setjmp.c, and remove some of it, with some justification as comments. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -328,8 +328,6 @@ { assert(!_stm_in_transaction(tl)); - s_mutex_lock(); - retry: if (inevitable) { wait_for_end_of_inevitable_transaction(tl); @@ -390,6 +388,7 @@ long stm_start_transaction(stm_thread_local_t *tl) { + s_mutex_lock(); #ifdef STM_NO_AUTOMATIC_SETJMP long repeat_count = 0; /* test/support.py */ #else @@ -401,6 +400,7 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + s_mutex_lock(); _stm_start_transaction(tl, true); } @@ -1077,6 +1077,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP _test_run_abort(tl); #else + s_mutex_lock(); stm_rewind_jmp_longjmp(tl); #endif } diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -4,12 +4,6 @@ #include #include -#ifndef _STM_CORE_H_ -long _has_mutex() {return 1;} -void s_mutex_lock() {} -void s_mutex_unlock() {} -#endif - struct _rewind_jmp_moved_s { struct _rewind_jmp_moved_s *next; @@ -42,8 +36,6 @@ void *ssstop; size_t stack_size, ssstack_size; - assert(_has_mutex()); - assert(rjthread->head != NULL); stop = rjthread->head->frame_base; ssstop = rjthread->head->shadowstack_base; @@ -98,11 +90,14 @@ rjthread->repeat_count = result; /* snapshot of top frame: needed every time because longjmp() frees - the previous one. Need to have mutex locked otherwise a concurrent - GC may get garbage while saving shadow stack */ - s_mutex_lock(); + the previous one. Note that this function is called with the + mutex already acquired. Although it's not the job of this file, + we assert it is indeed acquired here. 
This is needed, otherwise a + concurrent GC may get garbage while saving shadow stack */ +#ifdef _STM_CORE_H_ + assert(_has_mutex()); +#endif copy_stack(rjthread, (char *)&saved, saved.ss1); - s_mutex_unlock(); return result; } @@ -114,7 +109,6 @@ current stack, expanding it if necessary. The shadowstack should already be restored at this point (restore_shadowstack()) */ assert(rjthread->moved_off_base != NULL); - s_mutex_lock(); while (rjthread->moved_off) { struct _rewind_jmp_moved_s *p = rjthread->moved_off; @@ -123,7 +117,6 @@ target -= p->stack_size; if (target < stack_free) { /* need more stack space! */ - s_mutex_unlock(); do_longjmp(rjthread, alloca(stack_free - target)); abort(); /* unreachable */ } @@ -134,7 +127,12 @@ rj_free(p); } - s_mutex_unlock(); +#ifdef _STM_CORE_H_ + /* This function must be called with the mutex held. It will + remain held across the longjmp that follows and into the + target rewind_jmp_setjmp() function. */ + assert(_has_mutex()); +#endif __builtin_longjmp(rjthread->jmpbuf, 1); } @@ -153,7 +151,9 @@ struct _rewind_jmp_moved_s *p = rjthread->moved_off; char *sstarget = rjthread->moved_off_ssbase; +#ifdef _STM_CORE_H_ assert(_has_mutex()); +#endif while (p) { if (p->shadowstack_size) { @@ -178,21 +178,26 @@ { /* called when leaving a frame. copies the now-current frame to the list of stack-slices */ - s_mutex_lock(); +#ifdef _STM_CORE_H_ + /* A transaction should be running now. This means in particular + that it's not possible that a major GC runs concurrently with + this code (and tries to read the shadowstack slice). 
*/ + assert(_seems_to_be_running_transaction()); +#endif if (rjthread->head == NULL) { _rewind_jmp_free_stack_slices(rjthread); - s_mutex_unlock(); return; } assert(rjthread->moved_off_base < (char *)rjthread->head); copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase); - s_mutex_unlock(); } void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) { /* frees all saved stack copies */ - assert(_has_mutex()); +#ifdef _STM_CORE_H_ + assert(_seems_to_be_running_transaction()); /* see previous function */ +#endif struct _rewind_jmp_moved_s *p = rjthread->moved_off; while (p) { struct _rewind_jmp_moved_s *pnext = p->next; From noreply at buildbot.pypy.org Thu Aug 14 15:02:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:02:56 +0200 (CEST) Subject: [pypy-commit] stmgc default: Fix (shown by demo_random2) Message-ID: <20140814130256.4BD831C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1314:94d281041161 Date: 2014-08-14 15:01 +0200 http://bitbucket.org/pypy/stmgc/changeset/94d281041161/ Log: Fix (shown by demo_random2) diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -1027,7 +1027,7 @@ } #endif -static void abort_with_mutex(void) +static stm_thread_local_t *abort_with_mutex_no_longjmp(void) { assert(_has_mutex()); dprintf(("~~~ ABORT\n")); @@ -1060,6 +1060,12 @@ /* Broadcast C_ABORTED to wake up contention.c */ cond_broadcast(C_ABORTED); + return tl; +} + +static void abort_with_mutex(void) +{ + stm_thread_local_t *tl = abort_with_mutex_no_longjmp(); s_mutex_unlock(); /* It seems to be a good idea, at least in some examples, to sleep diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -272,6 +272,7 @@ static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); +static stm_thread_local_t *abort_with_mutex_no_longjmp(void); static void abort_data_structures_from_segment_num(int 
segment_num); static inline bool was_read_remote(char *base, object_t *obj, diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -181,20 +181,17 @@ assert(tl->associated_segment_num == i); assert(pr->transaction_state == TS_REGULAR); set_gs_register(get_segment_base(i)); + assert(STM_SEGMENT->segment_num == i); - rewind_jmp_buf rjbuf; - stm_rewind_jmp_enterframe(tl, &rjbuf); - if (stm_rewind_jmp_setjmp(tl) == 0) { + s_mutex_lock(); #ifndef NDEBUG - pr->running_pthread = pthread_self(); + pr->running_pthread = pthread_self(); #endif - pr->pub.running_thread->shadowstack = ( - pr->shadowstack_at_start_of_transaction); - strcpy(pr->marker_self, "fork"); - stm_abort_transaction(); - } - stm_rewind_jmp_forget(tl); - stm_rewind_jmp_leaveframe(tl, &rjbuf); + pr->pub.running_thread->shadowstack = ( + pr->shadowstack_at_start_of_transaction); + strcpy(pr->marker_self, "fork"); + abort_with_mutex_no_longjmp(); + s_mutex_unlock(); } static void forksupport_child(void) From noreply at buildbot.pypy.org Thu Aug 14 15:14:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:14:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/1815f493a1c5 Message-ID: <20140814131414.50A811C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72795:b56e8c73f107 Date: 2014-08-12 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b56e8c73f107/ Log: import stmgc/1815f493a1c5 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -bdc151305c79 +1815f493a1c5 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -394,7 +394,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP long 
repeat_count = 0; /* test/support.py */ #else - long repeat_count = rewind_jmp_setjmp(&tl->rjthread); + long repeat_count = stm_rewind_jmp_setjmp(tl); #endif _stm_start_transaction(tl, false); return repeat_count; @@ -829,7 +829,7 @@ dprintf(("commit_transaction\n")); assert(STM_SEGMENT->nursery_end == NURSERY_END); - rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); + stm_rewind_jmp_forget(STM_SEGMENT->running_thread); /* if a major collection is required, do it here */ if (is_major_collection_requested()) { @@ -984,12 +984,23 @@ reset_modified_from_other_segments(segment_num); _verify_cards_cleared_in_all_lists(pseg); - /* reset the tl->shadowstack and thread_local_obj to their original - value before the transaction start */ + /* reset tl->shadowstack and thread_local_obj to their original + value before the transaction start. Also restore the content + of the shadowstack here. */ stm_thread_local_t *tl = pseg->pub.running_thread; +#ifdef STM_NO_AUTOMATIC_SETJMP + /* In tests, we don't save and restore the shadowstack correctly. + Be sure to not change items below shadowstack_at_start_of_transaction. + There is no such restrictions in non-Python-based tests. */ assert(tl->shadowstack >= pseg->shadowstack_at_start_of_transaction); - pseg->shadowstack_at_abort = tl->shadowstack; tl->shadowstack = pseg->shadowstack_at_start_of_transaction; +#else + /* NB. careful, this function might be called more than once to + abort a given segment. Make sure that + stm_rewind_jmp_restore_shadowstack() is idempotent. 
*/ + stm_rewind_jmp_restore_shadowstack(tl); + assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); +#endif tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; tl->last_abort__bytes_in_nursery = bytes_in_nursery; @@ -1064,7 +1075,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP _test_run_abort(tl); #else - rewind_jmp_longjmp(&tl->rjthread); + stm_rewind_jmp_longjmp(tl); #endif } @@ -1079,7 +1090,7 @@ marker_fetch_inev(); wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; - rewind_jmp_forget(&STM_SEGMENT->running_thread->rjthread); + stm_rewind_jmp_forget(STM_SEGMENT->running_thread); clear_callbacks_on_abort(); } else { diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -187,7 +187,6 @@ 'thread_local_obj' field. */ struct stm_shadowentry_s *shadowstack_at_start_of_transaction; object_t *threadlocal_at_start_of_transaction; - struct stm_shadowentry_s *shadowstack_at_abort; /* Already signalled to commit soon: */ bool signalled_to_commit_soon; diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -185,7 +185,7 @@ rewind_jmp_buf rjbuf; stm_rewind_jmp_enterframe(tl, &rjbuf); - if (rewind_jmp_setjmp(&tl->rjthread) == 0) { + if (stm_rewind_jmp_setjmp(tl) == 0) { #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif @@ -194,7 +194,7 @@ strcpy(pr->marker_self, "fork"); stm_abort_transaction(); } - rewind_jmp_forget(&tl->rjthread); + stm_rewind_jmp_forget(tl); stm_rewind_jmp_leaveframe(tl, &rjbuf); } diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -332,9 
+332,20 @@ function with the interpreter's dispatch loop, you need to declare a local variable of type 'rewind_jmp_buf' and call these macros. */ #define stm_rewind_jmp_enterframe(tl, rjbuf) \ - rewind_jmp_enterframe(&(tl)->rjthread, rjbuf) + rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_leaveframe(tl, rjbuf) \ - rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf) + rewind_jmp_leaveframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) +#define stm_rewind_jmp_setjmp(tl) \ + rewind_jmp_setjmp(&(tl)->rjthread, (tl)->shadowstack) +#define stm_rewind_jmp_longjmp(tl) \ + rewind_jmp_longjmp(&(tl)->rjthread) +#define stm_rewind_jmp_forget(tl) \ + rewind_jmp_forget(&(tl)->rjthread) +#define stm_rewind_jmp_restore_shadowstack(tl) do { \ + assert(rewind_jmp_armed(&(tl)->rjthread)); \ + (tl)->shadowstack = (struct stm_shadowentry_s *) \ + rewind_jmp_restore_shadowstack(&(tl)->rjthread); \ +} while (0) /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. 
From noreply at buildbot.pypy.org Thu Aug 14 15:14:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:14:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Comparing this with the trunk version, I think that this is what is meant Message-ID: <20140814131415.7A94F1C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72796:209a49d5e176 Date: 2014-08-12 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/209a49d5e176/ Log: Comparing this with the trunk version, I think that this is what is meant diff --git a/rpython/jit/backend/tool/viewcode.py b/rpython/jit/backend/tool/viewcode.py --- a/rpython/jit/backend/tool/viewcode.py +++ b/rpython/jit/backend/tool/viewcode.py @@ -116,7 +116,7 @@ p = subprocess.Popen(symbollister % filename, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() - if not p.returncode: + if p.returncode: raise Exception('Encountered an error running nm: %s' % stderr) for line in stdout.splitlines(True): From noreply at buildbot.pypy.org Thu Aug 14 15:14:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:14:16 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1832: provide a better error message for some cases Message-ID: <20140814131416.BB6C71C06C9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72797:ed0f03db54a8 Date: 2014-08-14 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ed0f03db54a8/ Log: issue #1832: provide a better error message for some cases diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -62,6 +62,8 @@ msg.append("the following functions:") msg.append(" %s" % ("\n ".join(pfg), )) msg.append("are called with inconsistent numbers of arguments") + msg.append("(and/or the argument names are different, which is" + " not supported in this case)") if 
shape1[0] != shape2[0]: msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0])) else: diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -185,6 +185,7 @@ .+Sub1.fn .+Sub2.fn are called with inconsistent numbers of arguments +\(and/or the argument names are different, which is not supported in this case\) sometimes with \d arguments, sometimes with \d the callers of these functions are: .+otherfunc From noreply at buildbot.pypy.org Thu Aug 14 15:40:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 15:40:12 +0200 (CEST) Subject: [pypy-commit] stmgc default: More attempt to fix demo_random2 Message-ID: <20140814134012.ED5F31C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1315:29376f500349 Date: 2014-08-14 15:38 +0200 http://bitbucket.org/pypy/stmgc/changeset/29376f500349/ Log: More attempt to fix demo_random2 diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -1000,7 +1000,8 @@ /* we need to do this here and not directly in rewind_longjmp() because that is called when we already released everything (safe point) and a concurrent major GC could mess things up. 
*/ - stm_rewind_jmp_restore_shadowstack(tl); + if (tl->shadowstack != NULL) + stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; diff --git a/c7/stm/forksupport.c b/c7/stm/forksupport.c --- a/c7/stm/forksupport.c +++ b/c7/stm/forksupport.c @@ -187,9 +187,10 @@ #ifndef NDEBUG pr->running_pthread = pthread_self(); #endif - pr->pub.running_thread->shadowstack = ( - pr->shadowstack_at_start_of_transaction); strcpy(pr->marker_self, "fork"); + tl->shadowstack = NULL; + pr->shadowstack_at_start_of_transaction = NULL; + stm_rewind_jmp_forget(tl); abort_with_mutex_no_longjmp(); s_mutex_unlock(); } diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -195,9 +195,6 @@ void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) { /* frees all saved stack copies */ -#ifdef _STM_CORE_H_ - assert(_seems_to_be_running_transaction()); /* see previous function */ -#endif struct _rewind_jmp_moved_s *p = rjthread->moved_off; while (p) { struct _rewind_jmp_moved_s *pnext = p->next; From noreply at buildbot.pypy.org Thu Aug 14 16:19:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 16:19:17 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the double definition of _GNU_SOURCE Message-ID: <20140814141917.B3D8C1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72798:654eb5a6b76f Date: 2014-08-14 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/654eb5a6b76f/ Log: Fix the double definition of _GNU_SOURCE diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -919,14 +919,6 @@ filename = targetdir.join(modulename + '.c') f = filename.open('w') - if database.with_stm: - print >> f, '/* XXX temporary, for 
SYS_arch_prctl below */' - print >> f, '#define _GNU_SOURCE' - print >> f, '#include ' - print >> f, '#include ' - print >> f, '#include ' - print >> f, '#include ' - print >> f incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') @@ -935,6 +927,12 @@ # Header # print >> f, '#include "common_header.h"' + if database.with_stm: + print >> f, '/* XXX temporary, for SYS_arch_prctl below */' + print >> f, '#include ' + print >> f, '#include ' + print >> f, '#include ' + print >> f, '#include ' print >> f commondefs(defines) for key, value in defines.items(): From noreply at buildbot.pypy.org Thu Aug 14 16:19:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 16:19:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Hack hack hack: going for the minimal amount of changes first Message-ID: <20140814141919.1525B1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72799:2cc154f95ab3 Date: 2014-08-14 16:17 +0200 http://bitbucket.org/pypy/pypy/changeset/2cc154f95ab3/ Log: Hack hack hack: going for the minimal amount of changes first diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -155,34 +155,35 @@ pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; } -void pypy_stm_start_transaction(stm_jmpbuf_t *jmpbuf_ptr, - volatile long *v_counter) +static long _pypy_stm_start_transaction(void) { pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */ - _stm_start_transaction(&stm_thread_local, jmpbuf_ptr); + long counter = stm_start_transaction(&stm_thread_local); - _pypy_stm_initialize_nursery_low_fill_mark(*v_counter); - *v_counter = *v_counter + 1; + _pypy_stm_initialize_nursery_low_fill_mark(counter); pypy_stm_ready_atomic = 1; /* 
reset after abort */ + + return counter; } void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int)) { /* must save roots around this call */ - stm_jmpbuf_t jmpbuf; - long volatile v_counter = 0; - int (*volatile v_callback)(object_t *, int) = callback; + // + // XXX this function should be killed! We no longer need a + // callback-based approach at all. + #ifndef NDEBUG struct stm_shadowentry_s *volatile v_old_shadowstack = stm_thread_local.shadowstack; #endif - + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); //STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW); STM_PUSH_ROOT(stm_thread_local, arg); while (1) { long counter; - if (pypy_stm_should_break_transaction()) { //pypy_stm_ready_atomic == 1) { /* Not in an atomic transaction; but it might be an inevitable transaction. @@ -191,18 +192,12 @@ stm_commit_transaction(); - /* After setjmp(), the local variables v_* are preserved because - they are volatile. The other local variables should be - declared below than this point only. - */ - while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } - counter = v_counter; - pypy_stm_start_transaction(&jmpbuf, &v_counter); + counter = _pypy_stm_start_transaction(); } else { /* In an atomic transaction */ //assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - counter = v_counter; + counter = 0; } /* invoke the callback in the new transaction */ @@ -210,32 +205,17 @@ assert(v_old_shadowstack == stm_thread_local.shadowstack);// - 1); STM_PUSH_ROOT(stm_thread_local, arg); - long result = v_callback(arg, counter); + long result = callback(arg, counter); if (result <= 0) break; - v_counter = 0; - } - - if (STM_SEGMENT->jmpbuf_ptr == &jmpbuf) { - /* we can't leave this function leaving a non-inevitable - transaction whose jmpbuf points into this function. - we could break the transaction here but we instead rely - on the caller to break it. 
Since we have to use an inevitable - transaction anyway, using the current one may be cheaper. - */ - _stm_become_inevitable("perform_transaction left with inevitable"); - } - /* double-check */ - if (pypy_stm_ready_atomic == 1) { - } - else { - assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); } STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ //uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local); //assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD); assert(v_old_shadowstack == stm_thread_local.shadowstack); + + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); } void _pypy_stm_inev_state(void) diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -39,7 +39,7 @@ static inline void pypy_stm_become_inevitable(const char *msg) { assert(STM_SEGMENT->running_thread == &stm_thread_local); - if (STM_SEGMENT->jmpbuf_ptr != NULL) { + if (!stm_is_inevitable()) { _pypy_stm_become_inevitable(msg); } } @@ -92,8 +92,7 @@ long pypy_stm_enter_callback_call(void); void pypy_stm_leave_callback_call(long); void pypy_stm_set_transaction_length(double); -void pypy_stm_perform_transaction(object_t *, int(object_t *, int)); -void pypy_stm_start_transaction(stm_jmpbuf_t *, volatile long *); +void pypy_stm_perform_transaction(object_t *, int(object_t *, int));//XXX static inline int pypy_stm_should_break_transaction(void) { From noreply at buildbot.pypy.org Thu Aug 14 16:19:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 16:19:20 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/29376f500349 Message-ID: <20140814141920.4912C1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72800:057a885fd864 Date: 2014-08-14 16:17 +0200 http://bitbucket.org/pypy/pypy/changeset/057a885fd864/ Log: import stmgc/29376f500349 diff 
--git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -1815f493a1c5 +29376f500349 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -329,8 +329,6 @@ { assert(!_stm_in_transaction(tl)); - s_mutex_lock(); - retry: if (inevitable) { wait_for_end_of_inevitable_transaction(tl); @@ -391,6 +389,7 @@ long stm_start_transaction(stm_thread_local_t *tl) { + s_mutex_lock(); #ifdef STM_NO_AUTOMATIC_SETJMP long repeat_count = 0; /* test/support.py */ #else @@ -402,6 +401,7 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + s_mutex_lock(); _stm_start_transaction(tl, true); } @@ -998,7 +998,11 @@ /* NB. careful, this function might be called more than once to abort a given segment. Make sure that stm_rewind_jmp_restore_shadowstack() is idempotent. */ - stm_rewind_jmp_restore_shadowstack(tl); + /* we need to do this here and not directly in rewind_longjmp() because + that is called when we already released everything (safe point) + and a concurrent major GC could mess things up. 
*/ + if (tl->shadowstack != NULL) + stm_rewind_jmp_restore_shadowstack(tl); assert(tl->shadowstack == pseg->shadowstack_at_start_of_transaction); #endif tl->thread_local_obj = pseg->threadlocal_at_start_of_transaction; @@ -1025,7 +1029,7 @@ } #endif -static void abort_with_mutex(void) +static stm_thread_local_t *abort_with_mutex_no_longjmp(void) { assert(_has_mutex()); dprintf(("~~~ ABORT\n")); @@ -1058,6 +1062,12 @@ /* Broadcast C_ABORTED to wake up contention.c */ cond_broadcast(C_ABORTED); + return tl; +} + +static void abort_with_mutex(void) +{ + stm_thread_local_t *tl = abort_with_mutex_no_longjmp(); s_mutex_unlock(); /* It seems to be a good idea, at least in some examples, to sleep @@ -1075,6 +1085,7 @@ #ifdef STM_NO_AUTOMATIC_SETJMP _test_run_abort(tl); #else + s_mutex_lock(); stm_rewind_jmp_longjmp(tl); #endif } diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -273,6 +273,7 @@ static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); +static stm_thread_local_t *abort_with_mutex_no_longjmp(void); static void abort_data_structures_from_segment_num(int segment_num); static inline bool was_read_remote(char *base, object_t *obj, diff --git a/rpython/translator/stm/src_stm/stm/forksupport.c b/rpython/translator/stm/src_stm/stm/forksupport.c --- a/rpython/translator/stm/src_stm/stm/forksupport.c +++ b/rpython/translator/stm/src_stm/stm/forksupport.c @@ -182,20 +182,18 @@ assert(tl->associated_segment_num == i); assert(pr->transaction_state == TS_REGULAR); set_gs_register(get_segment_base(i)); + assert(STM_SEGMENT->segment_num == i); - rewind_jmp_buf rjbuf; - stm_rewind_jmp_enterframe(tl, &rjbuf); - if (stm_rewind_jmp_setjmp(tl) == 0) { + s_mutex_lock(); #ifndef NDEBUG - pr->running_pthread = pthread_self(); + pr->running_pthread = pthread_self(); #endif - 
pr->pub.running_thread->shadowstack = ( - pr->shadowstack_at_start_of_transaction); - strcpy(pr->marker_self, "fork"); - stm_abort_transaction(); - } + strcpy(pr->marker_self, "fork"); + tl->shadowstack = NULL; + pr->shadowstack_at_start_of_transaction = NULL; stm_rewind_jmp_forget(tl); - stm_rewind_jmp_leaveframe(tl, &rjbuf); + abort_with_mutex_no_longjmp(); + s_mutex_unlock(); } static void forksupport_child(void) diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c --- a/rpython/translator/stm/src_stm/stm/gcpage.c +++ b/rpython/translator/stm/src_stm/stm/gcpage.c @@ -364,6 +364,17 @@ mark_trace(obj, segment_base); } +static void *mark_visit_objects_from_ss(void *_, const void *slice, size_t size) +{ + const struct stm_shadowentry_s *p, *end; + p = (const struct stm_shadowentry_s *)slice; + end = (const struct stm_shadowentry_s *)(slice + size); + for (; p < end; p++) + if ((((uintptr_t)p->ss) & 3) == 0) + mark_visit_object(p->ss, stm_object_pages); + return NULL; +} + static void mark_visit_from_roots(void) { if (testing_prebuilt_objs != NULL) { @@ -393,10 +404,14 @@ long i; for (i = 1; i <= NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state != TS_NONE) + if (get_priv_segment(i)->transaction_state != TS_NONE) { mark_visit_object( get_priv_segment(i)->threadlocal_at_start_of_transaction, get_segment_base(i)); + stm_rewind_jmp_enum_shadowstack( + get_segment(i)->running_thread, + mark_visit_objects_from_ss); + } } } diff --git a/rpython/translator/stm/src_stm/stm/marker.c b/rpython/translator/stm/src_stm/stm/marker.c --- a/rpython/translator/stm/src_stm/stm/marker.c +++ b/rpython/translator/stm/src_stm/stm/marker.c @@ -19,10 +19,9 @@ struct stm_shadowentry_s *current = tl->shadowstack - 1; struct stm_shadowentry_s *base = tl->shadowstack_base; - /* The shadowstack_base contains STM_STACK_MARKER_OLD, which is - a convenient stopper for the loop below but which shouldn't - be returned. 
*/ - assert(base->ss == (object_t *)STM_STACK_MARKER_OLD); + /* The shadowstack_base contains -1, which is a convenient stopper for + the loop below but which shouldn't be returned. */ + assert(base->ss == (object_t *)-1); while (!(((uintptr_t)current->ss) & 1)) { current--; diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -157,27 +157,22 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; struct stm_shadowentry_s *current = tl->shadowstack; - struct stm_shadowentry_s *base = tl->shadowstack_base; - while (1) { + struct stm_shadowentry_s *finalbase = tl->shadowstack_base; + struct stm_shadowentry_s *ssbase; + ssbase = (struct stm_shadowentry_s *)tl->rjthread.moved_off_ssbase; + if (ssbase == NULL) + ssbase = finalbase; + else + assert(finalbase <= ssbase && ssbase <= current); + + while (current > ssbase) { --current; - OPT_ASSERT(current >= base); - uintptr_t x = (uintptr_t)current->ss; if ((x & 3) == 0) { /* the stack entry is a regular pointer (possibly NULL) */ minor_trace_if_young(¤t->ss); } - else if (x == STM_STACK_MARKER_NEW) { - /* the marker was not already seen: mark it as seen, - but continue looking more deeply in the shadowstack */ - current->ss = (object_t *)STM_STACK_MARKER_OLD; - } - else if (x == STM_STACK_MARKER_OLD) { - /* the marker was already seen: we can stop the - root stack tracing at this point */ - break; - } else { /* it is an odd-valued marker, ignore */ } diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -202,13 +202,13 @@ struct stm_shadowentry_s *s = (struct stm_shadowentry_s *)start; tl->shadowstack = s; tl->shadowstack_base = s; - STM_PUSH_ROOT(*tl, STM_STACK_MARKER_OLD); + STM_PUSH_ROOT(*tl, -1); } static void 
_done_shadow_stack(stm_thread_local_t *tl) { assert(tl->shadowstack > tl->shadowstack_base); - assert(tl->shadowstack_base->ss == (object_t *)STM_STACK_MARKER_OLD); + assert(tl->shadowstack_base->ss == (object_t *)-1); char *start = (char *)tl->shadowstack_base; _shadowstack_trap_page(start, PROT_READ | PROT_WRITE); diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -314,8 +314,6 @@ #define STM_PUSH_ROOT(tl, p) ((tl).shadowstack++->ss = (object_t *)(p)) #define STM_POP_ROOT(tl, p) ((p) = (typeof(p))((--(tl).shadowstack)->ss)) #define STM_POP_ROOT_RET(tl) ((--(tl).shadowstack)->ss) -#define STM_STACK_MARKER_NEW (-41) -#define STM_STACK_MARKER_OLD (-43) /* Every thread needs to have a corresponding stm_thread_local_t @@ -346,6 +344,8 @@ (tl)->shadowstack = (struct stm_shadowentry_s *) \ rewind_jmp_restore_shadowstack(&(tl)->rjthread); \ } while (0) +#define stm_rewind_jmp_enum_shadowstack(tl, callback) \ + rewind_jmp_enum_shadowstack(&(tl)->rjthread, callback) /* Starting and ending transactions. stm_read(), stm_write() and stm_allocate() should only be called from within a transaction. From noreply at buildbot.pypy.org Thu Aug 14 16:19:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 16:19:21 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix. Message-ID: <20140814141921.7381A1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72801:8f655372a008 Date: 2014-08-14 16:18 +0200 http://bitbucket.org/pypy/pypy/changeset/8f655372a008/ Log: Fix. 
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -248,7 +248,7 @@ void pypy_stm_become_globally_unique_transaction(void) { - if (STM_SEGMENT->jmpbuf_ptr != NULL) { + if (!stm_is_inevitable()) { _pypy_stm_inev_state(); } stm_become_globally_unique_transaction(&stm_thread_local, "for the JIT"); From noreply at buildbot.pypy.org Thu Aug 14 16:22:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 16:22:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Forgot to add these files Message-ID: <20140814142216.BCFFE1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72802:52e94842f3f8 Date: 2014-08-14 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/52e94842f3f8/ Log: Forgot to add these files diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c @@ -0,0 +1,208 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#include "rewind_setjmp.h" +#include +#include +#include +#include + + +struct _rewind_jmp_moved_s { + struct _rewind_jmp_moved_s *next; + size_t stack_size; + size_t shadowstack_size; +}; +#define RJM_HEADER sizeof(struct _rewind_jmp_moved_s) + +#ifndef RJBUF_CUSTOM_MALLOC +#define rj_malloc malloc +#define rj_free free +#else +void *rj_malloc(size_t); +void rj_free(void *); +#endif + + +static void copy_stack(rewind_jmp_thread *rjthread, char *base, void *ssbase) +{ + /* Copy away part of the stack and shadowstack. Sets moved_off_base to + the current frame_base. + + The stack is copied between 'base' (lower limit, i.e. newest bytes) + and 'rjthread->head->frame_base' (upper limit, i.e. oldest bytes). 
+ The shadowstack is copied between 'ssbase' (upper limit, newest) + and 'rjthread->head->shadowstack_base' (lower limit, oldest). + */ + struct _rewind_jmp_moved_s *next; + char *stop; + void *ssstop; + size_t stack_size, ssstack_size; + + assert(rjthread->head != NULL); + stop = rjthread->head->frame_base; + ssstop = rjthread->head->shadowstack_base; + assert(stop >= base); + assert(ssstop <= ssbase); + stack_size = stop - base; + ssstack_size = ssbase - ssstop; + + next = (struct _rewind_jmp_moved_s *) + rj_malloc(RJM_HEADER + stack_size + ssstack_size); + assert(next != NULL); /* XXX out of memory */ + next->next = rjthread->moved_off; + next->stack_size = stack_size; + next->shadowstack_size = ssstack_size; + + memcpy(((char *)next) + RJM_HEADER, base, stack_size); + memcpy(((char *)next) + RJM_HEADER + stack_size, ssstop, + ssstack_size); + + rjthread->moved_off_base = stop; + rjthread->moved_off_ssbase = ssstop; + rjthread->moved_off = next; +} + +__attribute__((noinline)) +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss) +{ + /* saves the current stack frame to the list of slices and + calls setjmp(). 
It returns the number of times a longjmp() + jumped back to this setjmp() */ + if (rjthread->moved_off) { + /* old stack slices are not needed anymore (next longjmp() + will restore only to this setjmp()) */ + _rewind_jmp_free_stack_slices(rjthread); + } + /* all locals of this function that need to be saved and restored + across the setjmp() should be stored inside this structure */ + struct { void *ss1; rewind_jmp_thread *rjthread1; } volatile saved = + { ss, rjthread }; + + int result; + if (__builtin_setjmp(rjthread->jmpbuf) == 0) { + rjthread = saved.rjthread1; + rjthread->initial_head = rjthread->head; + result = 0; + } + else { + rjthread = saved.rjthread1; + rjthread->head = rjthread->initial_head; + result = rjthread->repeat_count + 1; + } + rjthread->repeat_count = result; + + /* snapshot of top frame: needed every time because longjmp() frees + the previous one. Note that this function is called with the + mutex already acquired. Although it's not the job of this file, + we assert it is indeed acquired here. This is needed, otherwise a + concurrent GC may get garbage while saving shadow stack */ +#ifdef _STM_CORE_H_ + assert(_has_mutex()); +#endif + copy_stack(rjthread, (char *)&saved, saved.ss1); + + return result; +} + +__attribute__((noinline, noreturn)) +static void do_longjmp(rewind_jmp_thread *rjthread, char *stack_free) +{ + /* go through list of copied stack-slices and copy them back to the + current stack, expanding it if necessary. The shadowstack should + already be restored at this point (restore_shadowstack()) */ + assert(rjthread->moved_off_base != NULL); + + while (rjthread->moved_off) { + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + char *target = rjthread->moved_off_base; + /* CPU stack grows downwards: */ + target -= p->stack_size; + if (target < stack_free) { + /* need more stack space! 
*/ + do_longjmp(rjthread, alloca(stack_free - target)); + abort(); /* unreachable */ + } + memcpy(target, ((char *)p) + RJM_HEADER, p->stack_size); + + rjthread->moved_off_base = target; + rjthread->moved_off = p->next; + rj_free(p); + } + +#ifdef _STM_CORE_H_ + /* This function must be called with the mutex held. It will + remain held across the longjmp that follows and into the + target rewind_jmp_setjmp() function. */ + assert(_has_mutex()); +#endif + __builtin_longjmp(rjthread->jmpbuf, 1); +} + +__attribute__((noreturn)) +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) +{ + char _rewind_jmp_marker; + do_longjmp(rjthread, &_rewind_jmp_marker); +} + + +char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread, + void *callback(void *, const void *, size_t)) +{ + /* enumerate all saved shadow-stack slices */ + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + char *sstarget = rjthread->moved_off_ssbase; + +#ifdef _STM_CORE_H_ + assert(_has_mutex()); +#endif + + while (p) { + if (p->shadowstack_size) { + void *ss_slice = ((char *)p) + RJM_HEADER + p->stack_size; + callback(sstarget, ss_slice, p->shadowstack_size); + + sstarget += p->shadowstack_size; + } + p = p->next; + } + return sstarget; +} + + +char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread) +{ + return rewind_jmp_enum_shadowstack(rjthread, memcpy); +} + +__attribute__((noinline)) +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *rjthread) +{ + /* called when leaving a frame. copies the now-current frame + to the list of stack-slices */ +#ifdef _STM_CORE_H_ + /* A transaction should be running now. This means in particular + that it's not possible that a major GC runs concurrently with + this code (and tries to read the shadowstack slice). 
*/ + assert(_seems_to_be_running_transaction()); +#endif + if (rjthread->head == NULL) { + _rewind_jmp_free_stack_slices(rjthread); + return; + } + assert(rjthread->moved_off_base < (char *)rjthread->head); + copy_stack(rjthread, rjthread->moved_off_base, rjthread->moved_off_ssbase); +} + +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *rjthread) +{ + /* frees all saved stack copies */ + struct _rewind_jmp_moved_s *p = rjthread->moved_off; + while (p) { + struct _rewind_jmp_moved_s *pnext = p->next; + rj_free(p); + p = pnext; + } + rjthread->moved_off = NULL; + rjthread->moved_off_base = NULL; + rjthread->moved_off_ssbase = NULL; +} diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h new file mode 100644 --- /dev/null +++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h @@ -0,0 +1,109 @@ +/* Imported by rpython/translator/stm/import_stmgc.py */ +#ifndef _REWIND_SETJMP_H_ +#define _REWIND_SETJMP_H_ + + +#include + +/************************************************************ +There is a singly-linked list of frames in each thread +rjthread->head->prev->prev->prev + +Another singly-linked list is the list of copied stack-slices. +When doing a setjmp(), we copy the top-frame, free all old +stack-slices, and link it to the top-frame->moved_off. +When returning from the top-frame while moved_off still points +to a slice, we also need to copy the top-frame->prev frame/slice +and add it to this list (pointed to by moved_off). 
+-------------------------------------------------------------- + + : : ^^^^^ + |-------------------| older frames in the stack + | prev=0 | + ,---> | rewind_jmp_buf | + | |-------------------| + | | | + | : : + | : : + | | | + | |-------------------| + `---------prev | + ,----> | rewind_jmp_buf | + | +-------------------| + | | | + | : : + | | | + | |-------------------| + `----------prev | + ,---> | rewind_jmp_buf | <--------------- MOVED_OFF_BASE + | |---------------- +-------------+ + | | | STACK COPY | + | | : : + | : | size | + | | | next | <---- MOVED_OFF + | | +---|------ +-------------+ + | | | | | STACK COPY | + | |-------------------| | : (SEQUEL) : + `---------prev | | : : +HEAD-----> | rewind_jmp_buf | | | | + |-------------------| | | size | + `------> | next=0 | + +-------------+ + + +************************************************************/ + +typedef struct _rewind_jmp_buf { + char *frame_base; + char *shadowstack_base; + struct _rewind_jmp_buf *prev; +} rewind_jmp_buf; + +typedef struct { + rewind_jmp_buf *head; + rewind_jmp_buf *initial_head; + char *moved_off_base; + char *moved_off_ssbase; + struct _rewind_jmp_moved_s *moved_off; + void *jmpbuf[5]; + long repeat_count; +} rewind_jmp_thread; + + +/* remember the current stack and ss_stack positions */ +#define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + (rjbuf)->frame_base = __builtin_frame_address(0); \ + (rjbuf)->shadowstack_base = (char *)(ss); \ + (rjbuf)->prev = (rjthread)->head; \ + (rjthread)->head = (rjbuf); \ +} while (0) + +/* go up one frame. 
if there was a setjmp call in this frame, + */ +#define rewind_jmp_leaveframe(rjthread, rjbuf, ss) do { \ + assert((rjbuf)->shadowstack_base == (char *)(ss)); \ + (rjthread)->head = (rjbuf)->prev; \ + if ((rjbuf)->frame_base == (rjthread)->moved_off_base) { \ + assert((rjthread)->moved_off_ssbase == (char *)(ss));\ + _rewind_jmp_copy_stack_slice(rjthread); \ + } \ +} while (0) + +long rewind_jmp_setjmp(rewind_jmp_thread *rjthread, void *ss); +void rewind_jmp_longjmp(rewind_jmp_thread *rjthread) __attribute__((noreturn)); +char *rewind_jmp_restore_shadowstack(rewind_jmp_thread *rjthread); +char *rewind_jmp_enum_shadowstack(rewind_jmp_thread *rjthread, + void *callback(void *, const void *, size_t)); + +#define rewind_jmp_forget(rjthread) do { \ + if ((rjthread)->moved_off) _rewind_jmp_free_stack_slices(rjthread); \ + (rjthread)->moved_off_base = 0; \ + (rjthread)->moved_off_ssbase = 0; \ +} while (0) + +void _rewind_jmp_copy_stack_slice(rewind_jmp_thread *); +void _rewind_jmp_free_stack_slices(rewind_jmp_thread *); + +#define rewind_jmp_armed(rjthread) ((rjthread)->moved_off_base != 0) + +#endif From noreply at buildbot.pypy.org Thu Aug 14 17:29:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 17:29:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Test and fix Message-ID: <20140814152947.5524F1C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72803:7182ba694a88 Date: 2014-08-14 17:29 +0200 http://bitbucket.org/pypy/pypy/changeset/7182ba694a88/ Log: Test and fix diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -20,6 +20,7 @@ 'jit_assembler_call', 'gc_writebarrier', 'shrink_array', 'jit_stm_transaction_break_point', 'jit_stm_should_break_transaction', + 'threadlocalref_get', 'threadlocalref_set', ]) ALWAYS_ALLOW_OPERATIONS |= 
set(lloperation.enum_tryfold_ops()) diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -279,3 +279,16 @@ res = self.interpret_inevitable(f1, []) assert res is None + + def test_threadlocal(self): + from rpython.rlib.rthread import ThreadLocalReference + opaque_id = lltype.opaqueptr(ThreadLocalReference.OPAQUEID, "foobar") + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(): + x = lltype.malloc(X) + llop.threadlocalref_set(lltype.Void, opaque_id, x) + y = llop.threadlocalref_get(lltype.Ptr(X), opaque_id) + return x == y + + res = self.interpret_inevitable(f1, []) + assert res is None From noreply at buildbot.pypy.org Thu Aug 14 17:32:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 17:32:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Accept set(NULL) Message-ID: <20140814153249.5E1501C0323@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72804:0d4e673c7370 Date: 2014-08-14 17:31 +0200 http://bitbucket.org/pypy/pypy/changeset/0d4e673c7370/ Log: Accept set(NULL) diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -322,8 +322,9 @@ from rpython.rlib.objectmodel import running_on_llinterp ptr = cast_instance_to_base_ptr(value) if not running_on_llinterp: - gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ptr) - _make_sure_does_not_move(gcref) + if ptr: + gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ptr) + _make_sure_does_not_move(gcref) llop.threadlocalref_set(lltype.Void, opaque_id, ptr) ensure_threadlocal() else: From noreply at buildbot.pypy.org Thu Aug 14 19:05:16 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:16 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add parent object as argument. 
forgot it for the partial trace and drag out Message-ID: <20140814170516.331BE1D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72805:7175f6eeea87 Date: 2014-08-14 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/7175f6eeea87/ Log: add parent object as argument. forgot it for the partial trace and drag out diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1805,7 +1805,7 @@ ll_assert(start < stop, "empty or negative range " "in trace_and_drag_out_of_nursery_partial()") #print 'trace_partial:', start, stop, '\t', obj - self.trace_partial(obj, start, stop, self._trace_drag_out, llmemory.NULL) + self.trace_partial(obj, start, stop, self._trace_drag_out, obj) def _trace_drag_out1(self, root): From noreply at buildbot.pypy.org Thu Aug 14 19:05:17 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:17 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: always add parent of a pinned object to the list of old objects pointing to Message-ID: <20140814170517.7EF7A1D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72806:998fdf343c42 Date: 2014-08-14 16:35 +0200 http://bitbucket.org/pypy/pypy/changeset/998fdf343c42/ Log: always add parent of a pinned object to the list of old objects pointing to pinned objects. Forgot to think about the case where two (or more) old objects point to the same pinned one. Each of this old objects must be in the list as if one dies, we still have to keep the pinned object alive (and pinned). 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1862,14 +1862,15 @@ # elif self._is_pinned(obj): hdr = self.header(obj) + # track parent of pinned object specially + if parent != llmemory.NULL: + self.old_objects_pointing_to_pinned.append(parent) + if hdr.tid & GCFLAG_VISITED: # already visited and keeping track of the object return hdr.tid |= GCFLAG_VISITED # - if parent != llmemory.NULL: - self.old_objects_pointing_to_pinned.append(parent) - # # XXX add additional checks for unsupported pinned objects (groggi) ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, "pinned object with GCFLAG_HAS_CARDS not supported") From noreply at buildbot.pypy.org Thu Aug 14 19:05:18 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:18 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: move old_object_pointing_to_pinned cleanup to the end of the marking phase. Message-ID: <20140814170518.B03EA1D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72807:249e52c74a0c Date: 2014-08-14 16:38 +0200 http://bitbucket.org/pypy/pypy/changeset/249e52c74a0c/ Log: move old_object_pointing_to_pinned cleanup to the end of the marking phase. Doing it at the start of the sweeping phase just results in multiple runs of this cleanup code. At the same time, at the end of the marking phase we know everything we need to know to do a cleanup of the list. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2102,19 +2102,19 @@ #objects_to_trace processed fully, can move on to sweeping self.ac.mass_free_prepare() self.start_free_rawmalloc_objects() + # + # get rid of objects pointing to pinned objects that were not + # visited + new_old_objects_pointing_to_pinned = self.AddressStack() + self.old_objects_pointing_to_pinned.foreach( + self._sweep_old_objects_pointing_to_pinned, + new_old_objects_pointing_to_pinned) + self.old_objects_pointing_to_pinned.delete() + self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: # - # get rid of objects pointing to pinned objects that were not - # visited - new_old_objects_pointing_to_pinned = self.AddressStack() - self.old_objects_pointing_to_pinned.foreach( - self._sweep_old_objects_pointing_to_pinned, - new_old_objects_pointing_to_pinned) - self.old_objects_pointing_to_pinned.delete() - self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned - # if self.raw_malloc_might_sweep.non_empty(): # Walk all rawmalloced objects and free the ones that don't # have the GCFLAG_VISITED flag. Visit at most 'limit' objects. From noreply at buildbot.pypy.org Thu Aug 14 19:05:19 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:19 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: new test to check if old_objects_pointing_to_pinned isn't growing while Message-ID: <20140814170519.D4F411D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72808:79000e7c5466 Date: 2014-08-14 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/79000e7c5466/ Log: new test to check if old_objects_pointing_to_pinned isn't growing while old object pointing to pinned objects stays the same. fails right now. 
diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -6,7 +6,8 @@ S = lltype.GcForwardReference() S.become(lltype.GcStruct('pinning_test_struct', ('someInt', lltype.Signed), - ('next', lltype.Ptr(S)))) + ('next', lltype.Ptr(S)), + ('data', lltype.Ptr(S)))) class PinningGCTest(BaseDirectGCTest): @@ -487,6 +488,41 @@ self.pin_referenced_from_prebuilt(self.gc.collect) + def test_old_objects_pointing_to_pinned_not_exploading(self): + # scenario: two old object, each pointing twice to a pinned object. + # The internal 'old_objects_pointing_to_pinned' should contain + # always two objects. + # In previous implementation the list exploded (grew with every minor + # collection), hence this test. + old1_ptr = self.malloc(S) + old1_ptr.someInt = 900 + self.stackroots.append(old1_ptr) + + old2_ptr = self.malloc(S) + old2_ptr.someInt = 800 + self.stackroots.append(old2_ptr) + + pinned_ptr = self.malloc(S) + pinned_ptr.someInt = 100 + assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) + + self.write(old1_ptr, 'next', pinned_ptr) + self.write(old1_ptr, 'data', pinned_ptr) + self.write(old2_ptr, 'next', pinned_ptr) + self.write(old2_ptr, 'data', pinned_ptr) + + self.gc.collect() + old1_ptr = self.stackroots[0] + old2_ptr = self.stackroots[1] + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old1_ptr)) + assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old2_ptr)) + + # do multiple rounds to make sure + for _ in range(10): + assert self.gc.old_objects_pointing_to_pinned.length() == 2 + self.gc.debug_gc_step() + + def pin_shadow_1(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) From noreply at buildbot.pypy.org Thu Aug 14 19:05:21 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:21 +0200 (CEST) Subject: [pypy-commit] pypy 
gc-incminimark-pinning: initial fix for 'test_old_objects_pointing_to_pinned_not_exploading' Message-ID: <20140814170521.01D6F1D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72809:df44232936bf Date: 2014-08-14 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/df44232936bf/ Log: initial fix for 'test_old_objects_pointing_to_pinned_not_exploading' diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1676,6 +1676,9 @@ self.nursery_free = self.nursery self.nursery_top = self.nursery_barriers.popleft() + self.old_objects_pointing_to_pinned.foreach( + self._reset_flag_old_objects_pointing_to_pinned, None) + debug_print("minor collect, total memory used:", self.get_total_memory_used()) debug_print("number of pinned objects:", @@ -1687,6 +1690,10 @@ # debug_stop("gc-minor") + def _reset_flag_old_objects_pointing_to_pinned(self, obj, ignore): + assert self.header(obj).tid & GCFLAG_PINNED + self.header(obj).tid &= ~GCFLAG_PINNED + def _visit_old_objects_pointing_to_pinned(self, obj, ignore): self.trace(obj, self._trace_drag_out, obj) @@ -1863,8 +1870,11 @@ elif self._is_pinned(obj): hdr = self.header(obj) # track parent of pinned object specially - if parent != llmemory.NULL: + if parent != llmemory.NULL and \ + not self.header(parent).tid & GCFLAG_PINNED: + # self.old_objects_pointing_to_pinned.append(parent) + self.header(parent).tid |= GCFLAG_PINNED if hdr.tid & GCFLAG_VISITED: # already visited and keeping track of the object From noreply at buildbot.pypy.org Thu Aug 14 19:05:22 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:22 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: new GC flag that uses the same bit as GCFLAG_PINNED for parents pointing to pinned objects. 
Message-ID: <20140814170522.2A9D91D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72810:f2e0b083a76b Date: 2014-08-14 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/f2e0b083a76b/ Log: new GC flag that uses the same bit as GCFLAG_PINNED for parents pointing to pinned objects. Looks a bit cleaner than using GCFLAG_PINNED on old objects. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -138,6 +138,14 @@ # details. GCFLAG_PINNED = first_gcflag << 9 +# The following flag is set only on objects outside the nursery +# (i.e. old objects). Therefore we can reuse GCFLAG_PINNED as it is used for +# the same feature (object pinning) and GCFLAG_PINNED is only used on nursery +# objects. +# If this flag is set, the flagged object is already an element of +# 'old_objects_pointing_to_pinned' and doesn't have to be added again. +GCFLAG_PINNED_OBJECT_PARENT_KNOWN = GCFLAG_PINNED + _GCFLAG_FIRST_UNUSED = first_gcflag << 10 # the first unused bit @@ -1676,6 +1684,7 @@ self.nursery_free = self.nursery self.nursery_top = self.nursery_barriers.popleft() + # clear GCFLAG_PINNED_OBJECT_PARENT_KNOWN from all parents in the list. 
self.old_objects_pointing_to_pinned.foreach( self._reset_flag_old_objects_pointing_to_pinned, None) @@ -1691,8 +1700,8 @@ debug_stop("gc-minor") def _reset_flag_old_objects_pointing_to_pinned(self, obj, ignore): - assert self.header(obj).tid & GCFLAG_PINNED - self.header(obj).tid &= ~GCFLAG_PINNED + assert self.header(obj).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN + self.header(obj).tid &= ~GCFLAG_PINNED_OBJECT_PARENT_KNOWN def _visit_old_objects_pointing_to_pinned(self, obj, ignore): self.trace(obj, self._trace_drag_out, obj) @@ -1871,7 +1880,7 @@ hdr = self.header(obj) # track parent of pinned object specially if parent != llmemory.NULL and \ - not self.header(parent).tid & GCFLAG_PINNED: + not self.header(parent).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN: # self.old_objects_pointing_to_pinned.append(parent) self.header(parent).tid |= GCFLAG_PINNED From noreply at buildbot.pypy.org Thu Aug 14 19:05:23 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:23 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: use one const pointer to point to the array and not each time a new one. Message-ID: <20140814170523.72FCF1D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72811:f7318fcee996 Date: 2014-08-14 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/f7318fcee996/ Log: use one const pointer to point to the array and not each time a new one. 
the JIT trace looks nicer this way :-) diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -27,15 +27,17 @@ _ref_array_type = lltype.GcArray(llmemory.GCREF) def __init__(self, cpu, size): + self._size = size self._next_item = 0 self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size) self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type) self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array) + self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref) def add_ref(self, ref): index = self._next_item + assert index < self._size self._next_item += 1 - # self._ref_array[index] = ref return index @@ -142,7 +144,7 @@ result_ptr = BoxPtr() array_index = pinned_obj_tracker.add_ref(p) load_op = ResOperation(rop.GETARRAYITEM_GC, - [ConstPtr(pinned_obj_tracker.ref_array_gcref), + [pinned_obj_tracker.const_ptr_gcref_array, ConstInt(array_index)], result_ptr, descr=pinned_obj_tracker.ref_array_descr) From noreply at buildbot.pypy.org Thu Aug 14 19:05:24 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:24 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add first simple JIT test with a pinned object Message-ID: <20140814170524.90F401D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72812:b5b57f12e441 Date: 2014-08-14 19:03 +0200 http://bitbucket.org/pypy/pypy/changeset/b5b57f12e441/ Log: add first simple JIT test with a pinned object diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -775,3 +775,32 @@ def test_compile_framework_call_assembler(self): self.run('compile_framework_call_assembler') + + def 
define_pinned_simple(cls): + from rpython.rlib.jit import promote + class H: + inst = None + helper = H() + + @dont_look_inside + def get_y(): + if not helper.inst: + helper.inst = X() + helper.inst.x = 101 + assert rgc.pin(helper.inst) + else: + assert rgc._is_pinned(helper.inst) + return helper.inst + + def fn(n, x, *args): + t = get_y() + promote(t) + t.x += 11 + n -= 1 + return (n, x) + args + + return None, fn, None + + def test_pinned_simple(self): + self.run('pinned_simple') + From noreply at buildbot.pypy.org Thu Aug 14 19:05:25 2014 From: noreply at buildbot.pypy.org (groggi) Date: Thu, 14 Aug 2014 19:05:25 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: fix code for the case of no pinned objects Message-ID: <20140814170525.BCC741D362F@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72813:4eed5d00ac19 Date: 2014-08-14 19:04 +0200 http://bitbucket.org/pypy/pypy/changeset/4eed5d00ac19/ Log: fix code for the case of no pinned objects diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -182,7 +182,10 @@ newnewops.extend(reops) else: newnewops.append(op) - return newnewops + # + return newnewops + else: + return newops @specialize.memo() def getframedescrs(self, cpu): From noreply at buildbot.pypy.org Thu Aug 14 19:50:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 19:50:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a FAQ entry Message-ID: <20140814175047.0715B1D362E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72814:0b7f33f969db Date: 2014-08-14 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/0b7f33f969db/ Log: Add a FAQ entry diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -154,6 +154,17 @@ declaring that some sections of the code should run without releasing the 
GIL in the middle (these are called *atomic sections* in STM). +-------------------------------------------------- +Is PyPy more clever than CPython about Tail Calls? +-------------------------------------------------- + +No. PyPy follows the Python language design, including the built-in +debugger features. `This prevents tail calls.`__ Neither the JIT +nor Stackless__ change anything to that. + +.. __: http://neopythonic.blogspot.com.au/2009/04/final-words-on-tail-calls.html +.. __: stackless.html + ------------------------------------------ How do I write extension modules for PyPy? ------------------------------------------ From noreply at buildbot.pypy.org Thu Aug 14 20:01:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 20:01:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Update the FAQ entry Message-ID: <20140814180136.9B1E91D362E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72815:0610ca82473d Date: 2014-08-14 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/0610ca82473d/ Log: Update the FAQ entry diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -159,10 +159,12 @@ -------------------------------------------------- No. PyPy follows the Python language design, including the built-in -debugger features. `This prevents tail calls.`__ Neither the JIT -nor Stackless__ change anything to that. +debugger features. This prevents tail calls, as summarized by Guido +van Rossum in two__ blog__ posts. Moreover, neither the JIT nor +Stackless__ change anything to that. -.. __: http://neopythonic.blogspot.com.au/2009/04/final-words-on-tail-calls.html +.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html +.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html .. 
__: stackless.html ------------------------------------------ From noreply at buildbot.pypy.org Thu Aug 14 20:06:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 14 Aug 2014 20:06:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Update this FAQ entry Message-ID: <20140814180600.3DF8E1D3633@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72816:87622de1d682 Date: 2014-08-14 20:05 +0200 http://bitbucket.org/pypy/pypy/changeset/87622de1d682/ Log: Update this FAQ entry diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -147,12 +147,12 @@ programmer). Instead, since 2012, there is work going on on a still very experimental -Software Transactional Memory (STM) version of PyPy. This should give -an alternative PyPy which internally has no GIL, while at the same time +`Software Transactional Memory`_ (STM) version of PyPy. This should give +an alternative PyPy which works without a GIL, while at the same time continuing to give the Python programmer the complete illusion of having -one. It would in fact push forward *more* GIL-ish behavior, like -declaring that some sections of the code should run without releasing -the GIL in the middle (these are called *atomic sections* in STM). +one. + +.. _`Software Transactional Memory`: stm.html -------------------------------------------------- Is PyPy more clever than CPython about Tail Calls? 
From noreply at buildbot.pypy.org Fri Aug 15 11:35:50 2014 From: noreply at buildbot.pypy.org (groggi) Date: Fri, 15 Aug 2014 11:35:50 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: JIT test to check if unpinning an objects works Message-ID: <20140815093550.E3DEA1C0EC8@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72817:a2d0b17c975c Date: 2014-08-15 11:34 +0200 http://bitbucket.org/pypy/pypy/changeset/a2d0b17c975c/ Log: JIT test to check if unpinning an objects works diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -804,3 +804,48 @@ def test_pinned_simple(self): self.run('pinned_simple') + def define_pinned_unpin(cls): + from rpython.rlib.jit import promote + class H: + inst = None + pinned = False + count_pinned = 0 + count_unpinned = 0 + helper = H() + + @dont_look_inside + def get_y(n): + if not helper.inst: + helper.inst = X() + helper.inst.x = 101 + helper.pinned = True + assert rgc.pin(helper.inst) + elif n < 100 and helper.pinned: + rgc.unpin(helper.inst) + helper.pinned = False + # + if helper.pinned: + assert rgc._is_pinned(helper.inst) + helper.count_pinned += 1 + else: + assert not rgc._is_pinned(helper.inst) + helper.count_unpinned += 1 + return helper.inst + + def fn(n, x, *args): + t = get_y(n) + promote(t) + assert t.x == 101 + n -= 1 + return (n, x) + args + + def after(n, x, *args): + assert helper.count_pinned > 0 + assert helper.count_unpinned > 0 + assert not helper.pinned + + return None, fn, after + + def test_pinned_unpin(self): + self.run('pinned_unpin') + From noreply at buildbot.pypy.org Fri Aug 15 13:17:55 2014 From: noreply at buildbot.pypy.org (groggi) Date: Fri, 15 Aug 2014 13:17:55 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: consistency: use check() instead of assert 
Message-ID: <20140815111755.3F4851C0157@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72818:54527e0ff202 Date: 2014-08-15 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/54527e0ff202/ Log: consistency: use check() instead of assert diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -787,9 +787,9 @@ if not helper.inst: helper.inst = X() helper.inst.x = 101 - assert rgc.pin(helper.inst) + check(rgc.pin(helper.inst)) else: - assert rgc._is_pinned(helper.inst) + check(rgc._is_pinned(helper.inst)) return helper.inst def fn(n, x, *args): @@ -819,30 +819,30 @@ helper.inst = X() helper.inst.x = 101 helper.pinned = True - assert rgc.pin(helper.inst) + check(rgc.pin(helper.inst)) elif n < 100 and helper.pinned: rgc.unpin(helper.inst) helper.pinned = False # if helper.pinned: - assert rgc._is_pinned(helper.inst) + check(rgc._is_pinned(helper.inst)) helper.count_pinned += 1 else: - assert not rgc._is_pinned(helper.inst) + check(not rgc._is_pinned(helper.inst)) helper.count_unpinned += 1 return helper.inst def fn(n, x, *args): t = get_y(n) promote(t) - assert t.x == 101 + check(t.x == 101) n -= 1 return (n, x) + args def after(n, x, *args): - assert helper.count_pinned > 0 - assert helper.count_unpinned > 0 - assert not helper.pinned + check(helper.count_pinned > 0) + check(helper.count_unpinned > 0) + check(not helper.pinned) return None, fn, after From noreply at buildbot.pypy.org Fri Aug 15 19:36:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 15 Aug 2014 19:36:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Oups, thanks gregor_w for noticing that the test was not testing anything. 
Message-ID: <20140815173636.DE26B1C0226@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72819:39bb9189ac28 Date: 2014-08-15 19:25 +0200 http://bitbucket.org/pypy/pypy/changeset/39bb9189ac28/ Log: Oups, thanks gregor_w for noticing that the test was not testing anything. Make it do so, and fix it. diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -633,9 +633,9 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): - check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150) + check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850) - return before, f, None + return before, f, after def test_compile_framework_external_exception_handling(self): self.run('compile_framework_external_exception_handling') From noreply at buildbot.pypy.org Sat Aug 16 03:04:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 03:04:24 +0200 (CEST) Subject: [pypy-commit] pypy default: py3 compat Message-ID: <20140816010424.5B9C51C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72820:0efde1cfee32 Date: 2014-08-15 17:58 -0700 http://bitbucket.org/pypy/pypy/changeset/0efde1cfee32/ Log: py3 compat diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -288,9 +288,9 @@ yield 5 raise # should raise "no active exception to re-raise" gen = f() - gen.next() # --> 5 + next(gen) # --> 5 try: - gen.next() + next(gen) except TypeError: pass From noreply at buildbot.pypy.org Sat Aug 16 03:04:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 03:04:26 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: 
<20140816010426.0D22F1C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72821:57f83c7b11fc Date: 2014-08-15 17:58 -0700 http://bitbucket.org/pypy/pypy/changeset/57f83c7b11fc/ Log: merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -147,12 +147,25 @@ programmer). Instead, since 2012, there is work going on on a still very experimental -Software Transactional Memory (STM) version of PyPy. This should give -an alternative PyPy which internally has no GIL, while at the same time +`Software Transactional Memory`_ (STM) version of PyPy. This should give +an alternative PyPy which works without a GIL, while at the same time continuing to give the Python programmer the complete illusion of having -one. It would in fact push forward *more* GIL-ish behavior, like -declaring that some sections of the code should run without releasing -the GIL in the middle (these are called *atomic sections* in STM). +one. + +.. _`Software Transactional Memory`: stm.html + +-------------------------------------------------- +Is PyPy more clever than CPython about Tail Calls? +-------------------------------------------------- + +No. PyPy follows the Python language design, including the built-in +debugger features. This prevents tail calls, as summarized by Guido +van Rossum in two__ blog__ posts. Moreover, neither the JIT nor +Stackless__ change anything to that. + +.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html +.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html +.. __: stackless.html ------------------------------------------ How do I write extension modules for PyPy? 
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -309,9 +309,9 @@ yield 5 raise # should raise "no active exception to re-raise" gen = f() - gen.next() # --> 5 + next(gen) # --> 5 try: - gen.next() + next(gen) except TypeError: pass diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -633,9 +633,9 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): - check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150) + check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850) - return before, f, None + return before, f, after def test_compile_framework_external_exception_handling(self): self.run('compile_framework_external_exception_handling') diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -62,6 +62,8 @@ msg.append("the following functions:") msg.append(" %s" % ("\n ".join(pfg), )) msg.append("are called with inconsistent numbers of arguments") + msg.append("(and/or the argument names are different, which is" + " not supported in this case)") if shape1[0] != shape2[0]: msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0])) else: diff --git a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -185,6 +185,7 @@ .+Sub1.fn .+Sub2.fn are called with inconsistent numbers of arguments +\(and/or the argument names are different, which is not supported in this case\) sometimes with \d arguments, sometimes with \d the callers of these functions are: 
.+otherfunc From noreply at buildbot.pypy.org Sat Aug 16 03:04:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 03:04:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix preserving the exception state between generator yields for the 3rd time Message-ID: <20140816010427.50C511C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72822:7ca938cad6ed Date: 2014-08-15 18:01 -0700 http://bitbucket.org/pypy/pypy/changeset/7ca938cad6ed/ Log: fix preserving the exception state between generator yields for the 3rd time diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -63,6 +63,8 @@ try: while True: next_instr = self.handle_bytecode(co_code, next_instr, ec) + except Yield: + return self.popvalue() except ExitFrame: self.last_exception = None return self.popvalue() diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -307,12 +307,12 @@ foobar except NameError: yield 5 - raise # should raise "no active exception to re-raise" + raise gen = f() next(gen) # --> 5 try: next(gen) - except TypeError: + except NameError: pass From noreply at buildbot.pypy.org Sat Aug 16 03:04:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 03:04:28 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140816010428.8F2DC1C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72823:1d47dd6f19e4 Date: 2014-08-15 18:02 -0700 http://bitbucket.org/pypy/pypy/changeset/1d47dd6f19e4/ Log: merge py3k diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -147,12 +147,25 @@ programmer). 
Instead, since 2012, there is work going on on a still very experimental -Software Transactional Memory (STM) version of PyPy. This should give -an alternative PyPy which internally has no GIL, while at the same time +`Software Transactional Memory`_ (STM) version of PyPy. This should give +an alternative PyPy which works without a GIL, while at the same time continuing to give the Python programmer the complete illusion of having -one. It would in fact push forward *more* GIL-ish behavior, like -declaring that some sections of the code should run without releasing -the GIL in the middle (these are called *atomic sections* in STM). +one. + +.. _`Software Transactional Memory`: stm.html + +-------------------------------------------------- +Is PyPy more clever than CPython about Tail Calls? +-------------------------------------------------- + +No. PyPy follows the Python language design, including the built-in +debugger features. This prevents tail calls, as summarized by Guido +van Rossum in two__ blog__ posts. Moreover, neither the JIT nor +Stackless__ change anything to that. + +.. __: http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html +.. __: http://neopythonic.blogspot.com/2009/04/final-words-on-tail-calls.html +.. __: stackless.html ------------------------------------------ How do I write extension modules for PyPy? 
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -63,6 +63,8 @@ try: while True: next_instr = self.handle_bytecode(co_code, next_instr, ec) + except Yield: + return self.popvalue() except ExitFrame: self.last_exception = None return self.popvalue() diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -307,12 +307,12 @@ foobar except NameError: yield 5 - raise # should raise "no active exception to re-raise" + raise gen = f() - gen.next() # --> 5 + next(gen) # --> 5 try: - gen.next() - except TypeError: + next(gen) + except NameError: pass def test_yield_return(self): diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -633,9 +633,9 @@ return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s def after(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s): - check(x.x == 1800 * 2 + 1850 * 2 + 200 - 150) + check(x.x == 1800 * 2 + 150 * 2 + 200 - 1850) - return before, f, None + return before, f, after def test_compile_framework_external_exception_handling(self): self.run('compile_framework_external_exception_handling') diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -62,6 +62,8 @@ msg.append("the following functions:") msg.append(" %s" % ("\n ".join(pfg), )) msg.append("are called with inconsistent numbers of arguments") + msg.append("(and/or the argument names are different, which is" + " not supported in this case)") if shape1[0] != shape2[0]: msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0])) else: diff --git 
a/rpython/rtyper/test/test_normalizecalls.py b/rpython/rtyper/test/test_normalizecalls.py --- a/rpython/rtyper/test/test_normalizecalls.py +++ b/rpython/rtyper/test/test_normalizecalls.py @@ -185,6 +185,7 @@ .+Sub1.fn .+Sub2.fn are called with inconsistent numbers of arguments +\(and/or the argument names are different, which is not supported in this case\) sometimes with \d arguments, sometimes with \d the callers of these functions are: .+otherfunc From noreply at buildbot.pypy.org Sat Aug 16 16:39:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 16 Aug 2014 16:39:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Minimal set of changes to pass targetdemo2 while moving away from the Message-ID: <20140816143948.6AE541C0157@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72824:459042b5f19d Date: 2014-08-16 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/459042b5f19d/ Log: Minimal set of changes to pass targetdemo2 while moving away from the stm_perform_*() model diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -18,7 +18,6 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec -from rpython.rlib.jit import we_are_jitted def unaryoperation(operationname): """NOT_RPYTHON""" @@ -44,14 +43,6 @@ return func_with_new_name(opimpl, "opcode_impl_for_%s" % operationname) -# ____________________________________________________________ - -stmonly_jitdriver = jit.JitDriver(greens=[], reds=['next_instr', 'ec', - 'self', 'co_code'], - stm_do_transaction_breaks=True) - -# ____________________________________________________________ - opcodedesc = bytecode_spec.opcodedesc HAVE_ARGUMENT = bytecode_spec.HAVE_ARGUMENT @@ -65,13 +56,8 @@ # For the sequel, force 'next_instr' to be unsigned for performance next_instr = 
r_uint(next_instr) co_code = pycode.co_code + rstm.rewind_jmp_frame() while True: - if self.space.config.translation.stm: - # only used for no-jit. The jit-jitdriver is - # in interp_jit.py - stmonly_jitdriver.jit_merge_point( - self=self, co_code=co_code, - next_instr=next_instr, ec=ec) rstm.push_marker(intmask(next_instr) * 2 + 1, self.pycode) try: next_instr = self.handle_bytecode(co_code, next_instr, ec) @@ -165,6 +151,7 @@ ec.bytecode_only_trace(self) else: ec.bytecode_trace(self) + rstm.possible_transaction_break() next_instr = r_uint(self.last_instr) opcode = ord(co_code[next_instr]) next_instr += 1 @@ -199,7 +186,7 @@ else: unroller = SReturnValue(w_returnvalue) next_instr = block.handle(self, unroller) - # now inside a 'finally' block + return next_instr # now inside a 'finally' block elif opcode == opcodedesc.END_FINALLY.index: unroller = self.end_finally() if isinstance(unroller, SuspendedUnroller): @@ -211,12 +198,13 @@ raise Return else: next_instr = block.handle(self, unroller) + return next_instr elif opcode == opcodedesc.JUMP_ABSOLUTE.index: return self.jump_absolute(oparg, ec) elif opcode == opcodedesc.BREAK_LOOP.index: next_instr = self.BREAK_LOOP(oparg, next_instr) elif opcode == opcodedesc.CONTINUE_LOOP.index: - next_instr = self.CONTINUE_LOOP(oparg, next_instr) + return self.CONTINUE_LOOP(oparg, next_instr) elif opcode == opcodedesc.FOR_ITER.index: next_instr = self.FOR_ITER(oparg, next_instr) elif opcode == opcodedesc.JUMP_FORWARD.index: @@ -457,22 +445,6 @@ if jit.we_are_jitted(): return next_instr - if self.space.config.translation.stm: - # with STM, if should_break_transaction(), then it is a good - # idea to leave and let _dispatch_stm_breaking_transaction() - # break the transaction. 
But avoid doing it if we are in a - # tail-call position: if the next opcode is RETURN_VALUE, or - # one of the opcodes in the one of the sequences - # * POP_TOP/LOAD_CONST/RETURN_VALUE - # * POP_TOP/LOAD_FAST/RETURN_VALUE - if rstm.should_break_transaction(): - opcode = ord(co_code[next_instr]) - if opcode not in (opcodedesc.RETURN_VALUE.index, - opcodedesc.POP_TOP.index, - opcodedesc.LOAD_CONST.index, - opcodedesc.LOAD_FAST.index): - return next_instr - rstm.update_marker_num(intmask(next_instr) * 2 + 1) @jit.unroll_safe diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -483,16 +483,12 @@ name = 'jitdriver' inline_jit_merge_point = False _store_last_enter_jit = None - stm_do_transaction_breaks = False - stm_report_location = None def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, - name='jitdriver', check_untranslated=True, - stm_do_transaction_breaks=None, - stm_report_location=None): + name='jitdriver', check_untranslated=True): if greens is not None: self.greens = greens self.name = name @@ -528,10 +524,6 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated - if stm_do_transaction_breaks is not None: - self.stm_do_transaction_breaks = stm_do_transaction_breaks - if stm_report_location is not None: - self.stm_report_location = stm_report_location def _freeze_(self): return True @@ -826,6 +818,9 @@ v_red = hop.inputarg(r_red, arg=i) reds_v.append(v_red) hop.exception_cannot_occur() + if self.instance.__name__ == 'jit_merge_point': + if hop.rtyper.annotator.translator.config.translation.stm: + hop.genop('stm_rewind_jmp_frame', [], resulttype=lltype.Void) vlist = [hop.inputconst(lltype.Void, self.instance.__name__), hop.inputconst(lltype.Void, 
driver)] vlist.extend(greens_v) diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -1,5 +1,6 @@ from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib.objectmodel import CDefinedIntSymbolic +from rpython.rlib.rgc import stm_is_enabled from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.extregistry import ExtRegistryEntry @@ -40,6 +41,18 @@ CFlexSymbolic('((long)&pypy_stm_start_transaction)')) +def rewind_jmp_frame(): + """At some key places, like the entry point of the thread and in the + function with the interpreter's dispatch loop, this must be called + (it turns into a marker in the caller's function). There is one + automatically in any jit.jit_merge_point().""" + # special-cased below + +def possible_transaction_break(): + if stm_is_enabled(): + if llop.stm_should_break_transaction(lltype.Bool): + llop.stm_transaction_break(lltype.Void) + def jit_stm_transaction_break_point(): # XXX REFACTOR AWAY if we_are_translated(): @@ -77,6 +90,10 @@ llop.stm_should_break_transaction(lltype.Bool)) @dont_look_inside +def break_transaction(): + llop.stm_break_transaction(lltype.Void) + + at dont_look_inside def set_transaction_length(fraction): llop.stm_set_transaction_length(lltype.Void, float(fraction)) @@ -161,26 +178,13 @@ # ____________________________________________________________ -def make_perform_transaction(func, CONTAINERP): - from rpython.rtyper.annlowlevel import llhelper - from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr - from rpython.translator.stm.stmgcintf import CALLBACK_TX - # - def _stm_callback(llcontainer, retry_counter): - llcontainer = rffi.cast(CONTAINERP, llcontainer) - retry_counter = rffi.cast(lltype.Signed, retry_counter) - try: - res = func(llcontainer, retry_counter) - except Exception, e: - res = 0 # ends perform_transaction() and returns - 
lle = cast_instance_to_base_ptr(e) - llcontainer.got_exception = lle - return rffi.cast(rffi.INT_real, res) - # - @dont_look_inside - def perform_transaction(llcontainer): - llcallback = llhelper(CALLBACK_TX, _stm_callback) - llop.stm_perform_transaction(lltype.Void, llcontainer, llcallback) - perform_transaction._transaction_break_ = True - # - return perform_transaction +class _Entry(ExtRegistryEntry): + _about_ = rewind_jmp_frame + + def compute_result_annotation(self): + pass + + def specialize_call(self, hop): + hop.exception_cannot_occur() + if hop.rtyper.annotator.translator.config.translation.stm: + hop.genop('stm_rewind_jmp_frame', [], resulttype=lltype.Void) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -434,8 +434,9 @@ 'stm_abort_and_retry': LLOp(canmallocgc=True), 'stm_enter_callback_call': LLOp(canmallocgc=True), 'stm_leave_callback_call': LLOp(), - 'stm_perform_transaction': LLOp(canmallocgc=True), + 'stm_transaction_break': LLOp(canmallocgc=True), 'stm_should_break_transaction': LLOp(sideeffects=False), + 'stm_rewind_jmp_frame': LLOp(), 'stm_set_transaction_length': LLOp(), 'stm_hint_commit_soon': LLOp(canrun=True), diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -318,8 +318,11 @@ if aroundstate is not None: if aroundstate.enter_callback is not None: token = aroundstate.enter_callback() - elif aroundstate.after is not None: - aroundstate.after() + llop.stm_rewind_jmp_frame(lltype.Void, 1) + else: + after = aroundstate.after + if after is not None: + after() # from now on we hold the GIL stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py @@ -336,9 +339,12 @@ stackcounter.stacks_counter -= 1 if aroundstate is not None: if 
aroundstate.leave_callback is not None: + llop.stm_rewind_jmp_frame(lltype.Void, 2) aroundstate.leave_callback(token) - elif aroundstate.before is not None: - aroundstate.before() + else: + before = aroundstate.before + if before is not None: + before() # here we don't hold the GIL any more. As in the wrapper() produced # by llexternal, it is essential that no exception checking occurs # after the call to before(). diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -21,13 +21,11 @@ self, graph, seen) def analyze_external_call(self, op, seen=None): - funcobj = op.args[0].value._obj - if getattr(funcobj, 'transactionsafe', False): - return False - else: - return False - + return False + def analyze_simple_operation(self, op, graphinfo): + if op.opname == 'stm_break_transaction': + return True return False def analyze(graphs, translator): diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -1,5 +1,4 @@ import sys -from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring from rpython.translator.c.support import cdecl from rpython.translator.c.support import llvalue_from_constant, gen_assignments from rpython.translator.c.support import c_string_constant, barebonearray @@ -24,17 +23,7 @@ Collects information about a function which we have to generate from a flow graph. 
""" - - if USESLOTS: - __slots__ = """graph db gcpolicy - exception_policy - more_ll_values - vars all_cached_consts - illtypes - functionname - blocknum - innerloops - oldgraph""".split() + use_stm_rewind_jmp_frame = False def __init__(self, graph, db, exception_policy=None, functionname=None): graph._seen_by_the_backend = True @@ -75,6 +64,11 @@ for block in self.graph.iterblocks(): mix.extend(block.inputargs) for op in block.operations: + if op.opname == 'stm_rewind_jmp_frame': + if len(op.args) == 0: + self.use_stm_rewind_jmp_frame = "automatic" + elif not self.use_stm_rewind_jmp_frame: + self.use_stm_rewind_jmp_frame = True mix.extend(op.args) mix.append(op.result) for link in block.exits: @@ -203,6 +197,11 @@ # ____________________________________________________________ def cfunction_body(self): + if self.use_stm_rewind_jmp_frame: + yield 'rewind_jmp_buf rjbuf1;' + if self.use_stm_rewind_jmp_frame == "automatic": + yield 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);' + # graph = self.graph yield 'goto block0;' # to avoid a warning "this label is not used" @@ -221,6 +220,9 @@ if len(block.exits) == 0: assert len(block.inputargs) == 1 # regular return block + if self.use_stm_rewind_jmp_frame == "automatic": + yield ('stm_rewind_jmp_leaveframe(' + '&stm_thread_local, &rjbuf1);') retval = self.expr(block.inputargs[0]) if self.exception_policy != "exc_helper": yield 'RPY_DEBUG_RETURN();' @@ -920,5 +922,3 @@ self.expr(op.args[0])) else: return None # use the default - -assert not USESLOTS or '__dict__' not in dir(FunctionCodeGenerator) diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -49,6 +49,11 @@ errmsg = RPython_StartupCode(); if (errmsg) goto error; +#ifdef RPY_STM + rewind_jmp_buf rjbuf; + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); +#endif + exitcode = STANDALONE_ENTRY_POINT(argc, argv); 
pypy_debug_alloc_results(); @@ -60,6 +65,10 @@ pypy_malloc_counters_results(); +#ifdef RPY_STM + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); +#endif + RPython_TeardownCode(); return exitcode; diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -175,11 +175,8 @@ arg0 = funcgen.expr(op.args[0]) return 'pypy_stm_set_transaction_length(%s);' % (arg0,) -def stm_perform_transaction(funcgen, op): - arg0 = funcgen.expr(op.args[0]) - arg1 = funcgen.expr(op.args[1]) - return ('pypy_stm_perform_transaction((object_t *)%s, ' - '(int(*)(object_t *, int))%s);' % (arg0, arg1)) +def stm_transaction_break(funcgen, op): + return 'pypy_stm_transaction_break();' def stm_increment_atomic(funcgen, op): return 'pypy_stm_increment_atomic();' @@ -259,3 +256,11 @@ 'stm_thread_local.longest_marker_time = 0.0;\n' 'stm_thread_local.longest_marker_self[0] = 0;\n' 'stm_thread_local.longest_marker_other[0] = 0;') + +def stm_rewind_jmp_frame(funcgen, op): + if len(op.args) == 0: + return '/* automatic stm_rewind_jmp_frame */' + elif op.args[0].value == 1: + return 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);' + else: + return 'stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf1);' diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -167,55 +167,11 @@ return counter; } -void pypy_stm_perform_transaction(object_t *arg, int callback(object_t *, int)) -{ /* must save roots around this call */ - // - // XXX this function should be killed! We no longer need a - // callback-based approach at all. 
- -#ifndef NDEBUG - struct stm_shadowentry_s *volatile v_old_shadowstack = - stm_thread_local.shadowstack; -#endif - rewind_jmp_buf rjbuf; - stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); - //STM_PUSH_ROOT(stm_thread_local, STM_STACK_MARKER_NEW); - STM_PUSH_ROOT(stm_thread_local, arg); - - while (1) { - long counter; - if (pypy_stm_should_break_transaction()) { //pypy_stm_ready_atomic == 1) { - /* Not in an atomic transaction; but it might be an inevitable - transaction. - */ - assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); - - stm_commit_transaction(); - - counter = _pypy_stm_start_transaction(); - } - else { - /* In an atomic transaction */ - //assert(pypy_stm_nursery_low_fill_mark == (uintptr_t) -1); - counter = 0; - } - - /* invoke the callback in the new transaction */ - STM_POP_ROOT(stm_thread_local, arg); - assert(v_old_shadowstack == stm_thread_local.shadowstack);// - 1); - STM_PUSH_ROOT(stm_thread_local, arg); - - long result = callback(arg, counter); - if (result <= 0) - break; - } - - STM_POP_ROOT_RET(stm_thread_local); /* pop the 'arg' */ - //uintptr_t x = (uintptr_t)STM_POP_ROOT_RET(stm_thread_local); - //assert(x == STM_STACK_MARKER_NEW || x == STM_STACK_MARKER_OLD); - assert(v_old_shadowstack == stm_thread_local.shadowstack); - - stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); +void pypy_stm_transaction_break(void) +{ + assert(pypy_stm_nursery_low_fill_mark != (uintptr_t) -1); + stm_commit_transaction(); + _pypy_stm_start_transaction(); } void _pypy_stm_inev_state(void) diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -92,7 +92,7 @@ long pypy_stm_enter_callback_call(void); void pypy_stm_leave_callback_call(long); void pypy_stm_set_transaction_length(double); -void pypy_stm_perform_transaction(object_t *, int(object_t *, int));//XXX +void pypy_stm_transaction_break(void); 
static inline int pypy_stm_should_break_transaction(void) { diff --git a/rpython/translator/stm/test/targetdemo2.py b/rpython/translator/stm/test/targetdemo2.py --- a/rpython/translator/stm/test/targetdemo2.py +++ b/rpython/translator/stm/test/targetdemo2.py @@ -1,6 +1,6 @@ import time from rpython.rlib import rthread -from rpython.rlib import rstm, jit +from rpython.rlib import rstm from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.objectmodel import compute_identity_hash from rpython.rlib.debug import ll_assert @@ -65,12 +65,6 @@ print "check ok!" -jitdriver_hash = jit.JitDriver(greens=[], reds=['value', 'self']) -jitdriver_inev = jit.JitDriver(greens=[], reds=['value', 'self']) -jitdriver_ptreq = jit.JitDriver(greens=[], reds=['self']) -jitdriver_really = jit.JitDriver(greens=[], reds=['value', 'self']) - - class ThreadRunner(object): arg = None @@ -94,7 +88,7 @@ def do_run_really(self): value = 0 while True: - jitdriver_really.jit_merge_point(self=self, value=value) + rstm.possible_transaction_break() if not self.run_really(value): break value += 1 @@ -115,7 +109,7 @@ return (value+1) < glob.LENGTH def do_check_ptr_equality(self): - jitdriver_ptreq.jit_merge_point(self=self) + rstm.possible_transaction_break() self.check_ptr_equality(0) def check_ptr_equality(self, foo): @@ -129,7 +123,7 @@ def do_check_inev(self): value = 0 while True: - jitdriver_inev.jit_merge_point(self=self, value=value) + rstm.possible_transaction_break() if not self.check_inev(value): break value += 1 @@ -157,7 +151,7 @@ def do_check_hash(self): value = 0 while True: - jitdriver_hash.jit_merge_point(self=self, value=value) + rstm.possible_transaction_break() value = self.check_hash(value) if value >= glob.LENGTH: break From noreply at buildbot.pypy.org Sat Aug 16 18:47:27 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 16 Aug 2014 18:47:27 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: if the same pointer is used multiple times, only 
use one array element. Message-ID: <20140816164727.AC1561C0157@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72825:79cdf621b8fb Date: 2014-08-16 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/79cdf621b8fb/ Log: if the same pointer is used multiple times, only use one array element. Implements the optimisation that was pointed to in commit 5dcc35cb8954a4ce373804707a6745c8adb3487c diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -18,6 +18,7 @@ from rpython.jit.backend.llsupport.descr import get_call_descr from rpython.jit.backend.llsupport.rewrite import GcRewriterAssembler from rpython.memory.gctransform import asmgcroot +from rpython.rtyper.lltypesystem import llmemory class PinnedObjectTracker(object): """Simple helper class to keep informations regarding the 'GcArray' @@ -26,21 +27,26 @@ _ref_array_type = lltype.GcArray(llmemory.GCREF) - def __init__(self, cpu, size): - self._size = size - self._next_item = 0 + def __init__(self, cpu, pointers): + # prepare GC array to hold the pointers + size = len(pointers) self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size) self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type) self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array) self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref) + # + # assign each pointer an index and put the pointer into the GC array + self._indexes = {} + for index in range(len(pointers)): + ptr = pointers[index] + self._indexes[llmemory.cast_ptr_to_adr(ptr)] = llmemory.cast_int_to_adr(index) + self._ref_array[index] = ptr - def add_ref(self, ref): - index = self._next_item - assert index < self._size - self._next_item += 1 - self._ref_array[index] = ref + def add_ref(self, ptr): + assert llmemory.cast_ptr_to_adr(ptr) in self._indexes + index = 
llmemory.cast_adr_to_int(self._indexes[llmemory.cast_ptr_to_adr(ptr)]) + assert ptr == self._ref_array[index] return index - # ____________________________________________________________ class GcLLDescription(GcCache): @@ -114,7 +120,8 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, moving_output_list): + def _record_constptrs(self, op, gcrefs_output_list, moving_output_list, + known_pointers): moving_output_list[op] = [] for i in range(op.numargs()): v = op.getarg(i) @@ -124,6 +131,8 @@ gcrefs_output_list.append(p) else: moving_output_list[op].append(i) + if p not in known_pointers: + known_pointers.append(p) # if op.is_guard() or op.getopnum() == rop.FINISH: llref = cast_instance_to_gcref(op.getdescr()) @@ -165,11 +174,13 @@ newnewops = [] # XXX better name... (groggi) moving_output_list = {} + known_pointers = [] for op in newops: - self._record_constptrs(op, gcrefs_output_list, moving_output_list) + self._record_constptrs(op, gcrefs_output_list, moving_output_list, + known_pointers) # if len(moving_output_list) > 0: - pinned_obj_tracker = PinnedObjectTracker(cpu, len(moving_output_list)) + pinned_obj_tracker = PinnedObjectTracker(cpu, known_pointers) if not we_are_translated(): self.last_pinned_object_tracker = pinned_obj_tracker gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref) From noreply at buildbot.pypy.org Sat Aug 16 18:47:29 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 16 Aug 2014 18:47:29 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: renaming, rewriting, etc. for handling movable objects (i.e. 
ConstPtrs that have a pointer which isn't really constant) inside the JIT Message-ID: <20140816164729.03F9E1C0157@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72826:ed703b3e1311 Date: 2014-08-16 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/ed703b3e1311/ Log: renaming, rewriting, etc. for handling movable objects (i.e. ConstPtrs that have a pointer which isn't really constant) inside the JIT diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -20,32 +20,36 @@ from rpython.memory.gctransform import asmgcroot from rpython.rtyper.lltypesystem import llmemory -class PinnedObjectTracker(object): - """Simple helper class to keep informations regarding the 'GcArray' - in one place that is used to double load pinned objects. - """ +class MovableObjectTracker(object): - _ref_array_type = lltype.GcArray(llmemory.GCREF) + ptr_array_type = lltype.GcArray(llmemory.GCREF) - def __init__(self, cpu, pointers): - # prepare GC array to hold the pointers - size = len(pointers) - self._ref_array = lltype.malloc(PinnedObjectTracker._ref_array_type, size) - self.ref_array_descr = cpu.arraydescrof(PinnedObjectTracker._ref_array_type) - self.ref_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self._ref_array) - self.const_ptr_gcref_array = ConstPtr(self.ref_array_gcref) + def __init__(self, cpu, const_pointers): + size = len(const_pointers) + # check that there are any moving object (i.e. chaning pointers). + # Otherwise there is no reason for an instance of this class. 
+ assert size > 0 # - # assign each pointer an index and put the pointer into the GC array + # prepare GC array to hold the pointers that may change + self.ptr_array = lltype.malloc(MovableObjectTracker.ptr_array_type, size) + self.ptr_array_descr = cpu.arraydescrof(MovableObjectTracker.ptr_array_type) + self.ptr_array_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, self.ptr_array) + # use always the same ConstPtr to access the array + # (easer to read JIT trace) + self.const_ptr_gcref_array = ConstPtr(self.ptr_array_gcref) + # + # assign each pointer an index and put the pointer into the GC array. + # as pointers and addresses are not a good key to use before translation + # ConstPtrs are used as the key for the dict. self._indexes = {} - for index in range(len(pointers)): - ptr = pointers[index] - self._indexes[llmemory.cast_ptr_to_adr(ptr)] = llmemory.cast_int_to_adr(index) - self._ref_array[index] = ptr + for index in range(size): + ptr = const_pointers[index] + self._indexes[ptr] = index + self.ptr_array[index] = ptr.value - def add_ref(self, ptr): - assert llmemory.cast_ptr_to_adr(ptr) in self._indexes - index = llmemory.cast_adr_to_int(self._indexes[llmemory.cast_ptr_to_adr(ptr)]) - assert ptr == self._ref_array[index] + def get_array_index(self, const_ptr): + index = self._indexes[const_ptr] + assert const_ptr.value == self.ptr_array[index] return index # ____________________________________________________________ @@ -120,9 +124,9 @@ def gc_malloc_unicode(self, num_elem): return self._bh_malloc_array(num_elem, self.unicode_descr) - def _record_constptrs(self, op, gcrefs_output_list, moving_output_list, - known_pointers): - moving_output_list[op] = [] + def _record_constptrs(self, op, gcrefs_output_list, ops_with_movable_const_ptr, + changeable_const_pointers): + ops_with_movable_const_ptr[op] = [] for i in range(op.numargs()): v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): @@ -130,33 +134,31 @@ if rgc._make_sure_does_not_move(p): 
gcrefs_output_list.append(p) else: - moving_output_list[op].append(i) - if p not in known_pointers: - known_pointers.append(p) + ops_with_movable_const_ptr[op].append(i) + if v not in changeable_const_pointers: + changeable_const_pointers.append(v) # if op.is_guard() or op.getopnum() == rop.FINISH: llref = cast_instance_to_gcref(op.getdescr()) assert rgc._make_sure_does_not_move(llref) gcrefs_output_list.append(llref) # - if len(moving_output_list[op]) == 0: - del moving_output_list[op] + if len(ops_with_movable_const_ptr[op]) == 0: + del ops_with_movable_const_ptr[op] - def _rewrite_constptrs(self, op, moving_output_list, pinned_obj_tracker): + def _rewrite_changeable_constptrs(self, op, ops_with_movable_const_ptr, moving_obj_tracker): newops = [] - for arg_i in moving_output_list[op]: + for arg_i in ops_with_movable_const_ptr[op]: v = op.getarg(arg_i) # assert to make sure we got what we expected assert isinstance(v, ConstPtr) - assert bool(v.value) - p = v.value result_ptr = BoxPtr() - array_index = pinned_obj_tracker.add_ref(p) + array_index = moving_obj_tracker.get_array_index(v) load_op = ResOperation(rop.GETARRAYITEM_GC, - [pinned_obj_tracker.const_ptr_gcref_array, + [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index)], result_ptr, - descr=pinned_obj_tracker.ref_array_descr) + descr=moving_obj_tracker.ptr_array_descr) newops.append(load_op) op.setarg(arg_i, result_ptr) # @@ -166,37 +168,48 @@ def rewrite_assembler(self, cpu, operations, gcrefs_output_list): rewriter = GcRewriterAssembler(self, cpu) newops = rewriter.rewrite(operations) - # record all GCREFs, because the GC (or Boehm) cannot see them and - # keep them alive if they end up as constants in the assembler - - # XXX add comment (groggi) - - newnewops = [] # XXX better name... 
(groggi) - moving_output_list = {} - known_pointers = [] + # the key is an operation that contains a ConstPtr as an argument and + # this ConstPtrs pointer might change as it points to an object that + # can't be made non-moving (e.g. the object is pinned). + ops_with_movable_const_ptr = {} + # + # a list of such not really constant ConstPtrs. + changeable_const_pointers = [] for op in newops: - self._record_constptrs(op, gcrefs_output_list, moving_output_list, - known_pointers) + # record all GCREFs, because the GC (or Boehm) cannot see them and + # keep them alive if they end up as constants in the assembler. + # If such a GCREF can change and we can't make the object it points + # to non-movable, we have to handle it seperatly. Such GCREF's are + # returned as ConstPtrs in 'changeable_const_pointers' and the + # affected operation is returned in 'op_with_movable_const_ptr'. + # For this special case see 'rewrite_changeable_constptrs'. + self._record_constptrs(op, gcrefs_output_list, + ops_with_movable_const_ptr, changeable_const_pointers) # - if len(moving_output_list) > 0: - pinned_obj_tracker = PinnedObjectTracker(cpu, known_pointers) + # handle pointers that are not guaranteed to stay the same + if len(ops_with_movable_const_ptr) > 0: + moving_obj_tracker = MovableObjectTracker(cpu, changeable_const_pointers) + # if not we_are_translated(): - self.last_pinned_object_tracker = pinned_obj_tracker - gcrefs_output_list.append(pinned_obj_tracker.ref_array_gcref) - rgc._make_sure_does_not_move(pinned_obj_tracker.ref_array_gcref) + # used for testing + self.last_moving_obj_tracker = moving_obj_tracker + # make sure the array containing the pointers is not collected by + # the GC (or Boehm) + gcrefs_output_list.append(moving_obj_tracker.ptr_array_gcref) + rgc._make_sure_does_not_move(moving_obj_tracker.ptr_array_gcref) - for op in newops: - if op in moving_output_list: - reops = self._rewrite_constptrs(op, moving_output_list, - pinned_obj_tracker) - 
newnewops.extend(reops) + ops = newops + newops = [] + for op in ops: + if op in ops_with_movable_const_ptr: + rewritten_ops = self._rewrite_changeable_constptrs(op, + ops_with_movable_const_ptr, moving_obj_tracker) + newops.extend(rewritten_ops) else: - newnewops.append(op) - # - return newnewops - else: - return newops + newops.append(op) + # + return newops @specialize.memo() def getframedescrs(self, cpu): diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py @@ -3,7 +3,7 @@ get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\ SizeDescrWithVTable, get_interiorfield_descr from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\ - GcLLDescr_framework, PinnedObjectTracker + GcLLDescr_framework, MovableObjectTracker from rpython.jit.backend.llsupport import jitframe, gc from rpython.jit.metainterp.gc import get_description from rpython.jit.tool.oparser import parse @@ -45,7 +45,7 @@ notpinned_obj_ptr = lltype.malloc(notpinned_obj_type) notpinned_obj_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, notpinned_obj_ptr) # - ref_array_descr = self.cpu.arraydescrof(PinnedObjectTracker._ref_array_type) + ptr_array_descr = self.cpu.arraydescrof(MovableObjectTracker.ptr_array_type) # vtable_descr = self.gc_ll_descr.fielddescr_vtable O = lltype.GcStruct('O', ('parent', rclass.OBJECT), @@ -92,9 +92,9 @@ []) # make the array containing the GCREF's accessible inside the tests. # This must be done after we call 'rewrite_assembler'. Before that - # call 'last_pinned_object_tracker' is None or filled with some old + # call 'last_moving_obj_tracker' is None or filled with some old # value. 
- namespace['ref_array_gcref'] = self.gc_ll_descr.last_pinned_object_tracker.ref_array_gcref + namespace['ptr_array_gcref'] = self.gc_ll_descr.last_moving_obj_tracker.ptr_array_gcref expected = parse(to_operations % Evaluator(namespace), namespace=namespace) equaloplists(operations, expected.operations) @@ -127,7 +127,7 @@ i0 = getfield_gc(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = getarrayitem_gc(ConstPtr(ref_array_gcref), 0, descr=ref_array_descr) + p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr) i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr) """) @@ -139,9 +139,9 @@ i2 = getfield_gc(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = getarrayitem_gc(ConstPtr(ref_array_gcref), 0, descr=ref_array_descr) + p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr) i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr) i1 = getfield_gc(ConstPtr(notpinned_obj_gcref), descr=notpinned_obj_my_int_descr) - p2 = getarrayitem_gc(ConstPtr(ref_array_gcref), 1, descr=ref_array_descr) + p2 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 1, descr=ptr_array_descr) i2 = getfield_gc(p2, descr=pinned_obj_my_int_descr) """) From noreply at buildbot.pypy.org Sat Aug 16 20:47:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 20:47:40 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: Close branch py3.3-fixes2 Message-ID: <20140816184740.1B6BB1C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3-fixes2 Changeset: r72827:fa18c2a58936 Date: 2014-08-16 11:47 -0700 http://bitbucket.org/pypy/pypy/changeset/fa18c2a58936/ Log: Close branch py3.3-fixes2 From noreply at buildbot.pypy.org Sat Aug 16 20:47:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 20:47:49 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2 (pull request #268) Message-ID: 
<20140816184749.9D7581C0157@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72828:c046f2fe5239 Date: 2014-08-16 11:47 -0700 http://bitbucket.org/pypy/pypy/changeset/c046f2fe5239/ Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #268) [py3.3] bz2: disallow pickling for compressor/decompressor (cpython compat) From noreply at buildbot.pypy.org Sat Aug 16 22:49:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 16 Aug 2014 22:49:13 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: test_ufunc leaks references but passes test, how to create an array of function pointers? Message-ID: <20140816204913.CBB611C0157@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72829:422aa6836286 Date: 2014-08-16 23:41 +0300 http://bitbucket.org/pypy/pypy/changeset/422aa6836286/ Log: test_ufunc leaks references but passes test, how to create an array of function pointers? diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -277,11 +277,10 @@ arg_i = args_w[i] assert isinstance(arg_i, W_NDimArray) raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, arg_i.implementation.storage)) - #This assumes we iterate over the last dimension? - raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_shape()[0])) - raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.implementation.strides[0])) + #This assumes we iterate over the whole array (it should be a view...) 
+ raw_storage_setitem(dims, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_size())) + raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize)) try: - import pdb;pdb.set_trace() self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data) except: @@ -299,7 +298,7 @@ GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, rffi.VOIDP], lltype.Void) gufunctype = lltype.Ptr(GenericUfunc) - at cpython_api([rffi.CArrayPtr(gufunctype), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, + at cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject) def _PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -307,7 +306,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - funcs_w[i] = W_GenericUFuncCaller(funcs[i]) + funcs_w[i] = W_GenericUFuncCaller(funcs) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -314,14 +314,16 @@ raises(TypeError, "mod.check_array(42)") def test_ufunc(self): - from _numpypy.multiarray import ndarray + from _numpypy.multiarray import arange mod = self.import_extension('foo', [ ("create_ufunc", "METH_NOARGS", """ PyUFuncGenericFunction funcs[] = {&double_times2, &int_times2}; char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; - PyObject * retval = _PyUFunc_FromFuncAndDataAndSignature(funcs, + PyObject * retval; + /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of function pointers? 
*/ + retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1], array_data, types, 2, 1, 1, PyUFunc_None, "times2", "times2_docstring", 0, "()->()"); Py_INCREF(retval); @@ -361,7 +363,6 @@ char *in = args[0], *out=args[1]; npy_intp in_step = steps[0], out_step = steps[1]; int tmp; - for (i = 0; i < n; i++) { /*BEGIN main ufunc computation*/ tmp = *(int *)in; @@ -374,6 +375,6 @@ }; }; ''') times2 = mod.create_ufunc() - arr = ndarray((3, 4), dtype='i') + arr = arange(12, dtype='i').reshape(3, 4) out = times2(arr) - assert (out == [6, 8]).all() + assert (out == arr * 2).all() From noreply at buildbot.pypy.org Sat Aug 16 22:49:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 16 Aug 2014 22:49:15 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: document where api is still not compatable (help will be appreciated) Message-ID: <20140816204915.2870D1C0157@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72830:8e348173655b Date: 2014-08-16 23:48 +0300 http://bitbucket.org/pypy/pypy/changeset/8e348173655b/ Log: document where api is still not compatable (help will be appreciated) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -283,9 +283,6 @@ try: self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data) - except: - import traceback; traceback.print_exc() - raise finally: free_raw_storage(dataps, track_allocation=False) free_raw_storage(dims, track_allocation=False) @@ -298,6 +295,8 @@ GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, rffi.VOIDP], lltype.Void) gufunctype = lltype.Ptr(GenericUfunc) +# XXX the signature is wrong, it should be an array of gufunctype, but +# XXX rffi.CArrayPtr(gufunctype) does not seem to work ??? 
@cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject) @@ -306,6 +305,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): + # XXX this should be 'funcs[i]' not 'funcs' funcs_w[i] = W_GenericUFuncCaller(funcs) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -322,7 +322,8 @@ char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; PyObject * retval; - /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of function pointers? */ + /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of + function pointers in ndarrayobject.py? 
*/ retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1], array_data, types, 2, 1, 1, PyUFunc_None, "times2", "times2_docstring", 0, "()->()"); From noreply at buildbot.pypy.org Sat Aug 16 23:08:42 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 16 Aug 2014 23:08:42 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: bz2: disallow pickling for compressor/decompresson (cpython compat) Message-ID: <20140816210842.613731C02ED@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72831:a11bfe6cc0b1 Date: 2014-08-15 23:50 +0200 http://bitbucket.org/pypy/pypy/changeset/a11bfe6cc0b1/ Log: bz2: disallow pickling for compressor/decompresson (cpython compat) diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -268,6 +268,10 @@ BZ2_bzCompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') + def __getstate__(self): + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot serialize '_bz2.BZ2Compressor' object")) + @unwrap_spec(data='bufferstr') def compress(self, data): """compress(data) -> string @@ -333,6 +337,7 @@ W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, __new__ = interp2app(descr_compressor__new__), + __getstate__ = interp2app(W_BZ2Compressor.__getstate__), compress = interp2app(W_BZ2Compressor.compress), flush = interp2app(W_BZ2Compressor.flush), ) @@ -372,6 +377,10 @@ BZ2_bzDecompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') + def __getstate__(self): + raise OperationError(self.space.w_TypeError, + self.space.wrap("cannot serialize '_bz2.BZ2Decompressor' object")) + def eof_w(self, space): if self.running: return space.w_False @@ -429,6 +438,7 @@ W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), + __getstate__ = interp2app(W_BZ2Decompressor.__getstate__), 
unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -108,6 +108,13 @@ data += bz2c.flush() assert self.decompress(data) == self.TEXT + def test_compressor_pickle_error(self): + from bz2 import BZ2Compressor + import pickle + + exc = raises(TypeError, pickle.dumps, BZ2Compressor()) + assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Compressor' object" + class AppTestBZ2Decompressor(CheckAllocation): spaceconfig = dict(usemodules=('bz2', 'rctime')) @@ -186,6 +193,13 @@ assert decompressed_data == b'' raises(IOError, bz2d.decompress, self.BUGGY_DATA) + def test_decompressor_pickle_error(self): + from bz2 import BZ2Decompressor + import pickle + + exc = raises(TypeError, pickle.dumps, BZ2Decompressor()) + assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Decompressor' object" + class AppTestBZ2ModuleFunctions(CheckAllocation): spaceconfig = dict(usemodules=('bz2', 'rctime')) From noreply at buildbot.pypy.org Sat Aug 16 23:08:43 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 16 Aug 2014 23:08:43 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: prefer oefmt over OperationError Message-ID: <20140816210843.A4C771C02ED@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72832:50ea833f79dd Date: 2014-08-16 06:57 +0200 http://bitbucket.org/pypy/pypy/changeset/50ea833f79dd/ Log: prefer oefmt over OperationError diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -2,7 +2,7 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi from 
rpython.rtyper.lltypesystem import lltype -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes from pypy.interpreter.typedef import GetSetProperty @@ -269,8 +269,7 @@ lltype.free(self.bzs, flavor='raw') def __getstate__(self): - raise OperationError(self.space.w_TypeError, - self.space.wrap("cannot serialize '_bz2.BZ2Compressor' object")) + raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) @unwrap_spec(data='bufferstr') def compress(self, data): @@ -378,8 +377,7 @@ lltype.free(self.bzs, flavor='raw') def __getstate__(self): - raise OperationError(self.space.w_TypeError, - self.space.wrap("cannot serialize '_bz2.BZ2Decompressor' object")) + raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) def eof_w(self, space): if self.running: From noreply at buildbot.pypy.org Sat Aug 16 23:08:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 23:08:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: close branch Message-ID: <20140816210844.C3B631C02ED@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3-fixes2 Changeset: r72833:e4d0e6a3c726 Date: 2014-08-16 14:05 -0700 http://bitbucket.org/pypy/pypy/changeset/e4d0e6a3c726/ Log: close branch From noreply at buildbot.pypy.org Sat Aug 16 23:08:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 16 Aug 2014 23:08:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3.3-fixes2 Message-ID: <20140816210845.F22761C02ED@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72834:ca985b528468 Date: 2014-08-16 14:06 -0700 http://bitbucket.org/pypy/pypy/changeset/ca985b528468/ Log: merge py3.3-fixes2 diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ 
b/pypy/module/bz2/interp_bz2.py @@ -2,7 +2,7 @@ from rpython.rtyper.tool import rffi_platform as platform from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, interp_attrproperty_bytes from pypy.interpreter.typedef import GetSetProperty @@ -268,6 +268,9 @@ BZ2_bzCompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') + def __getstate__(self): + raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) + @unwrap_spec(data='bufferstr') def compress(self, data): """compress(data) -> string @@ -333,6 +336,7 @@ W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, __new__ = interp2app(descr_compressor__new__), + __getstate__ = interp2app(W_BZ2Compressor.__getstate__), compress = interp2app(W_BZ2Compressor.compress), flush = interp2app(W_BZ2Compressor.flush), ) @@ -372,6 +376,9 @@ BZ2_bzDecompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') + def __getstate__(self): + raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) + def eof_w(self, space): if self.running: return space.w_False @@ -429,6 +436,7 @@ W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), + __getstate__ = interp2app(W_BZ2Decompressor.__getstate__), unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py --- a/pypy/module/bz2/test/test_bz2_compdecomp.py +++ b/pypy/module/bz2/test/test_bz2_compdecomp.py @@ -108,6 +108,13 @@ data += bz2c.flush() assert self.decompress(data) == 
self.TEXT + def test_compressor_pickle_error(self): + from bz2 import BZ2Compressor + import pickle + + exc = raises(TypeError, pickle.dumps, BZ2Compressor()) + assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Compressor' object" + class AppTestBZ2Decompressor(CheckAllocation): spaceconfig = dict(usemodules=('bz2', 'rctime')) @@ -186,6 +193,13 @@ assert decompressed_data == b'' raises(IOError, bz2d.decompress, self.BUGGY_DATA) + def test_decompressor_pickle_error(self): + from bz2 import BZ2Decompressor + import pickle + + exc = raises(TypeError, pickle.dumps, BZ2Decompressor()) + assert exc.value.args[0] == "cannot serialize '_bz2.BZ2Decompressor' object" + class AppTestBZ2ModuleFunctions(CheckAllocation): spaceconfig = dict(usemodules=('bz2', 'rctime')) From noreply at buildbot.pypy.org Sun Aug 17 03:07:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 17 Aug 2014 03:07:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: pep8 whitespace Message-ID: <20140817010712.2E5E51C3436@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72835:62b5d70f4e91 Date: 2014-08-16 17:30 -0700 http://bitbucket.org/pypy/pypy/changeset/62b5d70f4e91/ Log: pep8 whitespace diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -61,7 +61,7 @@ else: varargname = None if code.co_flags & CO_VARKEYWORDS: - kwargname = code.co_varnames[argcount+kwonlyargcount] + kwargname = code.co_varnames[argcount + kwonlyargcount] argcount += 1 else: kwargname = None From noreply at buildbot.pypy.org Sun Aug 17 03:07:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 17 Aug 2014 03:07:13 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix handling of None values in kw_defaults, which are valid. 
found by py3.3's Message-ID: <20140817010713.74E191C3436@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72836:7da16b3cbdc2 Date: 2014-08-16 18:04 -0700 http://bitbucket.org/pypy/pypy/changeset/7da16b3cbdc2/ Log: fix handling of None values in kw_defaults, which are valid. found by py3.3's tests diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2416,7 +2416,8 @@ self.kw_defaults = None if self.kw_defaults is not None: for node in self.kw_defaults: - node.sync_app_attrs(space) + if node: + node.sync_app_attrs(space) class arg(AST): diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1034,6 +1034,13 @@ exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d) assert len(d['u']) == 4 + def test_kw_defaults_None(self): + import _ast + source = "def foo(self, *args, name): pass" + ast = compile(source, '', 'exec', _ast.PyCF_ONLY_AST) + # compiling the produced AST previously triggered a crash + compile(ast, '', 'exec') + class TestOptimizations: def count_instructions(self, source): From noreply at buildbot.pypy.org Sun Aug 17 03:07:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 17 Aug 2014 03:07:14 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140817010714.D94BB1C3436@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72837:3d71999bc05e Date: 2014-08-16 18:04 -0700 http://bitbucket.org/pypy/pypy/changeset/3d71999bc05e/ Log: merge py3k diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -2443,7 +2443,8 @@ self.kw_defaults = None if self.kw_defaults 
is not None: for node in self.kw_defaults: - node.sync_app_attrs(space) + if node: + node.sync_app_attrs(space) class arg(AST): diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1052,6 +1052,13 @@ exec('# -*- coding: utf-8 -*-\n\nu = "\xf0\x9f\x92\x8b"', d) assert len(d['u']) == 4 + def test_kw_defaults_None(self): + import _ast + source = "def foo(self, *args, name): pass" + ast = compile(source, '', 'exec', _ast.PyCF_ONLY_AST) + # compiling the produced AST previously triggered a crash + compile(ast, '', 'exec') + class TestOptimizations: def count_instructions(self, source): From noreply at buildbot.pypy.org Sun Aug 17 03:07:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 17 Aug 2014 03:07:16 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix handling of BoolOp.values when it's None Message-ID: <20140817010716.29FDD1C3436@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72838:f0974e24fc50 Date: 2014-08-16 18:04 -0700 http://bitbucket.org/pypy/pypy/changeset/f0974e24fc50/ Log: fix handling of BoolOp.values when it's None diff --git a/pypy/interpreter/astcompiler/test/test_validate.py b/pypy/interpreter/astcompiler/test/test_validate.py --- a/pypy/interpreter/astcompiler/test/test_validate.py +++ b/pypy/interpreter/astcompiler/test/test_validate.py @@ -237,6 +237,8 @@ def test_boolop(self): b = ast.BoolOp(ast.And, [], 0, 0) self.expr(b, "less than 2 values") + b = ast.BoolOp(ast.And, None, 0, 0) + self.expr(b, "less than 2 values") b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(3), 0, 0)], 0, 0) self.expr(b, "less than 2 values") b = ast.BoolOp(ast.And, [ast.Num(self.space.wrap(4), 0, 0), None], 0, 0) diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- 
a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -295,7 +295,7 @@ pass def visit_BoolOp(self, node): - if len(node.values) < 2: + if self._len(node.values) < 2: raise ValidationError("BoolOp with less than 2 values") self._validate_exprs(node.values) From noreply at buildbot.pypy.org Sun Aug 17 10:26:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 10:26:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill the stm's jitdriver transformation Message-ID: <20140817082614.EFBD61C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72839:987cc6fc0366 Date: 2014-08-16 17:05 +0200 http://bitbucket.org/pypy/pypy/changeset/987cc6fc0366/ Log: Kill the stm's jitdriver transformation diff --git a/rpython/translator/stm/jitdriver.py b/rpython/translator/stm/jitdriver.py deleted file mode 100644 --- a/rpython/translator/stm/jitdriver.py +++ /dev/null @@ -1,269 +0,0 @@ -from rpython.rtyper.lltypesystem import lltype, rclass -from rpython.flowspace.model import checkgraph, copygraph -from rpython.flowspace.model import Block, Link, SpaceOperation, Constant -from rpython.translator.unsimplify import split_block, varoftype -from rpython.annotator.model import s_Int -from rpython.rtyper.llannotation import lltype_to_annotation -from rpython.rtyper.annlowlevel import (MixLevelHelperAnnotator, - cast_base_ptr_to_instance) -from rpython.rlib import rstm -from rpython.tool.sourcetools import compile2 -from rpython.translator.c.support import log - -def find_jit_merge_point(graph, relaxed=False): - found = [] - for block in graph.iterblocks(): - for i in range(len(block.operations)): - op = block.operations[i] - if (op.opname == 'jit_marker' - and op.args[0].value == 'jit_merge_point'): - jitdriver = op.args[1].value - if not jitdriver.autoreds: - if jitdriver.stm_do_transaction_breaks: - found.append((block, i)) - else: - log.WARNING("ignoring non-stm jitdriver in 
%r" % ( - graph,)) - else: - log.WARNING("ignoring jitdriver with autoreds in %r" % ( - graph,)) # XXX XXX! - - assert len(found) <= 1, "several jit_merge_point's in %r" % (graph,) - if found: - return found[0] - else: - return None - -def reorganize_around_jit_driver(stmtransformer, graph): - location = find_jit_merge_point(graph) - if location is not None: - JitDriverSplitter(stmtransformer, graph).split(location) - -# ____________________________________________________________ - - -class JitDriverSplitter(object): - # - # def graph(..): | def graph(..): - # stuff_before | stuff_before - # while 1: ====> while 1: - # jit_merge_point() | if should_break_transaction(): - # stuff_after | return invoke_stm(..) - # | stuff_after - # ----------------------------+ - # - # def invoke_stm(..): - # p = new container object - # store (green args, red args) into p - # perform_transaction(callback, p) - # if p.got_exception: raise p.got_exception - # return p.result_value - # - # (note that perform_transaction() itself will fill p.got_exception) - # - # def callback(p, retry_counter): - # fish (green args, red args) from p - # while 1: - # stuff_after - # if should_break_transaction(): - # store (green args, red args) into p - # return 1 # causes perform_tr() to loop and call us again - # p.result_value = result_value - # return 0 # stop perform_tr() and returns - - def __init__(self, stmtransformer, graph): - self.stmtransformer = stmtransformer - self.main_graph = graph - self.RESTYPE = graph.getreturnvar().concretetype - - def split(self, portal_location): - self.check_jitdriver(portal_location) - self.split_after_jit_merge_point(portal_location) - self.make_container_type() - # - rtyper = self.stmtransformer.translator.rtyper - self.mixlevelannotator = MixLevelHelperAnnotator(rtyper) - self.make_callback_function() - self.make_invoke_stm_function() - self.rewrite_main_graph() - self.mixlevelannotator.finish() - - def check_jitdriver(self, (portalblock, portalopindex)): - 
op_jitmarker = portalblock.operations[portalopindex] - assert op_jitmarker.opname == 'jit_marker' - assert op_jitmarker.args[0].value == 'jit_merge_point' - jitdriver = op_jitmarker.args[1].value - assert not jitdriver.autoreds # fix me - - def split_after_jit_merge_point(self, (portalblock, portalopindex)): - link = split_block(None, portalblock, portalopindex + 1) - self.TYPES = [v.concretetype for v in link.args] - - def make_container_type(self): - args = [('a%d' % i, self.TYPES[i]) for i in range(len(self.TYPES))] - self.CONTAINER = lltype.GcStruct('StmArgs', - ('result_value', self.RESTYPE), - ('got_exception', rclass.OBJECTPTR), - *args) - self.CONTAINERP = lltype.Ptr(self.CONTAINER) - - def add_call_should_break_transaction(self, block): - # add a should_break_transaction() call at the end of the block, - # turn the following link into an "if False" link, add a new - # "if True" link going to a fresh new block, and return this new - # block. - v2 = varoftype(lltype.Bool) - block.operations.append( - SpaceOperation('stm_should_break_transaction', [], v2)) - # - assert block.exitswitch is None - [link] = block.exits - block.exitswitch = v2 - link.exitcase = False - link.llexitcase = False - newblock = Block([varoftype(v.concretetype) for v in link.args]) - otherlink = Link(link.args[:], newblock) - otherlink.exitcase = True - otherlink.llexitcase = True - block.recloseblock(link, otherlink) - return newblock - - def rewrite_main_graph(self): - # add 'should_break_transaction()' - main_graph = self.main_graph - block1, i = find_jit_merge_point(main_graph, relaxed=True) - assert i == len(block1.operations) - 1 - del block1.operations[i] - blockf = self.add_call_should_break_transaction(block1) - # - # fill in blockf with a call to invoke_stm() - v = varoftype(self.RESTYPE, 'result') - op = SpaceOperation('direct_call', - [self.c_invoke_stm_func] + blockf.inputargs, v) - blockf.operations.append(op) - blockf.closeblock(Link([v], main_graph.returnblock)) - # - 
checkgraph(main_graph) - - def make_invoke_stm_function(self): - CONTAINER = self.CONTAINER - callback = self.callback_function - perform_transaction = rstm.make_perform_transaction(callback, - self.CONTAINERP) - irange = range(len(self.TYPES)) - source = """if 1: - def ll_invoke_stm(%s): - p = lltype.malloc(CONTAINER) - %s - perform_transaction(p) - if p.got_exception: - raise cast_base_ptr_to_instance(Exception, p.got_exception) - return p.result_value -""" % (', '.join(['a%d' % i for i in irange]), - '; '.join(['p.a%d = a%d' % (i, i) for i in irange])) - d = {'CONTAINER': CONTAINER, - 'lltype': lltype, - 'perform_transaction': perform_transaction, - 'cast_base_ptr_to_instance': cast_base_ptr_to_instance, - } - exec compile2(source) in d - ll_invoke_stm = d['ll_invoke_stm'] - # - mix = self.mixlevelannotator - c_func = mix.constfunc(ll_invoke_stm, - map(lltype_to_annotation, self.TYPES), - lltype_to_annotation(self.RESTYPE)) - self.c_invoke_stm_func = c_func - - def container_var(self): - return varoftype(self.CONTAINERP, 'stmargs') - - def make_callback_function(self): - # make a copy of the 'main_graph' - callback_graph = copygraph(self.main_graph) - callback_graph.name += '_stm' - self.callback_graph = callback_graph - self.stmtransformer.translator.graphs.append(callback_graph) - #for v1, v2 in zip( - # self.main_graph.getargs() + [self.main_graph.getreturnvar()], - # callback_graph.getargs() + [callback_graph.getreturnvar()]): - # self.stmtransformer.translator.annotator.transfer_binding(v2, v1) - # - # make a new startblock - v_p = self.container_var() - v_retry_counter = varoftype(lltype.Signed, 'retry_counter') - blockst = Block([v_retry_counter]) # 'v_p' inserted below - renamed_p = {blockst: v_p} - annotator = self.stmtransformer.translator.annotator - annotator.setbinding(v_p, lltype_to_annotation(self.CONTAINERP)) - annotator.setbinding(v_retry_counter, s_Int) - # - # change the startblock of callback_graph to point just after the - # jit_merge_point 
- block1, i = find_jit_merge_point(callback_graph, relaxed=True) - assert i == len(block1.operations) - 1 - del block1.operations[i] - [link] = block1.exits - callback_graph.startblock = blockst - # - # fill in the operations of blockst: getfields reading all live vars - a_vars = [] - for i in range(len(self.TYPES)): - c_a_i = Constant('a%d' % i, lltype.Void) - v_a_i = varoftype(self.TYPES[i]) - blockst.operations.append( - SpaceOperation('getfield', [v_p, c_a_i], v_a_i)) - a_vars.append(v_a_i) - blockst.closeblock(Link(a_vars, link.target)) - # - # hack at the regular return block, to set the result into - # 'p.result_value' and return 0. Note that 'p.got_exception' - # is already cleared. - blockr = callback_graph.returnblock - c_result_value = Constant('result_value', lltype.Void) - v_p = self.container_var() - renamed_p[blockr] = v_p - blockr.operations = [ - SpaceOperation('setfield', - [v_p, c_result_value, blockr.inputargs[0]], - varoftype(lltype.Void)), - ] - v = varoftype(lltype.Signed) - annotator.setbinding(v, s_Int) - newblockr = Block([v]) - newblockr.operations = () - newblockr.closeblock() - blockr.recloseblock(Link([Constant(0, lltype.Signed)], newblockr)) - callback_graph.returnblock = newblockr - # - # add 'should_break_transaction()' at the end of the loop - blockf = self.add_call_should_break_transaction(block1) - # store the variables again into v_p - v_p = self.container_var() - renamed_p[blockf] = v_p - for i in range(len(self.TYPES)): - c_a_i = Constant('a%d' % i, lltype.Void) - v_a_i = blockf.inputargs[i] - assert v_a_i.concretetype == self.TYPES[i] - blockf.operations.append( - SpaceOperation('setfield', [v_p, c_a_i, v_a_i], - varoftype(lltype.Void))) - blockf.closeblock(Link([Constant(1, lltype.Signed)], newblockr)) - # - # now pass the original 'v_p' everywhere - for block in callback_graph.iterblocks(): - if block.operations == (): # skip return and except blocks - continue - v_p = renamed_p.get(block, self.container_var()) - 
block.inputargs = [v_p] + block.inputargs - for link in block.exits: - if link.target.operations != (): # to return or except block - link.args = [v_p] + link.args - # - checkgraph(callback_graph) - # - FUNCTYPE = lltype.FuncType([self.CONTAINERP, lltype.Signed], - lltype.Signed) - mix = self.mixlevelannotator - self.callback_function = mix.graph2delayed(callback_graph, - FUNCTYPE=FUNCTYPE) diff --git a/rpython/translator/stm/test/test_jitdriver.py b/rpython/translator/stm/test/test_jitdriver.py deleted file mode 100644 --- a/rpython/translator/stm/test/test_jitdriver.py +++ /dev/null @@ -1,59 +0,0 @@ -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.stm.test.transform_support import BaseTestTransform -from rpython.rlib.jit import JitDriver -from rpython.rlib import rstm - - -class TestJitDriver(BaseTestTransform): - do_jit_driver = True - - def test_loop_no_arg(self): - class X: - counter = 10 - x = X() - myjitdriver = JitDriver(greens=[], reds=[], - stm_do_transaction_breaks=True) - - def f1(): - while x.counter > 0: - myjitdriver.jit_merge_point() - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() - x.counter -= 1 - return 'X' - - res = self.interpret(f1, []) - assert res == 'X' - - def test_loop_args(self): - class X: - counter = 100 - x = X() - myjitdriver = JitDriver(greens=['a'], reds=['b', 'c']) - - def f1(a, b, c): - while x.counter > 0: - myjitdriver.jit_merge_point(a=a, b=b, c=c) - x.counter -= (ord(a) + rffi.cast(lltype.Signed, b) + c) - return 'X' - - res = self.interpret(f1, ['\x03', rffi.cast(rffi.SHORT, 4), 2]) - assert res == 'X' - - def test_loop_void_result(self): - class X: - counter = 10 - x = X() - myjitdriver = JitDriver(greens=[], reds=[], - stm_do_transaction_breaks=True) - - def f1(): - while x.counter > 0: - myjitdriver.jit_merge_point() - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() - - x.counter -= 1 - - res = 
self.interpret(f1, []) - assert res == None diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -1,6 +1,5 @@ from rpython.translator.stm.inevitable import insert_turn_inevitable from rpython.translator.stm.readbarrier import insert_stm_read_barrier -from rpython.translator.stm.jitdriver import reorganize_around_jit_driver from rpython.translator.c.support import log @@ -12,7 +11,6 @@ def transform(self): assert not hasattr(self.translator, 'stm_transformation_applied') self.start_log(1) - self.transform_jit_driver() self.transform_turn_inevitable() self.print_logs(1) self.translator.stm_transformation_applied = True @@ -35,10 +33,6 @@ for graph in self.translator.graphs: insert_turn_inevitable(graph) - def transform_jit_driver(self): - for graph in self.translator.graphs: - reorganize_around_jit_driver(self, graph) - def start_log(self, step): log.info("Software Transactional Memory transformation, step %d" % step) From noreply at buildbot.pypy.org Sun Aug 17 10:26:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 10:26:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Bah Message-ID: <20140817082616.306A91C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72840:7f570c76ae70 Date: 2014-08-17 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/7f570c76ae70/ Log: Bah diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -157,9 +157,8 @@ # sync with lloperation.py gct_stm_become_inevitable = _gct_with_roots_pushed - gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed - gct_stm_perform_transaction = _gct_with_roots_pushed + gct_stm_transaction_break = _gct_with_roots_pushed class 
StmRootWalker(BaseRootWalker): From noreply at buildbot.pypy.org Sun Aug 17 16:01:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 16:01:36 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Remove some other inevitable transactions Message-ID: <20140817140136.6AD0B1C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72841:9d2407315c43 Date: 2014-08-17 16:01 +0200 http://bitbucket.org/pypy/pypy/changeset/9d2407315c43/ Log: Remove some other inevitable transactions diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -125,21 +125,21 @@ def after_external_call(): if we_are_translated(): # starts a new transaction if we are not atomic already - llop.stm_start_inevitable_if_not_atomic(lltype.Void) + llop.stm_start_if_not_atomic(lltype.Void) after_external_call._dont_reach_me_in_del_ = True after_external_call._transaction_break_ = True @dont_look_inside -def enter_callback_call(): +def enter_callback_call(rjbuf): if we_are_translated(): - return llop.stm_enter_callback_call(lltype.Signed) + return llop.stm_enter_callback_call(lltype.Signed, rjbuf) enter_callback_call._dont_reach_me_in_del_ = True enter_callback_call._transaction_break_ = True @dont_look_inside -def leave_callback_call(token): +def leave_callback_call(rjbuf, token): if we_are_translated(): - llop.stm_leave_callback_call(lltype.Void, token) + llop.stm_leave_callback_call(lltype.Void, rjbuf, token) leave_callback_call._dont_reach_me_in_del_ = True leave_callback_call._transaction_break_ = True diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -430,7 +430,7 @@ 'stm_push_root': LLOp(), 'stm_pop_root_into': LLOp(), 'stm_commit_if_not_atomic': LLOp(canmallocgc=True), - 'stm_start_inevitable_if_not_atomic': 
LLOp(canmallocgc=True), + 'stm_start_if_not_atomic': LLOp(canmallocgc=True), 'stm_abort_and_retry': LLOp(canmallocgc=True), 'stm_enter_callback_call': LLOp(canmallocgc=True), 'stm_leave_callback_call': LLOp(), diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -315,10 +315,10 @@ source = py.code.Source(r""" def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple token = 0 + rjbuf = llop.stm_rewind_jmp_frame(llmemory.Address, 1) if aroundstate is not None: if aroundstate.enter_callback is not None: - token = aroundstate.enter_callback() - llop.stm_rewind_jmp_frame(lltype.Void, 1) + token = aroundstate.enter_callback(rjbuf) else: after = aroundstate.after if after is not None: @@ -339,8 +339,7 @@ stackcounter.stacks_counter -= 1 if aroundstate is not None: if aroundstate.leave_callback is not None: - llop.stm_rewind_jmp_frame(lltype.Void, 2) - aroundstate.leave_callback(token) + aroundstate.leave_callback(rjbuf, token) else: before = aroundstate.before if before is not None: @@ -355,13 +354,16 @@ miniglobals['os'] = os miniglobals['we_are_translated'] = we_are_translated miniglobals['stackcounter'] = stackcounter + miniglobals['llmemory'] = llmemory exec source.compile() in miniglobals return miniglobals['wrapper'] _make_wrapper_for._annspecialcase_ = 'specialize:memo' AroundFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Void)) -EnterCallbackFnPtr = lltype.Ptr(lltype.FuncType([], lltype.Signed)) -LeaveCallbackFnPtr = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) +EnterCallbackFnPtr = lltype.Ptr(lltype.FuncType([llmemory.Address], + lltype.Signed)) +LeaveCallbackFnPtr = lltype.Ptr(lltype.FuncType([llmemory.Address, + lltype.Signed], lltype.Void)) class AroundState: _alloc_flavor_ = "raw" diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ 
b/rpython/translator/stm/breakfinder.py @@ -4,8 +4,7 @@ TRANSACTION_BREAK = set([ 'stm_commit_if_not_atomic', - 'stm_start_inevitable_if_not_atomic', - #'stm_perform_transaction', + 'stm_start_if_not_atomic', #'stm_partial_commit_and_resume_other_threads', # new priv_revision #'jit_assembler_call', #'jit_stm_transaction_break_point', diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -156,16 +156,18 @@ def stm_commit_if_not_atomic(funcgen, op): return 'pypy_stm_commit_if_not_atomic();' -def stm_start_inevitable_if_not_atomic(funcgen, op): - return 'pypy_stm_start_inevitable_if_not_atomic();' +def stm_start_if_not_atomic(funcgen, op): + return 'pypy_stm_start_if_not_atomic();' def stm_enter_callback_call(funcgen, op): + arg0 = funcgen.expr(op.args[0]) result = funcgen.expr(op.result) - return '%s = pypy_stm_enter_callback_call();' % (result,) + return '%s = pypy_stm_enter_callback_call(%s);' % (result, arg0) def stm_leave_callback_call(funcgen, op): arg0 = funcgen.expr(op.args[0]) - return 'pypy_stm_leave_callback_call(%s);' % (arg0,) + arg1 = funcgen.expr(op.args[1]) + return 'pypy_stm_leave_callback_call(%s, %s);' % (arg0, arg1) def stm_should_break_transaction(funcgen, op): result = funcgen.expr(op.result) @@ -259,8 +261,10 @@ def stm_rewind_jmp_frame(funcgen, op): if len(op.args) == 0: + assert op.result.concretetype is lltype.Void return '/* automatic stm_rewind_jmp_frame */' elif op.args[0].value == 1: - return 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);' + assert op.result.concretetype is llmemory.Address + return '%s = &rjbuf1;' % (funcgen.expr(op.result),) else: - return 'stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf1);' + assert False, op.args[0].value diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ 
b/rpython/translator/stm/src_stm/stmgcintf.c @@ -89,41 +89,45 @@ /* stm_teardown() not called here for now; it's mostly for tests */ } -long pypy_stm_enter_callback_call(void) +long pypy_stm_enter_callback_call(void *rjbuf) { if (pypy_stm_ready_atomic == 0) { /* first time we see this thread */ assert(pypy_transaction_length >= 0); int e = errno; pypy_stm_register_thread_local(); + stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); errno = e; pypy_stm_ready_atomic = 1; - pypy_stm_start_inevitable_if_not_atomic(); + pypy_stm_start_if_not_atomic(); return 1; } else { /* callback from C code, itself called from Python code */ - pypy_stm_start_inevitable_if_not_atomic(); + stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); + pypy_stm_start_if_not_atomic(); return 0; } } -void pypy_stm_leave_callback_call(long token) +void pypy_stm_leave_callback_call(void *rjbuf, long token) { + int e = errno; if (token == 1) { /* if we're returning into foreign C code that was not itself called from Python code, then we're ignoring the atomic status and committing anyway. 
*/ - int e = errno; pypy_stm_ready_atomic = 1; stm_commit_transaction(); pypy_stm_ready_atomic = 0; + stm_rewind_jmp_leaveframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); pypy_stm_unregister_thread_local(); - errno = e; } else { pypy_stm_commit_if_not_atomic(); + stm_rewind_jmp_leaveframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); } + errno = e; } void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter) diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -53,6 +53,15 @@ } errno = e; } +static inline void pypy_stm_start_if_not_atomic(void) { + if (pypy_stm_ready_atomic == 1) { + int e = errno; + stm_start_transaction(&stm_thread_local); + _pypy_stm_initialize_nursery_low_fill_mark(0); + _pypy_stm_inev_state(); + errno = e; + } +} static inline void pypy_stm_start_inevitable_if_not_atomic(void) { if (pypy_stm_ready_atomic == 1) { int e = errno; @@ -89,8 +98,8 @@ static inline long pypy_stm_get_atomic(void) { return pypy_stm_ready_atomic - 1; } -long pypy_stm_enter_callback_call(void); -void pypy_stm_leave_callback_call(long); +long pypy_stm_enter_callback_call(void *); +void pypy_stm_leave_callback_call(void *, long); void pypy_stm_set_transaction_length(double); void pypy_stm_transaction_break(void); diff --git a/rpython/translator/stm/test/test_ztranslated.py b/rpython/translator/stm/test/test_ztranslated.py --- a/rpython/translator/stm/test/test_ztranslated.py +++ b/rpython/translator/stm/test/test_ztranslated.py @@ -73,7 +73,7 @@ rthread.start_new_thread(threadfn, ()) while glob.seen is None: llop.stm_commit_if_not_atomic(lltype.Void) - llop.stm_start_inevitable_if_not_atomic(lltype.Void) + llop.stm_start_if_not_atomic(lltype.Void) return glob.seen.value # t, cbuilder = self.compile(entry_point) @@ -470,7 +470,7 @@ lst[42] = 43 lst2[999] = lst llop.stm_commit_if_not_atomic(lltype.Void) - 
llop.stm_start_inevitable_if_not_atomic(lltype.Void) + llop.stm_start_if_not_atomic(lltype.Void) print 'did not crash', lst2[999][42] return 0 From noreply at buildbot.pypy.org Sun Aug 17 19:35:28 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 17 Aug 2014 19:35:28 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: merge py3.3 Message-ID: <20140817173528.69E911C347F@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72842:bb19e3e737b0 Date: 2014-08-17 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/bb19e3e737b0/ Log: merge py3.3 diff too long, truncating to 2000 out of 11112 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). 
+ +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' 
not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. """) group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if 
sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. + if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. 
-if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. 
To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. 
May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. 
+ return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. 
expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Sun Aug 17 19:35:29 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sun, 17 Aug 2014 19:35:29 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: implement flush kwarg for print() Message-ID: <20140817173529.A0F851C347F@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72843:c92b637d6a83 Date: 2014-08-17 15:01 +0200 http://bitbucket.org/pypy/pypy/changeset/c92b637d6a83/ Log: implement flush kwarg for print() diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -57,13 +57,14 @@ return line def print_(*args, **kwargs): - r"""print(value, ..., sep=' ', end='\n', file=sys.stdout) + 
r"""print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False) Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: - file: a file-like object (stream); defaults to the current sys.stdout. - sep: string inserted between values, default a space. - end: string appended after the last value, default a newline. + file: a file-like object (stream); defaults to the current sys.stdout. + sep: string inserted between values, default a space. + end: string appended after the last value, default a newline. + flush: whether to forcibly flush the stream. """ fp = kwargs.pop("file", None) if fp is None: @@ -80,6 +81,7 @@ if end is not None: if not isinstance(end, str): raise TypeError("end must be None or a string") + flush = kwargs.pop('flush', None) if kwargs: raise TypeError("invalid keyword arguments to print()") if sep is None: @@ -91,3 +93,5 @@ write(sep) write(arg) write(end) + if flush: + fp.flush() diff --git a/pypy/module/__builtin__/test/test_print.py b/pypy/module/__builtin__/test/test_print.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_print.py @@ -0,0 +1,29 @@ +class AppTestPrint: + + def test_print_flush(self): + """ + # operation of the flush flag + class filelike(): + def __init__(self): + self.written = '' + self.flushed = 0 + def write(self, str): + self.written += str + def flush(self): + self.flushed += 1 + + f = filelike() + print(1, file=f, end='', flush=True) + print(2, file=f, end='', flush=True) + print(3, file=f, flush=False) + assert f.written == '123\\n' + assert f.flushed == 2 + + # ensure exceptions from flush are passed through + class noflush(): + def write(self, str): + pass + def flush(self): + raise RuntimeError + raises(RuntimeError, print, 1, file=noflush(), flush=True) + """ From noreply at buildbot.pypy.org Sun Aug 17 19:35:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 17 Aug 2014 19:35:30 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in 
numerodix/pypy/py3.3-fixes2 (pull request #269) Message-ID: <20140817173530.CD8471C347F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72844:eda8876b5645 Date: 2014-08-17 10:34 -0700 http://bitbucket.org/pypy/pypy/changeset/eda8876b5645/ Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #269) py3.3: implement flush kwarg for print() diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -57,13 +57,14 @@ return line def print_(*args, **kwargs): - r"""print(value, ..., sep=' ', end='\n', file=sys.stdout) + r"""print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False) Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: - file: a file-like object (stream); defaults to the current sys.stdout. - sep: string inserted between values, default a space. - end: string appended after the last value, default a newline. + file: a file-like object (stream); defaults to the current sys.stdout. + sep: string inserted between values, default a space. + end: string appended after the last value, default a newline. + flush: whether to forcibly flush the stream. 
""" fp = kwargs.pop("file", None) if fp is None: @@ -80,6 +81,7 @@ if end is not None: if not isinstance(end, str): raise TypeError("end must be None or a string") + flush = kwargs.pop('flush', None) if kwargs: raise TypeError("invalid keyword arguments to print()") if sep is None: @@ -91,3 +93,5 @@ write(sep) write(arg) write(end) + if flush: + fp.flush() diff --git a/pypy/module/__builtin__/test/test_print.py b/pypy/module/__builtin__/test/test_print.py new file mode 100644 --- /dev/null +++ b/pypy/module/__builtin__/test/test_print.py @@ -0,0 +1,29 @@ +class AppTestPrint: + + def test_print_flush(self): + """ + # operation of the flush flag + class filelike(): + def __init__(self): + self.written = '' + self.flushed = 0 + def write(self, str): + self.written += str + def flush(self): + self.flushed += 1 + + f = filelike() + print(1, file=f, end='', flush=True) + print(2, file=f, end='', flush=True) + print(3, file=f, flush=False) + assert f.written == '123\\n' + assert f.flushed == 2 + + # ensure exceptions from flush are passed through + class noflush(): + def write(self, str): + pass + def flush(self): + raise RuntimeError + raises(RuntimeError, print, 1, file=noflush(), flush=True) + """ From noreply at buildbot.pypy.org Sun Aug 17 19:36:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:36:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Document the kind of trace produced by jit_stm_xxx(), before refactoring it. Message-ID: <20140817173657.42E9E1C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72845:35c9220c71f4 Date: 2014-08-17 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/35c9220c71f4/ Log: Document the kind of trace produced by jit_stm_xxx(), before refactoring it. 
diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -53,6 +53,26 @@ if llop.stm_should_break_transaction(lltype.Bool): llop.stm_transaction_break(lltype.Void) +# Typical usage of the following two functions: +# +# just after jit_merge_point: +# if rstm.jit_stm_should_break_transaction(False): +# rstm.jit_stm_transaction_break_point() +# +# just before can_enter_jit: +# if rstm.jit_stm_should_break_transaction(True): +# rstm.jit_stm_transaction_break_point() +# +# resulting JIT trace (common case): +# ... +# call_release_gil(...) +# stm_transaction_break(0) # in-line, because we expect "inevitable" +# guard_not_forced() +# ... +# i1 = stm_should_break_transaction() +# guard_false(i1) # out-of-line, because rarely needed +# jump() +# def jit_stm_transaction_break_point(): # XXX REFACTOR AWAY if we_are_translated(): From noreply at buildbot.pypy.org Sun Aug 17 19:36:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:36:58 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: In-progress: kill stuff from the JIT Message-ID: <20140817173658.932A61C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72846:02f43fae7c8c Date: 2014-08-17 18:17 +0200 http://bitbucket.org/pypy/pypy/changeset/02f43fae7c8c/ Log: In-progress: kill stuff from the JIT diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -56,9 +56,6 @@ pypyjitdriver.jit_merge_point(ec=ec, frame=self, next_instr=next_instr, pycode=pycode, is_being_profiled=self.is_being_profiled) - if self.space.threadlocals.threads_running: # quasi-immutable field - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() co_code = pycode.co_code self.valuestackdepth = hint(self.valuestackdepth, promote=True) @@ -89,8 +86,7 @@ ec.bytecode_trace(self, 
decr_by) jumpto = r_uint(self.last_instr) if self.space.threadlocals.threads_running: # quasi-immutable field - if rstm.jit_stm_should_break_transaction(True): - rstm.jit_stm_transaction_break_point() + rstm.possible_transaction_break() # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode(), diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1418,20 +1418,9 @@ [v], None)) return ops - def rewrite_op_jit_stm_should_break_transaction(self, op): - assert isinstance(op.args[0], Constant) - - arg = int(op.args[0].value) - c_arg = Constant(arg, lltype.Signed) + def rewrite_op_stm_rewind_jmp_frame(self, op): + return [] - return [SpaceOperation('stm_should_break_transaction', - [c_arg], op.result), - SpaceOperation('-live-', [], None),] - - def rewrite_op_jit_stm_transaction_break_point(self, op): - return [SpaceOperation('stm_transaction_break', [], op.result), - SpaceOperation('-live-', [], None),] - def rewrite_op_jit_marker(self, op): key = op.args[0].value jitdriver = op.args[1].value diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -929,15 +929,18 @@ assert block.operations[1].result is None assert block.exits[0].args == [v1] -def test_jit_stm_transaction_break_point(): - py.test.skip("XXX?") - op = SpaceOperation('jit_stm_transaction_break_point', - [Constant(1, lltype.Signed)], lltype.Void) +def test_stm_should_break_transaction(): + op = SpaceOperation('stm_should_break_transaction', [], lltype.Bool) tr = Transformer() op2 = tr.rewrite_operation(op) - assert op2.opname == 'stm_transaction_break' - assert op2.args[0].value == 1 - + assert op2.opname == 'stm_should_break_transaction' + +def test_stm_rewind_jmp_frame(): + op = 
SpaceOperation('stm_rewind_jmp_frame', [], lltype.Void) + tr = Transformer() + op2 = tr.rewrite_operation(op) + assert op2 == [] + def test_jit_merge_point_1(): class FakeJitDriverSD: index = 42 diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -908,18 +908,15 @@ return False - @arguments("i", returns="i") - def bhimpl_stm_should_break_transaction(if_there_is_no_other): - return False - - - @arguments() - def bhimpl_stm_transaction_break(): - pass + @arguments(returns="i") + def bhimpl_stm_should_break_transaction(): + from rpython.rlib import rstm + return rstm.should_break_transaction() @arguments() def bhimpl_stm_hint_commit_soon(): - pass + from rpython.rlib import rstm + rstm.hint_commit_soon() # ---------- diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -51,13 +51,6 @@ self.input_indirections = {} self.output_indirections = {} - - # to do some of the work of optimizeopt/stm.py, we have a similar - # logic here: - self.stm_break_wanted = True - - - def _input_indirection(self, box): return self.input_indirections.get(box, box) @@ -137,11 +130,8 @@ opnum == rop.SETFIELD_RAW or opnum == rop.SETARRAYITEM_RAW or opnum == rop.SETINTERIORFIELD_RAW or - opnum == rop.RAW_STORE): - return - if opnum in (rop.GUARD_NOT_FORCED, rop.GUARD_NOT_FORCED_2, - rop.STM_HINT_COMMIT_SOON): - self.stm_break_wanted = True + opnum == rop.RAW_STORE or + opnum == rop.STM_HINT_COMMIT_SOON): return if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST or rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST or @@ -207,7 +197,6 @@ del cache[frombox] return else: - self.stm_break_wanted = True # Only invalidate things that are either escaped or arguments for descr, boxes in self.heap_cache.iteritems(): for box in boxes.keys(): @@ -226,8 +215,6 @@ # 
above, but hit an assertion in "pypy test_multiprocessing.py". self.reset(reset_virtuals=False, trace_branch=False) - self.stm_break_wanted = True - def is_class_known(self, box): return box in self.known_class_boxes @@ -338,6 +325,3 @@ def replace_box(self, oldbox, newbox): self.input_indirections[self._output_indirection(newbox)] = self._input_indirection(oldbox) self.output_indirections[self._input_indirection(oldbox)] = self._output_indirection(newbox) - - def stm_break_done(self): - self.stm_break_wanted = False diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -627,7 +627,6 @@ operations = None call_pure_results = None stm_info = None - is_really_loop = False logops = None quasi_immutable_deps = None diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -8,7 +8,6 @@ from rpython.jit.metainterp.optimizeopt.simplify import OptSimplify from rpython.jit.metainterp.optimizeopt.pure import OptPure from rpython.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce -from rpython.jit.metainterp.optimizeopt.stm import OptSTM from rpython.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.debug import debug_start, debug_stop, debug_print @@ -35,9 +34,6 @@ def build_opt_chain(metainterp_sd, enable_opts): optimizations = [] unroll = 'unroll' in enable_opts # 'enable_opts' is normally a dict - if metainterp_sd.config.translation.stm: - optimizations.append(OptSTM()) - for name, opt in unroll_all_opts: if name in enable_opts: if opt is not None: diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ 
b/rpython/jit/metainterp/optimizeopt/heap.py @@ -275,14 +275,12 @@ opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return - if (opnum == rop.STM_TRANSACTION_BREAK or - opnum == rop.CALL_ASSEMBLER): - self._seen_guard_not_invalidated = False if (opnum == rop.CALL or opnum == rop.CALL_PURE or opnum == rop.COND_CALL or opnum == rop.CALL_MAY_FORCE or - opnum == rop.CALL_RELEASE_GIL): + opnum == rop.CALL_RELEASE_GIL or + opnum == rop.CALL_ASSEMBLER): if opnum == rop.CALL_ASSEMBLER: self._seen_guard_not_invalidated = False else: diff --git a/rpython/jit/metainterp/optimizeopt/stm.py b/rpython/jit/metainterp/optimizeopt/stm.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/stm.py +++ /dev/null @@ -1,94 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, ) -from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method -from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.metainterp.resoperation import rop, ResOperation - -class OptSTM(Optimization): - """ - This step removes a lot of uncecessary transaction_breaks (TBs) - emitted by pyjitpl from traces. We only want to keep these - unconditional TBs after external calls (identified by GUARD_NOT_FORCED) - because they are likely to return as inevitable transactions which - we want to break ASAP. - Guarded TBs are left in place, as they represent app-level loops - and are likely points to break between atomic transactions. - - The cached_ops is here to remove the virtualizable-forcing added - by pyjitpl before unconditional TBs. See tests. 
- """ - def __init__(self): - self.remove_next_gnf = False # guard_not_forced - self.keep_but_ignore_gnf = False - self.cached_ops = [] - - def propagate_forward(self, op): - dispatch_opt(self, op) - - def flush_cached(self): - while self.cached_ops: - self.emit_operation(self.cached_ops.pop(0)) - - def flush(self): - # just in case. it shouldn't be necessary - self.flush_cached() - - def default_emit(self, op): - self.flush_cached() - self.emit_operation(op) - - def _break_wanted(self): - is_loop = self.optimizer.loop.is_really_loop - return self.optimizer.stm_info.get('break_wanted', is_loop) - - def _set_break_wanted(self, val): - self.optimizer.stm_info['break_wanted'] = val - - def optimize_FORCE_TOKEN(self, op): - # if we have cached stuff, flush it. Not our case - self.flush_cached() - self.cached_ops.append(op) - - def optimize_SETFIELD_GC(self, op): - if not self.cached_ops: - # setfield not for force_token - self.emit_operation(op) - else: - assert len(self.cached_ops) == 1 - assert self.cached_ops[0].getopnum() == rop.FORCE_TOKEN - self.cached_ops.append(op) - - def optimize_STM_SHOULD_BREAK_TRANSACTION(self, op): - self.flush_cached() - self._set_break_wanted(False) - self.emit_operation(op) - - def optimize_STM_TRANSACTION_BREAK(self, op): - assert not self.remove_next_gnf - really_wanted = op.getarg(0).getint() - if really_wanted or self._break_wanted(): - self.flush_cached() - self._set_break_wanted(False) - self.emit_operation(op) - self.keep_but_ignore_gnf = True - else: - self.cached_ops = [] - self.remove_next_gnf = True - - def optimize_GUARD_NOT_FORCED(self, op): - self.flush_cached() - if self.remove_next_gnf: - self.remove_next_gnf = False - else: - if not self.keep_but_ignore_gnf: - self._set_break_wanted(True) - self.keep_but_ignore_gnf = False - self.emit_operation(op) - - def optimize_STM_HINT_COMMIT_SOON(self, op): - self.flush_cached() - self._set_break_wanted(True) - self.emit_operation(op) - - -dispatch_opt = 
make_dispatcher_method(OptSTM, 'optimize_', - default=OptSTM.default_emit) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_stm.py b/rpython/jit/metainterp/optimizeopt/test/test_stm.py deleted file mode 100644 --- a/rpython/jit/metainterp/optimizeopt/test/test_stm.py +++ /dev/null @@ -1,331 +0,0 @@ -from rpython.jit.metainterp.optimizeopt.test.test_optimizeopt import ( - BaseTestWithUnroll,) -from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin -from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.rtyper.lltypesystem import lltype, rclass, rffi, llmemory - - -class TestSTM(BaseTestWithUnroll, LLtypeMixin): - stm = True - - namespace = LLtypeMixin.namespace.copy() - namespace.update(locals()) - - - def test_unrolled_loop(self): - ops = """ - [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - self.optimize_loop(ops, ops, expected_preamble=ops) - - def test_really_wanted_tb(self): - ops = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - stm_transaction_break(1) - guard_not_forced() [] - - jump() - """ - preamble = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - stm_transaction_break(1) - guard_not_forced() [] - - jump() - """ - expected = """ - [] - stm_transaction_break(1) - guard_not_forced() [] - - jump() - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - - def test_unrolled_loop2(self): - ops = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - - jump() - """ - preamble = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - - jump() - """ - expected = """ - [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_not_disable_opt(self): - ops = """ - [p1] - i1 = getfield_gc(p1, descr=adescr) - - i0 = 
stm_should_break_transaction() - guard_false(i0) [] - jump(p1) - """ - preamble = """ - [p1] - i1 = getfield_gc(p1, descr=adescr) - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump(p1) - """ - expected = """ - [p1] - i0 = stm_should_break_transaction() - guard_false(i0) [] - - jump(p1) - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_dont_remove_first_tb(self): - ops = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - stm_transaction_break(0) - guard_not_forced() [] - stm_transaction_break(0) - guard_not_forced() [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - preamble = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - expected = """ - [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_add_tb_after_guard_not_forced(self): - ops = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - escape() # e.g. 
like a call_release_gil - guard_not_forced() [] - - stm_transaction_break(0) - guard_not_forced() [] - stm_transaction_break(0) - guard_not_forced() [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - preamble = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - escape() - guard_not_forced() [] - - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - expected = """ - [] - escape() - guard_not_forced() [] - - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_remove_force_token(self): - ops = """ - [p0] - p1 = force_token() - setfield_gc(p0, p1, descr=adescr) - stm_transaction_break(0) - guard_not_forced() [] - - p2 = force_token() - setfield_gc(p0, p2, descr=adescr) - stm_transaction_break(0) - guard_not_forced() [] - - p3 = force_token() - setfield_gc(p0, p3, descr=adescr) - stm_transaction_break(0) - guard_not_forced() [] - - escape() - - p4 = force_token() - setfield_gc(p0, p4, descr=adescr) - stm_transaction_break(0) - guard_not_forced() [] - - p6 = force_token() # not removed! - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump(p0) - """ - preamble = """ - [p0] - p1 = force_token() - setfield_gc(p0, p1, descr=adescr) - stm_transaction_break(0) - guard_not_forced() [] - - escape() - - p6 = force_token() # not removed! - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump(p0) - """ - expected = """ - [p0] - escape() - - p6 = force_token() # not removed! 
- - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump(p0) - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_not_remove_setfield(self): - ops = """ - [p0, p1] - setfield_gc(p0, p1, descr=adescr) - stm_transaction_break(0) - - p2 = force_token() - p3 = force_token() - jump(p0, p1) - """ - preamble = """ - [p0, p1] - setfield_gc(p0, p1, descr=adescr) - stm_transaction_break(0) - - p2 = force_token() - p3 = force_token() - jump(p0, p1) - """ - expected = """ - [p0, p1] - p2 = force_token() - p3 = force_token() - - setfield_gc(p0, p1, descr=adescr) # moved here by other stuff... - jump(p0, p1) - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) - - def test_stm_location_1(self): - # This tests setfield_gc on a non-virtual. On a virtual, it doesn't - # really matter, because STM conflicts are impossible anyway - ops = """ - [i1, p1] - setfield_gc(p1, i1, descr=adescr) {81} - call(i1, descr=nonwritedescr) {90} - jump(i1, p1) - """ - expected = """ - [i1, p1] - call(i1, descr=nonwritedescr) {90} - setfield_gc(p1, i1, descr=adescr) {81} - jump(i1, p1) - """ - self.optimize_loop(ops, expected) - - def test_add_tb_after_commit_soon(self): - ops = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - stm_hint_commit_soon() - - stm_transaction_break(0) - guard_not_forced() [] - stm_transaction_break(0) - guard_not_forced() [] - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - preamble = """ - [] - stm_transaction_break(0) - guard_not_forced() [] - - stm_hint_commit_soon() - - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - expected = """ - [] - stm_hint_commit_soon() - - stm_transaction_break(0) - guard_not_forced() [] - - i0 = stm_should_break_transaction() - guard_false(i0) [] - jump() - """ - self.optimize_loop(ops, expected, expected_preamble=preamble) diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE @@ -331,7 +331,7 @@ def get_name_from_address(self, addr): # hack try: - return "".join(addr.ptr.name)[:-1] # remove \x00 + return "".join(addr.ptr.name.chars) except AttributeError: return "" diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -75,7 +75,6 @@ start_label = loop.operations[0] if start_label.getopnum() == rop.LABEL: - loop.is_really_loop = True loop.operations = loop.operations[1:] # We need to emit the label op before import_state() as emitting it # will clear heap caches diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -186,52 +186,16 @@ raise AssertionError("bad result box type") # ------------------------------ - def _record_stm_transaction_break(self, really_wanted): - # records an unconditional stm_transaction_break - mi = self.metainterp - mi.vable_and_vrefs_before_residual_call() - mi._record_helper_nonpure_varargs( - rop.STM_TRANSACTION_BREAK, None, None, - [history.ConstInt(really_wanted)]) - mi.vrefs_after_residual_call() - mi.vable_after_residual_call() - # - if not really_wanted: - # we're about the return ConstInt(0), which will go into the - # jitcode's %iN variable. 
But it will be captured by the - # GUARD_NOT_FORCED's resume data too. It is essential that we - # don't capture the old, stale value! Also, store ConstInt(1) - # to make sure that upon resuming we'll see a result of 1 (XXX - # unsure if it's needed, but it shouldn't hurt). - self.make_result_of_lastop(ConstInt(1)) - # - mi.generate_guard(rop.GUARD_NOT_FORCED, None) - self.metainterp.heapcache.stm_break_done() - - - @arguments("int") - def opimpl_stm_should_break_transaction(self, if_there_is_no_other): - val = bool(if_there_is_no_other) - mi = self.metainterp - if val: - # app-level loop: only one of these per loop is really needed - resbox = history.BoxInt(0) - mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) - self.metainterp.heapcache.stm_break_done() - return resbox - else: - # between byte-code instructions: only keep if it is - # likely that we are inevitable here - if self.metainterp.heapcache.stm_break_wanted: - self._record_stm_transaction_break(False) - return ConstInt(0) @arguments() - def opimpl_stm_transaction_break(self): - # always wanted: inserted after we compile a bridge because there - # were just too many breaks and we failed the should_break&guard - # because of that - self._record_stm_transaction_break(True) + def opimpl_stm_should_break_transaction(self): + # XXX make it return BoxInt(1) instead of BoxInt(0) if there + # is an inevitable transaction, because it's likely that there + # will always be an inevitable transaction here + resbox = history.BoxInt(0) + mi = self.metainterp + mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) + return resbox @arguments() def opimpl_stm_hint_commit_soon(self): @@ -1855,8 +1819,6 @@ if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: resumedescr = compile.ResumeGuardForcedDescr(self.staticdata, self.jitdriver_sd) - # for detecting stm breaks that are needed - self.heapcache.invalidate_caches(opnum, resumedescr, moreargs) elif opnum == 
rop.GUARD_NOT_INVALIDATED: resumedescr = compile.ResumeGuardNotInvalidated() elif opnum == rop.GUARD_FUTURE_CONDITION: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -521,7 +521,6 @@ 'QUASIIMMUT_FIELD/1d', # [objptr], descr=SlowMutateDescr 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', - 'STM_TRANSACTION_BREAK/1', 'STM_READ/1', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -11,63 +11,10 @@ class STMTests: def test_simple(self): def g(): - return rstm.jit_stm_should_break_transaction(False) + return rstm.should_break_transaction() res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False - self.check_operations_history(stm_transaction_break=1, - stm_should_break_transaction=0) - - def test_not_removed(self): - import time - def g(): - time.sleep(0) - return rstm.jit_stm_should_break_transaction(False) - res = self.interp_operations(g, [], translationoptions={"stm":True}) - assert res == False - self.check_operations_history(stm_transaction_break=1, - call_may_force=1, - stm_should_break_transaction=0) - - def test_not_removed2(self): - def g(): - return rstm.jit_stm_should_break_transaction(True) - res = self.interp_operations(g, [], translationoptions={"stm":True}) - assert res == False - self.check_operations_history(stm_transaction_break=0, - stm_should_break_transaction=1) - - def test_transaction_break(self): - def g(): - rstm.jit_stm_transaction_break_point() - return 42 - self.interp_operations(g, [], translationoptions={"stm":True}) - self.check_operations_history({'stm_transaction_break':1, - 'guard_not_forced':1}) - - def test_heapcache(self): - import time - def g(): - 
rstm.jit_stm_should_break_transaction(True) # keep (start of loop) - rstm.jit_stm_should_break_transaction(False) - time.sleep(0) - rstm.jit_stm_should_break_transaction(False) # keep (after guard_not_forced) - rstm.jit_stm_should_break_transaction(False) - rstm.jit_stm_should_break_transaction(True) # keep (True) - rstm.jit_stm_should_break_transaction(True) # keep (True) - rstm.jit_stm_should_break_transaction(False) - rstm.hint_commit_soon() - rstm.jit_stm_should_break_transaction(False) # keep - rstm.jit_stm_should_break_transaction(False) - return 42 - res = self.interp_operations(g, [], translationoptions={"stm":True}) - assert res == 42 - self.check_operations_history({ - 'stm_transaction_break':2, - 'stm_hint_commit_soon':1, - 'stm_should_break_transaction':3, - 'guard_not_forced':3, - 'guard_no_exception':1, - 'call_may_force':1}) + self.check_operations_history(stm_should_break_transaction=1) def test_debug_merge_points(self): myjitdriver = JitDriver(greens = ['a'], reds = ['x', 'res']) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -483,12 +483,14 @@ name = 'jitdriver' inline_jit_merge_point = False _store_last_enter_jit = None + stm_report_location = None def __init__(self, greens=None, reds=None, virtualizables=None, get_jitcell_at=None, set_jitcell_at=None, get_printable_location=None, confirm_enter_jit=None, can_never_inline=None, should_unroll_one_iteration=None, - name='jitdriver', check_untranslated=True): + name='jitdriver', check_untranslated=True, + stm_report_location=None): if greens is not None: self.greens = greens self.name = name @@ -524,6 +526,8 @@ self.can_never_inline = can_never_inline self.should_unroll_one_iteration = should_unroll_one_iteration self.check_untranslated = check_untranslated + if stm_report_location is not None: + self.stm_report_location = stm_report_location def _freeze_(self): return True diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- 
a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -46,47 +46,13 @@ function with the interpreter's dispatch loop, this must be called (it turns into a marker in the caller's function). There is one automatically in any jit.jit_merge_point().""" - # special-cased below + # special-cased below: the emitted operation must be placed + # directly in the caller's graph def possible_transaction_break(): if stm_is_enabled(): if llop.stm_should_break_transaction(lltype.Bool): - llop.stm_transaction_break(lltype.Void) - -# Typical usage of the following two functions: -# -# just after jit_merge_point: -# if rstm.jit_stm_should_break_transaction(False): -# rstm.jit_stm_transaction_break_point() -# -# just before can_enter_jit: -# if rstm.jit_stm_should_break_transaction(True): -# rstm.jit_stm_transaction_break_point() -# -# resulting JIT trace (common case): -# ... -# call_release_gil(...) -# stm_transaction_break(0) # in-line, because we expect "inevitable" -# guard_not_forced() -# ... -# i1 = stm_should_break_transaction() -# guard_false(i1) # out-of-line, because rarely needed -# jump() -# -def jit_stm_transaction_break_point(): - # XXX REFACTOR AWAY - if we_are_translated(): - llop.jit_stm_transaction_break_point(lltype.Void) - - at specialize.arg(0) -def jit_stm_should_break_transaction(if_there_is_no_other): - # XXX REFACTOR AWAY - # if_there_is_no_other means that we use this point only - # if there is no other break point in the trace. - # If it is False, the point may be used if it comes right - # after a CALL_RELEASE_GIL - return llop.jit_stm_should_break_transaction(lltype.Bool, - if_there_is_no_other) + break_transaction() def hint_commit_soon(): """As the name says, just a hint. 
Maybe calling it @@ -104,7 +70,6 @@ def partial_commit_and_resume_other_threads(): pass # for now - at dont_look_inside def should_break_transaction(): return we_are_translated() and ( llop.stm_should_break_transaction(lltype.Bool)) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -943,7 +943,6 @@ op_stm_initialize = _stm_not_implemented op_stm_finalize = _stm_not_implemented op_stm_perform_transaction = _stm_not_implemented - op_stm_should_break_transaction = _stm_not_implemented op_stm_commit_transaction = _stm_not_implemented op_stm_begin_inevitable_transaction = _stm_not_implemented op_stm_barrier = _stm_not_implemented @@ -971,6 +970,9 @@ op_stm_stop_all_other_threads = _stm_not_implemented op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented + def op_stm_should_break_transaction(self): + return False + def op_threadlocalref_set(self, key, value): try: d = self.llinterpreter.tlrefsdict diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -436,7 +436,7 @@ 'stm_leave_callback_call': LLOp(), 'stm_transaction_break': LLOp(canmallocgc=True), 'stm_should_break_transaction': LLOp(sideeffects=False), - 'stm_rewind_jmp_frame': LLOp(), + 'stm_rewind_jmp_frame': LLOp(canrun=True), 'stm_set_transaction_length': LLOp(), 'stm_hint_commit_soon': LLOp(canrun=True), @@ -521,8 +521,6 @@ 'jit_assembler_call': LLOp(canrun=True, # similar to an 'indirect_call' canraise=(Exception,), canmallocgc=True), - 'jit_stm_transaction_break_point' : LLOp(canrun=True,canmallocgc=True), - 'jit_stm_should_break_transaction' : LLOp(canrun=True), # __________ GC operations __________ diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ 
b/rpython/rtyper/lltypesystem/opimpl.py @@ -715,10 +715,7 @@ def op_jit_assembler_call(funcptr, *args): return funcptr(*args) -def op_jit_stm_should_break_transaction(if_there_is_no_other): - return False - -def op_jit_stm_transaction_break_point(): +def op_stm_rewind_jmp_frame(): pass def op_stm_hint_commit_soon(): diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -7,7 +7,6 @@ 'stm_start_if_not_atomic', #'stm_partial_commit_and_resume_other_threads', # new priv_revision #'jit_assembler_call', - #'jit_stm_transaction_break_point', 'stm_enter_callback_call', 'stm_leave_callback_call', ]) diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -18,8 +18,7 @@ 'gc_adr_of_root_stack_top', 'gc_add_memory_pressure', 'weakref_create', 'weakref_deref', 'jit_assembler_call', 'gc_writebarrier', - 'shrink_array', 'jit_stm_transaction_break_point', - 'jit_stm_should_break_transaction', + 'shrink_array', 'threadlocalref_get', 'threadlocalref_set', ]) ALWAYS_ALLOW_OPERATIONS |= set(lloperation.enum_tryfold_ops()) From noreply at buildbot.pypy.org Sun Aug 17 19:36:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:36:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix tlc. Message-ID: <20140817173659.C06D21C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72847:e9196681b65b Date: 2014-08-17 18:20 +0200 http://bitbucket.org/pypy/pypy/changeset/e9196681b65b/ Log: Fix tlc. 
diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -229,8 +229,7 @@ def make_interp(supports_call, jitted=True): myjitdriver = JitDriver(greens = ['pc', 'code'], - reds = ['frame', 'pool'], - stm_do_transaction_breaks=True) + reds = ['frame', 'pool']) def interp(code='', pc=0, inputarg=0, pool=None): if not isinstance(code,str): @@ -250,9 +249,6 @@ if jitted: myjitdriver.jit_merge_point(frame=frame, code=code, pc=pc, pool=pool) - # nothing inbetween! - if rstm.jit_stm_should_break_transaction(False): - rstm.jit_stm_transaction_break_point() opcode = ord(code[pc]) pc += 1 stack = frame.stack @@ -353,8 +349,7 @@ pc += char2int(code[pc]) pc += 1 if jitted and old_pc > pc: - if rstm.jit_stm_should_break_transaction(True): - rstm.jit_stm_transaction_break_point() + rstm.possible_transaction_break() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) @@ -364,8 +359,7 @@ old_pc = pc pc += char2int(code[pc]) + 1 if jitted and old_pc > pc: - if rstm.jit_stm_should_break_transaction(True): - rstm.jit_stm_transaction_break_point() + rstm.possible_transaction_break() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) else: @@ -377,8 +371,7 @@ old_pc = pc pc += offset if jitted and old_pc > pc: - if rstm.jit_stm_should_break_transaction(True): - rstm.jit_stm_transaction_break_point() + rstm.possible_transaction_break() myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) From noreply at buildbot.pypy.org Sun Aug 17 19:37:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:37:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Revert this change Message-ID: <20140817173700.ECDF11C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72848:778a09fd4f39 Date: 2014-08-17 18:22 +0200 http://bitbucket.org/pypy/pypy/changeset/778a09fd4f39/ Log: Revert this change diff --git 
a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -759,10 +759,9 @@ # ____________________________________________________________ class History(object): - def __init__(self, metainterp_sd): + def __init__(self): self.inputargs = None self.operations = [] - self.config = metainterp_sd.config self.stm_location = None def record(self, opnum, argboxes, resbox, descr=None): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1849,7 +1849,7 @@ self.framestack[-1].pc = saved_pc def create_empty_history(self): - self.history = history.History(self.staticdata) + self.history = history.History() self.staticdata.stats.set_history(self.history) def _all_constants(self, *boxes): @@ -2453,7 +2453,7 @@ rstack._stack_criticalcode_start() try: self.portal_call_depth = -1 # always one portal around - self.history = history.History(self.staticdata) + self.history = history.History() inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, deadframe) self.history.inputargs = [box for box in inputargs_and_holes if box] From noreply at buildbot.pypy.org Sun Aug 17 19:37:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:37:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Oups. Looks like the merge in 22f849260e70 did that wrong. Message-ID: <20140817173702.1D18C1C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72849:5034d5a420d3 Date: 2014-08-17 18:29 +0200 http://bitbucket.org/pypy/pypy/changeset/5034d5a420d3/ Log: Oups. Looks like the merge in 22f849260e70 did that wrong. 
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -242,7 +242,6 @@ i = 0 while i < length: copy_item(source, dest, i + source_start, i + dest_start) - dest[i + dest_start] = source[i + source_start] i += 1 return source_addr = llmemory.cast_ptr_to_adr(source) From noreply at buildbot.pypy.org Sun Aug 17 19:37:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:37:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Remove outdated test Message-ID: <20140817173703.3E21E1C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72850:61b69f802758 Date: 2014-08-17 18:30 +0200 http://bitbucket.org/pypy/pypy/changeset/61b69f802758/ Log: Remove outdated test diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -534,30 +534,6 @@ ) assert h.getarrayitem(box1, index1, descr1) is box3 - def test_stm_break(self): - h = HeapCache() - assert h.stm_break_wanted - h.stm_break_done() - assert not h.stm_break_wanted - # loop headers - h.reset() - assert h.stm_break_wanted - h.stm_break_done() - assert not h.stm_break_wanted - # call that may make the transaction inevitable - h.invalidate_caches( - rop.CALL, FakeCallDescr(FakeEffectinfo.EF_RANDOM_EFFECTS), [box1] - ) - assert h.stm_break_wanted - h.stm_break_done() - # unknown op - h.invalidate_caches(rop.JIT_DEBUG, None, [box1, lengthbox2, box2]) - assert h.stm_break_wanted - h.stm_break_done() - # GUARD_NOT_FORCED - h.invalidate_caches(rop.GUARD_NOT_FORCED, None, []) - assert h.stm_break_wanted - def test_bug_missing_ignored_operations(self): h = HeapCache() h.new(box1) From noreply at buildbot.pypy.org Sun Aug 17 19:37:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:37:04 +0200 (CEST) Subject: [pypy-commit] pypy 
default: From stmgc-c7: put the "# Loop" and "# bridge" header in more dumped Message-ID: <20140817173704.5E8111C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72851:a134c3074fb6 Date: 2014-08-17 18:41 +0200 http://bitbucket.org/pypy/pypy/changeset/a134c3074fb6/ Log: From stmgc-c7: put the "# Loop" and "# bridge" header in more dumped logs than just the "jit-log-opt" one. diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -15,10 +15,14 @@ def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name=''): if type is None: debug_start("jit-log-noopt-loop") + debug_print("# Loop", number, '(%s)' % name, ":", "noopt", + "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-loop") elif type == "rewritten": debug_start("jit-log-rewritten-loop") + debug_print("# Loop", number, '(%s)' % name, ":", type, + "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-rewritten-loop") elif number == -2: @@ -37,12 +41,18 @@ descr=None, ops_offset=None): if extra == "noopt": debug_start("jit-log-noopt-bridge") + debug_print("# bridge out of Guard", + "0x%x" % compute_unique_id(descr), + "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-noopt-bridge") elif extra == "rewritten": debug_start("jit-log-rewritten-bridge") + debug_print("# bridge out of Guard", + "0x%x" % compute_unique_id(descr), + "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) - debug_stop("jit-log-rewritten-bridge") + debug_stop("jit-log-rewritten-bridge") elif extra == "compiling": debug_start("jit-log-compiling-bridge") logops = self._log_operations(inputargs, operations, ops_offset) diff --git 
a/rpython/jit/metainterp/test/test_logger.py b/rpython/jit/metainterp/test/test_logger.py --- a/rpython/jit/metainterp/test/test_logger.py +++ b/rpython/jit/metainterp/test/test_logger.py @@ -32,10 +32,11 @@ return log_stream.getvalue() class Logger(logger.Logger): - def log_loop(self, loop, namespace={}, ops_offset=None): + def log_loop(self, loop, namespace={}, ops_offset=None, name=''): self.namespace = namespace return capturing(logger.Logger.log_loop, self, - loop.inputargs, loop.operations, ops_offset=ops_offset) + loop.inputargs, loop.operations, ops_offset=ops_offset, + name=name) def _make_log_operations(self1): class LogOperations(logger.LogOperations): @@ -230,8 +231,9 @@ None: 40 } logger = Logger(self.make_metainterp_sd()) - output = logger.log_loop(loop, ops_offset=ops_offset) + output = logger.log_loop(loop, ops_offset=ops_offset, name="foo") assert output.strip() == """ +# Loop 0 (foo) : noopt with 3 ops [i0] +10: i2 = int_add(i0, 1) i4 = int_mul(i2, 2) From noreply at buildbot.pypy.org Sun Aug 17 19:37:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 19:37:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: hg merge default Message-ID: <20140817173707.714F91C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72852:e10ea4772e19 Date: 2014-08-17 18:45 +0200 http://bitbucket.org/pypy/pypy/changeset/e10ea4772e19/ Log: hg merge default diff too long, truncating to 2000 out of 14840 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so 
_ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? 
+ will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Sun Aug 17 20:14:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 17 Aug 2014 20:14:06 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the test and the code for stmrewrite Message-ID: <20140817181406.C26581C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72853:477a18370cb0 Date: 2014-08-17 20:13 +0200 http://bitbucket.org/pypy/pypy/changeset/477a18370cb0/ Log: Fix the test and the code for stmrewrite diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -23,14 +23,6 @@ self.newops.append(op) return # ---------- transaction 
breaks ---------- - if opnum == rop.STM_SHOULD_BREAK_TRANSACTION: - self.handle_should_break_transaction(op) - return - if opnum == rop.STM_TRANSACTION_BREAK: - self.emitting_an_operation_that_can_collect() - self.next_op_may_be_in_new_transaction() - self.newops.append(op) - return if opnum == rop.STM_HINT_COMMIT_SOON: self._do_stm_call('stm_hint_commit_soon', [], None, op.stm_location) @@ -84,14 +76,19 @@ self.next_op_may_be_in_new_transaction() self.newops.append(op) return - # ---------- jumps, finish, other ignored ops ---------- - if opnum in (rop.JUMP, rop.FINISH, rop.FORCE_TOKEN, + # ---------- other ignored ops ---------- + if opnum in (rop.STM_SHOULD_BREAK_TRANSACTION, rop.FORCE_TOKEN, rop.READ_TIMESTAMP, rop.MARK_OPAQUE_PTR, rop.JIT_DEBUG, rop.KEEPALIVE, rop.QUASIIMMUT_FIELD, rop.RECORD_KNOWN_CLASS, ): self.newops.append(op) return + # ---------- jump, finish ---------- + if opnum == rop.JUMP or opnum == rop.FINISH: + self.add_dummy_allocation() + self.newops.append(op) + return # ---------- fall-back ---------- # Check that none of the ops handled here can collect. 
# This is not done by the fallback here @@ -122,7 +119,7 @@ self.newops.append(op1) self.read_barrier_applied[v_ptr] = None - def handle_should_break_transaction(self, op): + def add_dummy_allocation(self): if not self.does_any_allocation: # do a fake allocation since this is needed to check # for requested safe-points: @@ -134,9 +131,6 @@ assert self._op_malloc_nursery is None # no ongoing allocation self.gen_malloc_nursery(size, v_result) - self.newops.append(op) - - def must_apply_write_barrier(self, val, v=None): return val not in self.write_barrier_applied diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -65,8 +65,10 @@ def check_rewrite(self, frm_operations, to_operations, **namespace): inev = ("call(ConstClass(stm_try_inevitable)," " descr=stm_try_inevitable_descr)") + dummyalloc = "p999 = call_malloc_nursery(16)" frm_operations = frm_operations.replace('$INEV', inev) to_operations = to_operations .replace('$INEV', inev) + to_operations = to_operations .replace('$DUMMYALLOC', dummyalloc) for name, value in self.gc_ll_descr.__dict__.items(): if name.endswith('descr') and name[1] == '2' and len(name) == 8: namespace[name] = value # "X2Ydescr" @@ -94,6 +96,7 @@ [] %s call(123, descr=cd) + $DUMMYALLOC jump() """ % ("$INEV" if inev else "",), cd=calldescr) @@ -106,6 +109,7 @@ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) + $DUMMYALLOC jump() """) @@ -118,6 +122,7 @@ [p1, i2] cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tzdescr) + $DUMMYALLOC jump() """) @@ -132,6 +137,7 @@ [p1, p2] cond_call_gc_wb(ConstPtr(t), descr=wbdescr) setfield_gc(ConstPtr(t), p2, descr=tzdescr) + $DUMMYALLOC jump() """, t=NULL) @@ -144,6 +150,7 @@ [p1] p2 = getfield_gc(p1, descr=tzdescr) stm_read(p1) + $DUMMYALLOC jump() """) @@ -163,6 +170,7 @@ p5 
= getfield_gc(p2, descr=tzdescr) stm_read(p2) p6 = getfield_gc(p1, descr=tzdescr) + $DUMMYALLOC jump() """) @@ -177,6 +185,7 @@ cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) p3 = getfield_gc(p1, descr=tzdescr) + $DUMMYALLOC jump(p3) """) @@ -196,6 +205,7 @@ cond_call_gc_wb(p2, descr=wbdescr) setfield_gc(p2, p0, descr=tzdescr) p4 = getfield_gc(p1, descr=tzdescr) + $DUMMYALLOC jump() """, t=NULL) @@ -274,6 +284,7 @@ setfield_gc(p1, p2, descr=tzdescr) cond_call_gc_wb(p3, descr=wbdescr) setfield_gc(p3, p4, descr=tzdescr) + $DUMMYALLOC jump() """) @@ -288,6 +299,7 @@ cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, p2, descr=tzdescr) setfield_gc(p1, i3, descr=tydescr) + $DUMMYALLOC jump() """) @@ -305,6 +317,7 @@ label(p1, i3) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i3, descr=tydescr) + $DUMMYALLOC jump(p1) """) @@ -317,6 +330,7 @@ """, """ [i1, i2] + $DUMMYALLOC jump() """) @@ -337,9 +351,10 @@ testcase = """ [i1, i2, p1, p2, f1] %s + $DUMMYALLOC finish() """ % op - self.check_rewrite(testcase, testcase) + self.check_rewrite(testcase.replace('$DUMMYALLOC', ''), testcase) def test_rewrite_getfield_gc_const(self): TP = lltype.GcArray(lltype.Signed) @@ -352,9 +367,9 @@ [p1] p2 = getfield_gc(ConstPtr(t), descr=tzdescr) stm_read(ConstPtr(t)) + $DUMMYALLOC jump(p2) """, t=NULL) - # XXX could do better: G2Rdescr def test_rewrite_getarrayitem_gc(self): self.check_rewrite(""" @@ -365,6 +380,7 @@ [p1, i2] i3 = getarrayitem_gc(p1, i2, descr=adescr) stm_read(p1) + $DUMMYALLOC jump(i3) """) @@ -377,6 +393,7 @@ [p1, i2] i3 = getinteriorfield_gc(p1, i2, descr=intzdescr) stm_read(p1) + $DUMMYALLOC jump(i3) """) @@ -392,6 +409,7 @@ stm_read(p1) i2 = getfield_gc(p2, descr=tydescr) stm_read(p2) + $DUMMYALLOC jump(p2, i2) """) @@ -411,7 +429,7 @@ i2 = int_add(i1, 1) cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, i2, descr=tydescr) - + $DUMMYALLOC jump(p1) """) @@ -438,6 +456,7 @@ call(p2, descr=calldescr1) cond_call_gc_wb(p1, descr=wbdescr) 
setfield_gc(p1, 5, descr=tydescr) + $DUMMYALLOC jump(p2) """, calldescr1=calldescr1) @@ -454,7 +473,7 @@ i3 = getfield_raw(i1, descr=tydescr) keepalive(i3) i4 = getfield_raw(i2, descr=tydescr) - + $DUMMYALLOC jump(i3, i4) """) @@ -470,7 +489,7 @@ """, """ [i1] i2 = getfield_raw(i1, descr=fdescr) - + $DUMMYALLOC jump(i2) """, fdescr=fdescr) @@ -488,7 +507,7 @@ label(i1, i2, i3) $INEV i4 = getfield_raw(i2, descr=tydescr) - + $DUMMYALLOC jump(i3, i4) """) @@ -503,7 +522,7 @@ $INEV i3 = getarrayitem_raw(i1, 5, descr=adescr) i4 = getarrayitem_raw(i2, i3, descr=adescr) - + $DUMMYALLOC jump(i3, i4) """) @@ -519,7 +538,7 @@ setarrayitem_gc(p1, i1, p2, descr=adescr) cond_call_gc_wb_array(p3, i3, descr=wbdescr) setarrayitem_gc(p3, i3, p4, descr=adescr) - + $DUMMYALLOC jump() """) @@ -537,7 +556,7 @@ i4 = read_timestamp() cond_call_gc_wb_array(p1, i3, descr=wbdescr) setarrayitem_gc(p1, i3, p3, descr=adescr) - + $DUMMYALLOC jump() """) @@ -555,7 +574,7 @@ i4 = read_timestamp() cond_call_gc_wb_array(p1, i3, descr=wbdescr) setinteriorfield_gc(p1, i3, p3, descr=intzdescr) - + $DUMMYALLOC jump() """) @@ -570,7 +589,7 @@ cond_call_gc_wb(p1, descr=wbdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) - + $DUMMYALLOC jump() """) @@ -585,6 +604,7 @@ [i2, i3] p1 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr) setfield_gc(p1, i3, descr=strlendescr) + cond_call_gc_wb(p1, descr=wbdescr) strsetitem(p1, i2, i3) unicodesetitem(p1, i2, i3) jump() @@ -600,6 +620,7 @@ [p1, i2, i3] i4=strgetitem(p1, i2) i5=unicodegetitem(p1, i2) + $DUMMYALLOC jump() """) @@ -623,6 +644,7 @@ cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) + $DUMMYALLOC jump(i2, p7) """, calldescr2=calldescr2) @@ -651,6 +673,7 @@ cond_call_gc_wb(p7, descr=wbdescr) setfield_gc(p7, 20, descr=tydescr) + $DUMMYALLOC jump(i2, p7) """ % op, calldescr2=calldescr2) @@ -664,6 +687,7 @@ [p1, i1, i2, i3] p2 = call_malloc_nursery_varsize(1, 1, i3, descr=strdescr) setfield_gc(p2, i3, descr=strlendescr) + 
cond_call_gc_wb(p2, descr=wbdescr) copystrcontent(p1, p2, i1, i2, i3) jump() """) @@ -677,6 +701,7 @@ [p1, p2, i1, i2, i3] cond_call_gc_wb(p2, descr=wbdescr) copystrcontent(p1, p2, i1, i2, i3) + $DUMMYALLOC jump() """) @@ -698,7 +723,7 @@ setfield_gc(p1, 10, descr=tydescr) %s setfield_gc(p1, 20, descr=tydescr) - + $DUMMYALLOC jump(p1) """ % op) @@ -734,6 +759,7 @@ cond_call_gc_wb(p1, descr=wbdescr) setfield_gc(p1, 20, descr=tydescr) + $DUMMYALLOC jump(p1) """ % (op, guard, tr_break), calldescr2=calldescr2) @@ -794,24 +820,18 @@ self.check_rewrite(""" [p1, p2] i1 = ptr_eq(p1, NULL) - jump(i1) """, """ [p1, p2] i1 = ptr_eq(p1, NULL) - - jump(i1) """) def test_ptr_eq(self): self.check_rewrite(""" [p1, p2] i1 = ptr_eq(p1, p2) - jump(i1) """, """ [p1, p2] i1 = ptr_eq(p1, p2) - - jump(i1) """) def test_instance_ptr_eq(self): @@ -822,7 +842,7 @@ """, """ [p1, p2] i1 = instance_ptr_eq(p1, p2) - + $DUMMYALLOC jump(i1) """) @@ -830,24 +850,18 @@ self.check_rewrite(""" [p1, p2] i1 = ptr_ne(p1, p2) - jump(i1) """, """ [p1, p2] i1 = ptr_ne(p1, p2) - - jump(i1) """) def test_instance_ptr_ne(self): self.check_rewrite(""" [p1, p2] i1 = instance_ptr_ne(p1, p2) - jump(i1) """, """ [p1, p2] i1 = instance_ptr_ne(p1, p2) - - jump(i1) """) # ----------- tests copied from rewrite.py ------------- @@ -856,10 +870,12 @@ self.check_rewrite(""" [p1] p0 = new(descr=sdescr) + jump() """, """ [p1] p0 = call_malloc_nursery(%(sdescr.size)d) setfield_gc(p0, 1234, descr=tiddescr) + jump() """) def test_rewrite_assembler_new3_to_malloc(self): @@ -868,6 +884,7 @@ p0 = new(descr=sdescr) p1 = new(descr=tdescr) p2 = new(descr=sdescr) + jump() """, """ [] p0 = call_malloc_nursery( \ @@ -877,18 +894,21 @@ setfield_gc(p1, 5678, descr=tiddescr) p2 = int_add(p1, %(tdescr.size)d) setfield_gc(p2, 1234, descr=tiddescr) + jump() """) def test_rewrite_assembler_new_array_fixed_to_malloc(self): self.check_rewrite(""" [] p0 = new_array(10, descr=adescr) + jump() """, """ [] p0 = call_malloc_nursery( \ 
%(adescr.basesize + 10 * adescr.itemsize)d) setfield_gc(p0, 4321, descr=tiddescr) setfield_gc(p0, 10, descr=alendescr) + jump() """) def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self): @@ -896,6 +916,7 @@ [] p0 = new(descr=sdescr) p1 = new_array(10, descr=adescr) + jump() """, """ [] p0 = call_malloc_nursery( \ @@ -905,17 +926,20 @@ p1 = int_add(p0, %(sdescr.size)d) setfield_gc(p1, 4321, descr=tiddescr) setfield_gc(p1, 10, descr=alendescr) + jump() """) def test_rewrite_assembler_round_up(self): self.check_rewrite(""" [] p0 = new_array(6, descr=bdescr) + jump() """, """ [] p0 = call_malloc_nursery(%(bdescr.basesize + 8)d) setfield_gc(p0, 8765, descr=tiddescr) setfield_gc(p0, 6, descr=blendescr) + jump() """) def test_rewrite_assembler_round_up_always(self): @@ -925,6 +949,7 @@ p1 = new_array(5, descr=bdescr) p2 = new_array(5, descr=bdescr) p3 = new_array(5, descr=bdescr) + jump() """, """ [] p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d) @@ -939,6 +964,7 @@ p3 = int_add(p2, %(bdescr.basesize + 8)d) setfield_gc(p3, 8765, descr=tiddescr) setfield_gc(p3, 5, descr=blendescr) + jump() """) def test_rewrite_assembler_minimal_size(self): @@ -946,12 +972,14 @@ [] p0 = new(descr=edescr) p1 = new(descr=edescr) + jump() """, """ [] p0 = call_malloc_nursery(%(4*WORD)d) setfield_gc(p0, 9000, descr=tiddescr) p1 = int_add(p0, %(2*WORD)d) setfield_gc(p1, 9000, descr=tiddescr) + jump() """) def test_rewrite_assembler_variable_size(self): @@ -1086,6 +1114,7 @@ p1 = newunicode(10) p2 = newunicode(i2) p3 = newstr(i2) + jump() """, """ [i2] p0 = call_malloc_nursery( \ @@ -1093,15 +1122,20 @@ unicodedescr.basesize + 10 * unicodedescr.itemsize)d) setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr) setfield_gc(p0, 14, descr=strlendescr) + p1 = int_add(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d) setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr) setfield_gc(p1, 10, descr=unicodelendescr) + p2 = call_malloc_nursery_varsize(2, 4, i2, \ descr=unicodedescr) 
setfield_gc(p2, i2, descr=unicodelendescr) + p3 = call_malloc_nursery_varsize(1, 1, i2, \ descr=strdescr) setfield_gc(p3, i2, descr=strlendescr) + + jump() """) def test_label_makes_size_unknown(self): @@ -1134,7 +1168,8 @@ [i0, f0] p0 = new_array(5, descr=bdescr) p1 = new_array(5, descr=bdescr) - stm_transaction_break(1) + call_may_force(12345, descr=calldescr2) # stm_transaction_break + guard_not_forced() [] p2 = new_array(5, descr=bdescr) """, """ [i0, f0] @@ -1146,7 +1181,8 @@ setfield_gc(p1, 8765, descr=tiddescr) setfield_gc(p1, 5, descr=blendescr) - stm_transaction_break(1) + call_may_force(12345, descr=calldescr2) # stm_transaction_break + guard_not_forced() [] p2 = call_malloc_nursery( \ %(bdescr.basesize + 8)d) @@ -1188,6 +1224,7 @@ %(comment)s stm_read(p1) i4 = getarrayitem_gc%(pure)s(p4, i1, descr=vdescr) %(comment)s stm_read(p4) + $DUMMYALLOC jump(p2) """ % d, uxdescr=uxdescr, vdescr=vdescr) @@ -1195,24 +1232,20 @@ self.check_rewrite(""" [p1, p2] setfield_gc(p1, p2, descr=tzdescr) {50} - jump() """, """ [p1, p2] cond_call_gc_wb(p1, descr=wbdescr) {50} setfield_gc(p1, p2, descr=tzdescr) {50} - jump() """) def test_stm_location_2(self): self.check_rewrite(""" [i1] i3 = getfield_raw(i1, descr=tydescr) {52} - jump(i3) """, """ [i1] $INEV {52} i3 = getfield_raw(i1, descr=tydescr) {52} - jump(i3) """) def test_stm_location_3(self): @@ -1240,8 +1273,8 @@ jump(i1) """, """ [] - p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() + $DUMMYALLOC jump(i1) """) @@ -1267,9 +1300,9 @@ jump(i1, i2) """, """ [] - p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() i2 = stm_should_break_transaction() + $DUMMYALLOC jump(i1, i2) """) @@ -1285,7 +1318,7 @@ p2 = call_malloc_nursery(%(tdescr.size)d) setfield_gc(p2, %(tdescr.tid)d, descr=tiddescr) label() - p99 = call_malloc_nursery(16) i1 = stm_should_break_transaction() + $DUMMYALLOC jump(i1) """) From noreply at buildbot.pypy.org Sun Aug 17 20:38:49 2014 From: noreply at buildbot.pypy.org (arigo) 
Date: Sun, 17 Aug 2014 20:38:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix the remaining tests from llsupport/test/ Message-ID: <20140817183849.0F0681C332E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72854:be297a6d9b9d Date: 2014-08-17 20:38 +0200 http://bitbucket.org/pypy/pypy/changeset/be297a6d9b9d/ Log: Fix the remaining tests from llsupport/test/ diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -74,7 +74,7 @@ self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery else: self.gc_minimal_size_in_nursery = 0 - if hasattr(gc_ll_descr, 'gcheaderbuilder'): + if getattr(gc_ll_descr, 'gcheaderbuilder', None) is not None: self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header else: self.gc_size_of_header = WORD # for tests diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -650,8 +650,7 @@ def can_merge_with_next_guard(self, op, i, operations): if (op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER or - op.getopnum() == rop.CALL_RELEASE_GIL or - op.getopnum() == rop.STM_TRANSACTION_BREAK): + op.getopnum() == rop.CALL_RELEASE_GIL): assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED return True if (not op.is_comparison() and diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -263,7 +263,7 @@ mallocs. (For all I know this latter case never occurs in practice, but better safe than sorry.) 
""" - if self.gc_ll_descr.fielddescr_tid is not None: + if self.gc_ll_descr.fielddescr_tid is not None: # framework GC assert (size & (WORD-1)) == 0, "size not aligned?" addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize') args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)] diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -184,7 +184,7 @@ rewriter = GcRewriterAssembler(gc_ll_descr, None) newops = rewriter.newops v_base = BoxPtr() - rewriter.gen_write_barrier(v_base) + rewriter.gen_write_barrier(v_base, stm_location=None) assert llop1.record == [] assert len(newops) == 1 assert newops[0].getopnum() == rop.COND_CALL_GC_WB diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -10,7 +10,7 @@ GcLLDescr_framework, GcCache, JitFrameDescrs from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.llsupport.symbolic import WORD -from rpython.jit.backend.llsupport import jitframe +from rpython.jit.backend.llsupport import jitframe, gcmap from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.annlowlevel import llhelper, llhelper_args @@ -315,11 +315,13 @@ def test_malloc_slowpath(self): def check(frame): - expected_size = 1 + # xxx for now we always have GCMAP_STM_LOCATION, but it should + # be added only if we really have stm in the first place + expected_size = 1 + gcmap.GCMAP_STM_LOCATION idx = 0 if self.cpu.backend_name.startswith('arm'): # jitframe fixed part is larger here - expected_size = 2 + expected_size = 2 + gcmap.GCMAP_STM_LOCATION idx = 1 assert len(frame.jf_gcmap) == expected_size if self.cpu.IS_64_BIT: @@ -355,11 +357,11 @@ 
def check(frame): x = frame.jf_gcmap if self.cpu.IS_64_BIT: - assert len(x) == 1 + assert len(x) == 1 + gcmap.GCMAP_STM_LOCATION assert (bin(x[0]).count('1') == '0b1111100000000000000001111111011110'.count('1')) else: - assert len(x) == 2 + assert len(x) == 2 + gcmap.GCMAP_STM_LOCATION s = bin(x[0]).count('1') + bin(x[1]).count('1') assert s == 16 # all but two registers + some stuff on stack diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -915,7 +915,16 @@ self.mc.MOV(self.heap_shadowstack_top(), ecx) else: # SUB [rootstacktop], WORD - self.mc.SUB(self.heap_shadowstack_top(), WORD) + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + rst = gcrootmap.get_root_stack_top_addr() + if rx86.fits_in_32bits(rst): + # SUB [rootstacktop], WORD + self.mc.SUB_ji8((self.SEGMENT_NO, rst), WORD) + else: + # MOV ebx, rootstacktop + # SUB [ebx], WORD + self.mc.MOV_ri(ebx.value, rst) + self.mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD) def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking @@ -2616,7 +2625,7 @@ else: self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" - def genop_guard_stm_transaction_break(self, op, guard_op, guard_token, + def XXXgenop_guard_stm_transaction_break(self, op, guard_op, guard_token, arglocs, result_loc): assert self.cpu.gc_ll_descr.stm if not we_are_translated(): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1292,7 +1292,7 @@ need_lower_byte=True) self.perform(op, [], resloc) - def consider_stm_transaction_break(self, op, guard_op): + def XXXconsider_stm_transaction_break(self, op, guard_op): self.perform_with_guard(op, guard_op, [], None) def consider_jump(self, op): @@ -1444,7 +1444,6 @@ or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER 
or num == rop.CALL_RELEASE_GIL - or num == rop.STM_TRANSACTION_BREAK or num == rop.STM_SHOULD_BREAK_TRANSACTION): oplist_with_guard[num] = value oplist[num] = add_none_argument(value) From noreply at buildbot.pypy.org Sun Aug 17 22:43:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 17 Aug 2014 22:43:40 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: fix api to accept {&func1, &func2}, probably could be cleaner. Message-ID: <20140817204340.3CA5D1C347F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72855:2f354c411ced Date: 2014-08-17 23:42 +0300 http://bitbucket.org/pypy/pypy/changeset/2f354c411ced/ Log: fix api to accept {&func1, &func2}, probably could be cleaner. diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -295,9 +295,8 @@ GenericUfunc = lltype.FuncType([rffi.CArrayPtr(rffi.CCHARP), npy_intpp, npy_intpp, rffi.VOIDP], lltype.Void) gufunctype = lltype.Ptr(GenericUfunc) -# XXX the signature is wrong, it should be an array of gufunctype, but -# XXX rffi.CArrayPtr(gufunctype) does not seem to work ??? - at cpython_api([gufunctype, rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, +# XXX single rffi.CArrayPtr(gufunctype) does not work, this does, why??? 
+ at cpython_api([rffi.CArrayPtr(rffi.CArrayPtr(gufunctype)), rffi.VOIDP, rffi.CCHARP, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, rffi.CCHARP, rffi.CCHARP, Py_ssize_t, rffi.CCHARP], PyObject) def _PyUFunc_FromFuncAndDataAndSignature(space, funcs, data, types, ntypes, @@ -305,8 +304,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - # XXX this should be 'funcs[i]' not 'funcs' - funcs_w[i] = W_GenericUFuncCaller(funcs) + funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i])) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -324,10 +324,10 @@ PyObject * retval; /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of function pointers in ndarrayobject.py? 
*/ - retval = _PyUFunc_FromFuncAndDataAndSignature(funcs[1], + printf("calling w/funcs[0] = 0x%x, funcs[1] = 0x%x \\n", funcs[0], funcs[1]); + retval = _PyUFunc_FromFuncAndDataAndSignature(funcs, array_data, types, 2, 1, 1, PyUFunc_None, "times2", "times2_docstring", 0, "()->()"); - Py_INCREF(retval); return retval; """ ), From noreply at buildbot.pypy.org Mon Aug 18 00:10:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Aug 2014 00:10:06 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: silence a non-fatal build error on windows Message-ID: <20140817221006.D19EA1C059C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72856:a7730d9255c3 Date: 2014-08-18 00:30 +0300 http://bitbucket.org/pypy/pypy/changeset/a7730d9255c3/ Log: silence a non-fatal build error on windows diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -306,9 +306,10 @@ return PyPyJitPolicy(pypy_hooks) def get_entry_point(self, config): - from pypy.tool.lib_pypy import import_from_lib_pypy - rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') - rebuild.try_rebuild() + if sys.platform != 'win32': + from pypy.tool.lib_pypy import import_from_lib_pypy + rebuild = import_from_lib_pypy('ctypes_config_cache/rebuild') + rebuild.try_rebuild() space = make_objspace(config) From noreply at buildbot.pypy.org Mon Aug 18 00:10:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Aug 2014 00:10:08 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: translation fixes Message-ID: <20140817221008.0BA781C059C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72857:28e42d741c8a Date: 2014-08-18 01:09 +0300 http://bitbucket.org/pypy/pypy/changeset/28e42d741c8a/ Log: translation fixes diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ 
b/pypy/module/cpyext/ndarrayobject.py @@ -312,6 +312,9 @@ w_signature = rffi.charp2str(signature) w_doc = rffi.charp2str(doc) w_name = rffi.charp2str(name) - ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, - w_signature, identity, w_name, w_doc) + w_nin = int(nin) + w_nout = int(nout) + w_identity = space.wrap(identity) + ufunc_generic = ufuncs.frompyfunc(space, w_funcs, w_nin, w_nout, w_dtypes, + w_signature, w_identity, w_name, w_doc) return ufunc_generic From noreply at buildbot.pypy.org Mon Aug 18 01:05:41 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: add missing name attribute for MD5Type Message-ID: <20140817230541.846C71C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72858:18c94a38e38e Date: 2014-08-17 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/18c94a38e38e/ Log: add missing name attribute for MD5Type diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py --- a/pypy/module/_md5/interp_md5.py +++ b/pypy/module/_md5/interp_md5.py @@ -52,6 +52,7 @@ copy = interp2app(W_MD5.copy_w), digest_size = 16, block_size = 64, + name = 'md5', __doc__ = """md5(arg) -> return new md5 object. If arg is present, the method call update(arg) is made.""") diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -19,6 +19,12 @@ """) + def test_name(self): + """ + md5.name should be 'md5'. + """ + assert self.md5.md5().name == 'md5' + def test_digest_size(self): """ md5.digest_size should be 16. 
From noreply at buildbot.pypy.org Mon Aug 18 01:05:42 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:42 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: fix unbound variable Message-ID: <20140817230542.B568D1C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72859:7bdae69fd02e Date: 2014-08-17 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7bdae69fd02e/ Log: fix unbound variable diff --git a/lib-python/3/test/test_hashlib.py b/lib-python/3/test/test_hashlib.py --- a/lib-python/3/test/test_hashlib.py +++ b/lib-python/3/test/test_hashlib.py @@ -142,7 +142,7 @@ def test_hexdigest(self): for cons in self.hash_constructors: h = cons() - assert isinstance(h.digest(), bytes), name + assert isinstance(h.digest(), bytes), cons.__name__ self.assertEqual(hexstr(h.digest()), h.hexdigest()) def test_large_update(self): From noreply at buildbot.pypy.org Mon Aug 18 01:05:44 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:44 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: port _sha256.py to py3 Message-ID: <20140817230544.0539A1C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72860:27a13b5357da Date: 2014-08-17 19:49 +0200 http://bitbucket.org/pypy/pypy/changeset/27a13b5357da/ Log: port _sha256.py to py3 diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py --- a/lib_pypy/_sha256.py +++ b/lib_pypy/_sha256.py @@ -201,7 +201,7 @@ dig = [] for i in sha_info['digest']: dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ]) - return ''.join([chr(i) for i in dig]) + return bytes(dig) class sha256(object): digest_size = digestsize = SHA_DIGESTSIZE @@ -219,7 +219,7 @@ return sha_final(self._sha.copy())[:self._sha['digestsize']] def hexdigest(self): - return ''.join(['%.2x' % ord(i) for i in self.digest()]) + return ''.join(['%.2x' % i for i in self.digest()]) def copy(self): new = 
sha256.__new__(sha256) @@ -240,7 +240,7 @@ return new def test(): - a_str = "just a test string" + a_str = b"just a test string" assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest() assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest() From noreply at buildbot.pypy.org Mon Aug 18 01:05:45 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:45 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: port _sha512.py module test to py3 Message-ID: <20140817230545.4654A1C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72861:9852a31d49d9 Date: 2014-08-17 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/9852a31d49d9/ Log: port _sha512.py module test to py3 diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py --- a/lib_pypy/_sha512.py +++ b/lib_pypy/_sha512.py @@ -270,7 +270,7 @@ def test(): import _sha512 - a_str = "just a test string" + a_str = b"just a test string" assert _sha512.sha512().hexdigest() == sha512().hexdigest() assert _sha512.sha512(a_str).hexdigest() == sha512(a_str).hexdigest() From noreply at buildbot.pypy.org Mon Aug 18 01:05:46 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:46 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: add .name attribute for all instances of shaXXX classes Message-ID: <20140817230546.8E0981C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72862:98b3c0bab1fe Date: 2014-08-17 20:14 +0200 http://bitbucket.org/pypy/pypy/changeset/98b3c0bab1fe/ Log: add .name attribute for all instances of shaXXX classes diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py --- a/lib_pypy/_sha1.py +++ b/lib_pypy/_sha1.py @@ -123,6 +123,8 @@ def __init__(self): "Initialisation." + self.name = 'sha' + # Initial message length in bits(!). 
self.length = 0 self.count = [0, 0] @@ -349,6 +351,7 @@ """ crypto = sha() + crypto.name = 'sha1' if arg: crypto.update(arg) diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py --- a/lib_pypy/_sha256.py +++ b/lib_pypy/_sha256.py @@ -208,6 +208,7 @@ block_size = SHA_BLOCKSIZE def __init__(self, s=None): + self.name = 'sha256' self._sha = sha_init() if s: sha_update(self._sha, s) @@ -230,6 +231,7 @@ digest_size = digestsize = 28 def __init__(self, s=None): + self.name = 'sha224' self._sha = sha224_init() if s: sha_update(self._sha, s) diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py --- a/lib_pypy/_sha512.py +++ b/lib_pypy/_sha512.py @@ -236,6 +236,7 @@ block_size = SHA_BLOCKSIZE def __init__(self, s=None): + self.name = 'sha512' self._sha = sha_init() if s: sha_update(self._sha, s) @@ -258,6 +259,7 @@ digest_size = digestsize = 48 def __init__(self, s=None): + self.name = 'sha384' self._sha = sha384_init() if s: sha_update(self._sha, s) From noreply at buildbot.pypy.org Mon Aug 18 01:05:47 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Mon, 18 Aug 2014 01:05:47 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes3: add tests for .name attribute on sha objects Message-ID: <20140817230547.B3C8A1C33E2@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes3 Changeset: r72863:51ec894e3d52 Date: 2014-08-17 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/51ec894e3d52/ Log: add tests for .name attribute on sha objects diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -37,3 +37,30 @@ assert _sha.sha1().digest_size == 20 assert _sha.sha1().digestsize == 20 assert _sha.sha1().block_size == 64 + + assert _sha.sha().name == 'sha' + assert _sha.sha1().name == 'sha1' + + +class AppTestSHA256: + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha256 = 
import_lib_pypy(cls.space, '_sha256') + + def test_attributes(self): + _sha256 = self._sha256 + assert _sha256.sha224().name == 'sha224' + assert _sha256.sha256().name == 'sha256' + + +class AppTestSHA512: + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha512 = import_lib_pypy(cls.space, '_sha512') + + def test_attributes(self): + _sha512 = self._sha512 + assert _sha512.sha384().name == 'sha384' + assert _sha512.sha512().name == 'sha512' From noreply at buildbot.pypy.org Mon Aug 18 01:05:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 18 Aug 2014 01:05:49 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes3 (pull request #270) Message-ID: <20140817230549.01B711C33E2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72864:0c8f0c10188c Date: 2014-08-17 16:05 -0700 http://bitbucket.org/pypy/pypy/changeset/0c8f0c10188c/ Log: Merged in numerodix/pypy/py3.3-fixes3 (pull request #270) py3.3: fixes for failing hashlib tests diff --git a/lib-python/3/test/test_hashlib.py b/lib-python/3/test/test_hashlib.py --- a/lib-python/3/test/test_hashlib.py +++ b/lib-python/3/test/test_hashlib.py @@ -142,7 +142,7 @@ def test_hexdigest(self): for cons in self.hash_constructors: h = cons() - assert isinstance(h.digest(), bytes), name + assert isinstance(h.digest(), bytes), cons.__name__ self.assertEqual(hexstr(h.digest()), h.hexdigest()) def test_large_update(self): diff --git a/lib_pypy/_sha1.py b/lib_pypy/_sha1.py --- a/lib_pypy/_sha1.py +++ b/lib_pypy/_sha1.py @@ -123,6 +123,8 @@ def __init__(self): "Initialisation." + self.name = 'sha' + # Initial message length in bits(!). 
self.length = 0 self.count = [0, 0] @@ -349,6 +351,7 @@ """ crypto = sha() + crypto.name = 'sha1' if arg: crypto.update(arg) diff --git a/lib_pypy/_sha256.py b/lib_pypy/_sha256.py --- a/lib_pypy/_sha256.py +++ b/lib_pypy/_sha256.py @@ -201,13 +201,14 @@ dig = [] for i in sha_info['digest']: dig.extend([ ((i>>24) & 0xff), ((i>>16) & 0xff), ((i>>8) & 0xff), (i & 0xff) ]) - return ''.join([chr(i) for i in dig]) + return bytes(dig) class sha256(object): digest_size = digestsize = SHA_DIGESTSIZE block_size = SHA_BLOCKSIZE def __init__(self, s=None): + self.name = 'sha256' self._sha = sha_init() if s: sha_update(self._sha, s) @@ -219,7 +220,7 @@ return sha_final(self._sha.copy())[:self._sha['digestsize']] def hexdigest(self): - return ''.join(['%.2x' % ord(i) for i in self.digest()]) + return ''.join(['%.2x' % i for i in self.digest()]) def copy(self): new = sha256.__new__(sha256) @@ -230,6 +231,7 @@ digest_size = digestsize = 28 def __init__(self, s=None): + self.name = 'sha224' self._sha = sha224_init() if s: sha_update(self._sha, s) @@ -240,7 +242,7 @@ return new def test(): - a_str = "just a test string" + a_str = b"just a test string" assert 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == sha256().hexdigest() assert 'd7b553c6f09ac85d142415f857c5310f3bbbe7cdd787cce4b985acedd585266f' == sha256(a_str).hexdigest() diff --git a/lib_pypy/_sha512.py b/lib_pypy/_sha512.py --- a/lib_pypy/_sha512.py +++ b/lib_pypy/_sha512.py @@ -236,6 +236,7 @@ block_size = SHA_BLOCKSIZE def __init__(self, s=None): + self.name = 'sha512' self._sha = sha_init() if s: sha_update(self._sha, s) @@ -258,6 +259,7 @@ digest_size = digestsize = 48 def __init__(self, s=None): + self.name = 'sha384' self._sha = sha384_init() if s: sha_update(self._sha, s) @@ -270,7 +272,7 @@ def test(): import _sha512 - a_str = "just a test string" + a_str = b"just a test string" assert _sha512.sha512().hexdigest() == sha512().hexdigest() assert _sha512.sha512(a_str).hexdigest() == 
sha512(a_str).hexdigest() diff --git a/pypy/module/_md5/interp_md5.py b/pypy/module/_md5/interp_md5.py --- a/pypy/module/_md5/interp_md5.py +++ b/pypy/module/_md5/interp_md5.py @@ -52,6 +52,7 @@ copy = interp2app(W_MD5.copy_w), digest_size = 16, block_size = 64, + name = 'md5', __doc__ = """md5(arg) -> return new md5 object. If arg is present, the method call update(arg) is made.""") diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -19,6 +19,12 @@ """) + def test_name(self): + """ + md5.name should be 'md5'. + """ + assert self.md5.md5().name == 'md5' + def test_digest_size(self): """ md5.digest_size should be 16. diff --git a/pypy/module/test_lib_pypy/test_sha_extra.py b/pypy/module/test_lib_pypy/test_sha_extra.py --- a/pypy/module/test_lib_pypy/test_sha_extra.py +++ b/pypy/module/test_lib_pypy/test_sha_extra.py @@ -37,3 +37,30 @@ assert _sha.sha1().digest_size == 20 assert _sha.sha1().digestsize == 20 assert _sha.sha1().block_size == 64 + + assert _sha.sha().name == 'sha' + assert _sha.sha1().name == 'sha1' + + +class AppTestSHA256: + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha256 = import_lib_pypy(cls.space, '_sha256') + + def test_attributes(self): + _sha256 = self._sha256 + assert _sha256.sha224().name == 'sha224' + assert _sha256.sha256().name == 'sha256' + + +class AppTestSHA512: + spaceconfig = dict(usemodules=('struct',)) + + def setup_class(cls): + cls.w__sha512 = import_lib_pypy(cls.space, '_sha512') + + def test_attributes(self): + _sha512 = self._sha512 + assert _sha512.sha384().name == 'sha384' + assert _sha512.sha512().name == 'sha512' From noreply at buildbot.pypy.org Mon Aug 18 06:58:02 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 18 Aug 2014 06:58:02 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: fix 'data' and 'identity' handling Message-ID: 
<20140818045802.44FD01C347F@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72865:481393e4eca5 Date: 2014-08-18 07:57 +0300 http://bitbucket.org/pypy/pypy/changeset/481393e4eca5/ Log: fix 'data' and 'identity' handling diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -264,15 +264,15 @@ CCHARP_SIZE = _get_bitsize('P') / 8 class W_GenericUFuncCaller(W_Root): - def __init__(self, func): + def __init__(self, func, data): self.func = func + self.data = data def descr_call(self, space, __args__): args_w, kwds_w = __args__.unpack() dataps = alloc_raw_storage(CCHARP_SIZE * len(args_w), track_allocation=False) dims = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False) steps = alloc_raw_storage(LONG_SIZE * len(args_w), track_allocation=False) - user_data = None for i in range(len(args_w)): arg_i = args_w[i] assert isinstance(arg_i, W_NDimArray) @@ -282,7 +282,7 @@ raw_storage_setitem(steps, LONG_SIZE * i, rffi.cast(rffi.LONG, arg_i.get_dtype().elsize)) try: self.func(rffi.cast(rffi.CArrayPtr(rffi.CCHARP), dataps), - rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), user_data) + rffi.cast(npy_intpp, dims), rffi.cast(npy_intpp, steps), self.data) finally: free_raw_storage(dataps, track_allocation=False) free_raw_storage(dims, track_allocation=False) @@ -304,7 +304,7 @@ funcs_w = [None] * ntypes dtypes_w = [None] * ntypes * (nin + nout) for i in range(ntypes): - funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i])) + funcs_w[i] = W_GenericUFuncCaller(rffi.cast(gufunctype, funcs[i]), data) for i in range(ntypes*(nin+nout)): dtypes_w[i] = get_dtype_cache(space).dtypes_by_num[ord(types[i])] w_funcs = space.newlist(funcs_w) @@ -312,9 +312,7 @@ w_signature = rffi.charp2str(signature) w_doc = rffi.charp2str(doc) w_name = rffi.charp2str(name) - w_nin = int(nin) - w_nout = int(nout) w_identity = 
space.wrap(identity) - ufunc_generic = ufuncs.frompyfunc(space, w_funcs, w_nin, w_nout, w_dtypes, + ufunc_generic = ufuncs.frompyfunc(space, w_funcs, nin, nout, w_dtypes, w_signature, w_identity, w_name, w_doc) return ufunc_generic diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -1018,9 +1018,12 @@ if space.is_none(w_identity): identity = None - else: + elif space.isinstance_w(w_identity, space.w_int): identity = \ - descriptor.get_dtype_cache(space).w_longdtype.box(w_identity) + descriptor.get_dtype_cache(space).w_longdtype.box(space.int_w(w_identity)) + else: + raise oefmt(space.w_ValueError, + 'identity must be None or an int') w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature, match_dtypes=match_dtypes) From noreply at buildbot.pypy.org Mon Aug 18 09:55:18 2014 From: noreply at buildbot.pypy.org (groggi) Date: Mon, 18 Aug 2014 09:55:18 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: create a new AddressStack only if needed Message-ID: <20140818075518.53D481C34DB@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72866:e9322b528a2c Date: 2014-08-18 09:54 +0200 http://bitbucket.org/pypy/pypy/changeset/e9322b528a2c/ Log: create a new AddressStack only if needed diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2124,12 +2124,13 @@ # # get rid of objects pointing to pinned objects that were not # visited - new_old_objects_pointing_to_pinned = self.AddressStack() - self.old_objects_pointing_to_pinned.foreach( - self._sweep_old_objects_pointing_to_pinned, - new_old_objects_pointing_to_pinned) - self.old_objects_pointing_to_pinned.delete() - self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned + if 
self.old_objects_pointing_to_pinned.non_empty(): + new_old_objects_pointing_to_pinned = self.AddressStack() + self.old_objects_pointing_to_pinned.foreach( + self._sweep_old_objects_pointing_to_pinned, + new_old_objects_pointing_to_pinned) + self.old_objects_pointing_to_pinned.delete() + self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: From noreply at buildbot.pypy.org Mon Aug 18 10:42:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 10:42:18 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: import stmgc/3bfb99304c6d Message-ID: <20140818084218.47DC21C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72867:a674757895ed Date: 2014-08-18 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a674757895ed/ Log: import stmgc/3bfb99304c6d diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -29376f500349 +e85ce411f190 diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c --- a/rpython/translator/stm/src_stm/stm/rewind_setjmp.c +++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.c @@ -38,8 +38,17 @@ size_t stack_size, ssstack_size; assert(rjthread->head != NULL); - stop = rjthread->head->frame_base; ssstop = rjthread->head->shadowstack_base; + if (((long)ssstop) & 1) { + /* PyPy's JIT: 'head->frame_base' is missing; use directly 'head', + which should be at the end of the frame (and doesn't need itself + to be copied because it contains immutable data only) */ + ssstop = ((char *)ssstop) - 1; + stop = (char *)rjthread->head; + } + else { + stop = rjthread->head->frame_base; + } assert(stop >= base); assert(ssstop <= ssbase); stack_size = stop - base; diff --git 
a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h --- a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h +++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h @@ -54,9 +54,12 @@ ************************************************************/ typedef struct _rewind_jmp_buf { - char *frame_base; char *shadowstack_base; struct _rewind_jmp_buf *prev; + char *frame_base; + /* NB: PyPy's JIT has got details of this structure hard-coded, + as follows: it uses 2 words only (so frame_base is invalid) + and sets the lowest bit of 'shadowstack_base' to tell this */ } rewind_jmp_buf; typedef struct { @@ -72,6 +75,7 @@ /* remember the current stack and ss_stack positions */ #define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + assert((((long)(ss)) & 1) == 0); \ (rjbuf)->frame_base = __builtin_frame_address(0); \ (rjbuf)->shadowstack_base = (char *)(ss); \ (rjbuf)->prev = (rjthread)->head; \ From noreply at buildbot.pypy.org Mon Aug 18 10:42:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 10:42:19 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: in-progress Message-ID: <20140818084219.87BC51C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72868:8ff5b23d8b84 Date: 2014-08-18 10:41 +0200 http://bitbucket.org/pypy/pypy/changeset/8ff5b23d8b84/ Log: in-progress diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -16,7 +16,7 @@ # +--------------------+ <== aligned to 16 bytes # | return address | # +--------------------+ ------------------------. -# | resume buf (if STM)| STM_FRAME_FIXED_SIZE | +# | rewind_jmp_buf(STM)| STM_FRAME_FIXED_SIZE | # +--------------------+ ----------------------. | # | saved regs | FRAME_FIXED_SIZE | | # +--------------------+ --------------------. 
| | @@ -46,18 +46,9 @@ assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 -# The STM resume buffer (on x86-64) is four words wide. Actually, clang -# uses three words (see test_stm.py): rbp, rip, rsp. But the value of -# rbp is not interesting for the JIT-generated machine code. So the -# STM_JMPBUF_OFS is the offset from the stack top to the start of the -# buffer, with only words at offset +1 and +2 in this buffer being -# meaningful. We use ebp, i.e. the word at offset +0, to store the -# resume counter. - -STM_RESUME_BUF_WORDS = 4 -STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_RESUME_BUF_WORDS -STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE -STM_JMPBUF_OFS_RBP = STM_JMPBUF_OFS + 0 * WORD -STM_JMPBUF_OFS_RIP = STM_JMPBUF_OFS + 1 * WORD -STM_JMPBUF_OFS_RSP = STM_JMPBUF_OFS + 2 * WORD -STM_OLD_SHADOWSTACK = STM_JMPBUF_OFS + 3 * WORD +# The STM rewind_jmp_buf (on x86-64) is two words wide: +STM_REWIND_JMP_BUF_WORDS = 2 +STM_FRAME_FIXED_SIZE = FRAME_FIXED_SIZE + STM_REWIND_JMP_BUF_WORDS +STM_JMPBUF_OFS = WORD * FRAME_FIXED_SIZE +STM_SHADOWSTACK_BASE_OFS = STM_JMPBUF_OFS + 0 * WORD +STM_PREV_OFS = STM_JMPBUF_OFS + 1 * WORD diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -19,8 +19,7 @@ from rpython.jit.backend.x86.arch import ( FRAME_FIXED_SIZE, WORD, IS_X86_64, JITFRAME_FIXED_SIZE, IS_X86_32, PASS_ON_MY_FRAME, STM_FRAME_FIXED_SIZE, STM_JMPBUF_OFS, - STM_JMPBUF_OFS_RIP, STM_JMPBUF_OFS_RSP, STM_JMPBUF_OFS_RBP, - STM_OLD_SHADOWSTACK) + STM_SHADOWSTACK_BASE_OFS, STM_PREV_OFS) from rpython.jit.backend.x86.regloc import (eax, ecx, edx, ebx, esp, ebp, esi, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, r8, r9, r10, r11, edi, r12, r13, r14, r15, X86_64_SCRATCH_REG, X86_64_XMM_SCRATCH_REG, @@ -886,45 +885,83 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap return self.heap_tl(gcrootmap.get_root_stack_top_addr()) + def 
heap_rjthread(self): + """STM: Return an AddressLoc for '&stm_thread_local.rjthread'.""" + return self.heap_tl(rstm.adr_rjthread) + + def heap_rjthread_head(self): + """STM: Return an AddressLoc for '&stm_thread_local.rjthread.head'.""" + return self.heap_tl(rstm.adr_rjthread_head) + + def heap_rjthread_moved_off_base(self): + """STM: AddressLoc for '&stm_thread_local.rjthread.moved_off_base'.""" + return self.heap_tl(rstm.adr_rjthread_moved_off_base) + def _call_header_shadowstack(self): # put the frame in ebp on the shadowstack for the GC to find # (ebp is a writeable object and does not need a write-barrier # again (ensured by the code calling the loop)) - self.mc.MOV(ebx, self.heap_shadowstack_top()) + mc = self.mc + mc.MOV(ebx, self.heap_shadowstack_top()) + mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), ebp.value) + # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: - self.mc.MOV_mi((self.SEGMENT_NO, ebx.value, 0), - rstm.stm_stack_marker_new) # MOV [ebx], MARKER_NEW - self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, WORD), - ebp.value) # MOV [ebx+WORD], ebp - self.mc.MOV_sr(STM_OLD_SHADOWSTACK, ebx.value) - # MOV [esp+xx], ebx - self.mc.ADD_ri(ebx.value, 2 * WORD) + # inlining stm_rewind_jmp_enterframe() + r11v = X86_64_SCRATCH_REG.value + rjh = self.heap_rjthread_head() + mc.ADD_ri8(ebx.value, 1) # ADD ebx, 1 + mc.MOV_rm(r11v, rjh) # MOV r11, [rjthread.head] + mc.MOV_sr(STM_SHADOWSTACK_BASE_OFS, ebx.value) + # MOV [esp+ssbase], ebx + mc.ADD_ri8(ebx.value, WORD-1) # ADD ebx, 7 + mc.MOV_sr(STM_PREV_OFS, r11v) # MOV [esp+prev], r11 + mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx + mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] + mc.MOV_mr(rjh, r11v) # MOV [rjthread.head], r11 + # else: - self.mc.MOV_mr((self.SEGMENT_NO, ebx.value, 0), - ebp.value) # MOV [ebx], ebp - self.mc.ADD_ri(ebx.value, WORD) - self.mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx + mc.ADD_ri(ebx.value, WORD) # ADD ebx, WORD + 
mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx def _call_footer_shadowstack(self): + mc = self.mc if self.cpu.gc_ll_descr.stm: # STM: in the rare case where we need realloc_frame, the new # frame is pushed on top of the old one. It's even possible # that this occurs more than once. So we have to restore # the old shadowstack by looking up its original saved value. - self.mc.MOV_rs(ecx.value, STM_OLD_SHADOWSTACK) - self.mc.MOV(self.heap_shadowstack_top(), ecx) + # The rest of this is inlining stm_rewind_jmp_leaveframe(). + r11v = X86_64_SCRATCH_REG.value + rjh = self.heap_rjthread_head() + rjmovd_o_b = self.heap_rjthread_moved_off_base() + adr_rjthread_moved_off_base + mc.MOV_rs(r11v, STM_SHADOWSTACK_BASE_OFS) # MOV r11, [esp+ssbase] + mc.MOV_rs(ebx.value, STM_PREV_OFS) # MOV ebx, [esp+prev] + mc.MOV(self.heap_shadowstack_top(), r11v) # MOV [rootstacktop], r11 + mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] + mc.MOV_mr(rjh, ebx.value) # MOV [rjthread.head], ebx + mc.CMP_rm(r11v, rjmovd_o_b) # CMP r11, [rjth.movd_o_b] + mc.J_il8(rx86.Conditions['NE'], 0) # JNE label_below + jne_location = mc.get_relative_pos() + # + mc.CALL(imm(rstm.adr_pypy__rewind_jmp_copy_stack_slice)) + # + # patch the JNE above + offset = mc.get_relative_pos() - jne_location + assert 0 < offset <= 127 + mc.overwrite(jne_location-1, chr(offset)) else: # SUB [rootstacktop], WORD gcrootmap = self.cpu.gc_ll_descr.gcrootmap rst = gcrootmap.get_root_stack_top_addr() if rx86.fits_in_32bits(rst): # SUB [rootstacktop], WORD - self.mc.SUB_ji8((self.SEGMENT_NO, rst), WORD) + mc.SUB_ji8((self.SEGMENT_NO, rst), WORD) else: # MOV ebx, rootstacktop # SUB [ebx], WORD - self.mc.MOV_ri(ebx.value, rst) - self.mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD) + mc.MOV_ri(ebx.value, rst) + mc.SUB_mi8((self.SEGMENT_NO, ebx.value, 0), WORD) def redirect_call_assembler(self, oldlooptoken, newlooptoken): # some minimal sanity checking diff --git a/rpython/jit/backend/x86/regalloc.py 
b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1292,9 +1292,6 @@ need_lower_byte=True) self.perform(op, [], resloc) - def XXXconsider_stm_transaction_break(self, op, guard_op): - self.perform_with_guard(op, guard_op, [], None) - def consider_jump(self, op): assembler = self.assembler assert self.jump_target_descr is None diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -13,12 +13,16 @@ TID = rffi.UINT tid_offset = CFlexSymbolic('offsetof(struct rpyobj_s, tid)') stm_nb_segments = CFlexSymbolic('STM_NB_SEGMENTS') -stm_stack_marker_new = CFlexSymbolic('STM_STACK_MARKER_NEW') -stm_stack_marker_old = CFlexSymbolic('STM_STACK_MARKER_OLD') adr_nursery_free = CFlexSymbolic('((long)&STM_SEGMENT->nursery_current)') adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( CFlexSymbolic('((long)&pypy_stm_nursery_low_fill_mark)')) +adr_rjthread = ( + CFlexSymbolic('((long)&stm_thread_local.rjthread')) +adr_rjthread_head = ( + CFlexSymbolic('((long)&stm_thread_local.rjthread.head')) +adr_rjthread_moved_off_base = ( + CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base')) adr_transaction_read_version = ( CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_jmpbuf_ptr = ( @@ -39,6 +43,8 @@ CFlexSymbolic('((long)&stm_commit_transaction)')) adr_pypy_stm_start_transaction = ( CFlexSymbolic('((long)&pypy_stm_start_transaction)')) +adr_pypy__rewind_jmp_copy_stack_slice = ( + CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)')) def rewind_jmp_frame(): diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -116,5 +116,10 @@ /* NB. 
this logic is hard-coded in jit/backend/x86/assembler.py too */ } +static void pypy__rewind_jmp_copy_stack_slice(void) +{ + _rewind_jmp_copy_stack_slice(&stm_thread_local.rjthread); +} + #endif /* _RPY_STMGCINTF_H */ From noreply at buildbot.pypy.org Mon Aug 18 10:42:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 10:42:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: Avoid one word here with the jit Message-ID: <20140818084223.167091C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1316:e85ce411f190 Date: 2014-08-18 10:10 +0200 http://bitbucket.org/pypy/stmgc/changeset/e85ce411f190/ Log: Avoid one word here with the jit diff --git a/c7/stm/rewind_setjmp.c b/c7/stm/rewind_setjmp.c --- a/c7/stm/rewind_setjmp.c +++ b/c7/stm/rewind_setjmp.c @@ -37,8 +37,17 @@ size_t stack_size, ssstack_size; assert(rjthread->head != NULL); - stop = rjthread->head->frame_base; ssstop = rjthread->head->shadowstack_base; + if (((long)ssstop) & 1) { + /* PyPy's JIT: 'head->frame_base' is missing; use directly 'head', + which should be at the end of the frame (and doesn't need itself + to be copied because it contains immutable data only) */ + ssstop = ((char *)ssstop) - 1; + stop = (char *)rjthread->head; + } + else { + stop = rjthread->head->frame_base; + } assert(stop >= base); assert(ssstop <= ssbase); stack_size = stop - base; diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -53,9 +53,12 @@ ************************************************************/ typedef struct _rewind_jmp_buf { - char *frame_base; char *shadowstack_base; struct _rewind_jmp_buf *prev; + char *frame_base; + /* NB: PyPy's JIT has got details of this structure hard-coded, + as follows: it uses 2 words only (so frame_base is invalid) + and sets the lowest bit of 'shadowstack_base' to tell this */ } rewind_jmp_buf; typedef struct { @@ -71,6 +74,7 @@ /* remember the current stack and ss_stack 
positions */ #define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + assert((((long)(ss)) & 1) == 0); \ (rjbuf)->frame_base = __builtin_frame_address(0); \ (rjbuf)->shadowstack_base = (char *)(ss); \ (rjbuf)->prev = (rjthread)->head; \ From noreply at buildbot.pypy.org Mon Aug 18 10:49:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 10:49:00 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill this code Message-ID: <20140818084900.3DD151C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72869:f028586da4f9 Date: 2014-08-18 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/f028586da4f9/ Log: Kill this code diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2662,78 +2662,6 @@ else: self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" - def XXXgenop_guard_stm_transaction_break(self, op, guard_op, guard_token, - arglocs, result_loc): - assert self.cpu.gc_ll_descr.stm - if not we_are_translated(): - return # tests only - - gcmap = self._regalloc.get_gcmap() - self._store_force_index(guard_op) - - mc = self.mc - self._generate_cmp_break_transaction() - # use JAE to jump over the following piece of code if we don't need - # to break the transaction now - mc.J_il(rx86.Conditions['AE'], 0xfffff) # patched later - jae_location = mc.get_relative_pos() - - # This is the case in which we have to do the same as the logic - # in pypy_stm_perform_transaction(). We know that we're not in - # an atomic transaction (otherwise the jump above always triggers). 
- # So we only have to do the following three operations: - # stm_commit_transaction(); - # __builtin_setjmp(jmpbuf); - # pypy_stm_start_transaction(&jmpbuf); - - # save all registers and the gcmap - self.push_gcmap(mc, gcmap, store=True) - grp_regs = self._regalloc.rm.reg_bindings.values() - xmm_regs = self._regalloc.xrm.reg_bindings.values() - self._push_pop_regs_to_frame(True, mc, grp_regs, xmm_regs) - # - # call stm_commit_transaction() - mc.CALL(imm(rstm.adr_stm_commit_transaction)) - # - # update the two words in the STM_RESUME_BUF, as described - # in arch.py. The "learip" pseudo-instruction turns into - # what is, in gnu as syntax: lea 0(%rip), %rax (the 0 is - # four bytes, patched just below) - mc.LEARIP_rl32(eax.value, 0) - learip_location = mc.get_relative_pos() - mc.MOV_sr(STM_JMPBUF_OFS_RIP, eax.value) - mc.MOV_sr(STM_JMPBUF_OFS_RSP, esp.value) - mc.XOR(ebp, ebp) - mc.MOV_sr(STM_JMPBUF_OFS_RBP, ebp.value) - # - offset = mc.get_relative_pos() - learip_location - assert 0 < offset <= 127 - mc.overwrite32(learip_location - 4, offset) - # ** HERE ** is the place an aborted transaction retries - # (when resuming, ebp is garbage, but the STM_RESUME_BUF is - # still correct in case of repeated aborting) - # - # call pypy_stm_start_transaction(&jmpbuf, &v_counter) - # where v_counter is abusively stored in the jmpbuf at - # the location for ebp (so that the value in v_counter - # is here found in ebp, if we needed it). 
- mc.LEA_rs(edi.value, STM_JMPBUF_OFS) - mc.LEA_rs(esi.value, STM_JMPBUF_OFS_RBP) - mc.CALL(imm(rstm.adr_pypy_stm_start_transaction)) - # - # reload ebp with the frame now - self._reload_frame_if_necessary(self.mc) - # - # restore regs - self._push_pop_regs_to_frame(False, mc, grp_regs, xmm_regs) - # - self._emit_guard_not_forced(guard_token) - - # patch the JAE above (note that we also skip the guard_not_forced - # in the common situation where we jump over the code above) - offset = mc.get_relative_pos() - jae_location - mc.overwrite32(jae_location - 4, offset) - def genop_discard_stm_read(self, op, arglocs): if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" From noreply at buildbot.pypy.org Mon Aug 18 10:51:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 10:51:32 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Kill more stuff Message-ID: <20140818085132.0D3B41C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72870:ebf6e1ab041b Date: 2014-08-18 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/ebf6e1ab041b/ Log: Kill more stuff diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -837,35 +837,6 @@ def _call_footer(self): gcrootmap = self.cpu.gc_ll_descr.gcrootmap - if self.cpu.gc_ll_descr.stm and we_are_translated(): - # call _pypy_stm_become_inevitable() if the current jmpbuf is set - # to this frame, because we're about to leave. This is if - # we called a pypy_stm_start_transaction() earlier. 
- assert IS_X86_64 - mc = self.mc - # - # load the address of the jmpbuf - mc.LEA_rs(edi.value, STM_JMPBUF_OFS) - # compare it with the currently-stored jmpbuf - mc.CMP_rj(edi.value, (self.SEGMENT_GC, rstm.adr_jmpbuf_ptr)) - # if they differ (or if jmpbuf_ptr is already NULL), nothing to do - mc.J_il8(rx86.Conditions['NE'], 0) # patched later - jne_location = mc.get_relative_pos() - # - # if they are equal, we need to become inevitable now - mc.XOR_rr(edi.value, edi.value) - mc.CALL(imm(rstm.adr__pypy_stm_become_inevitable)) - # there could have been a collection in the call above; - # reload the frame into ebp (but we don't need to apply the - # write barrier to it now) - mc.MOV(ecx, self.heap_shadowstack_top()) - mc.MOV_rm(ebp.value, (self.SEGMENT_NO, ecx.value, -WORD)) - # - # this is where the JNE above jumps - offset = mc.get_relative_pos() - jne_location - assert 0 < offset <= 127 - mc.overwrite(jne_location-1, chr(offset)) - if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack() diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -25,8 +25,6 @@ CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base')) adr_transaction_read_version = ( CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) -adr_jmpbuf_ptr = ( - CFlexSymbolic('((long)&STM_SEGMENT->jmpbuf_ptr)')) adr_segment_base = ( CFlexSymbolic('((long)&STM_SEGMENT->segment_base)')) adr_write_slowpath = CFlexSymbolic('((long)&_stm_write_slowpath)') @@ -37,12 +35,6 @@ CARD_MARKED = CFlexSymbolic('_STM_CARD_MARKED') CARD_SIZE = CFlexSymbolic('_STM_CARD_SIZE') -adr__pypy_stm_become_inevitable = ( - CFlexSymbolic('((long)&_pypy_stm_become_inevitable)')) -adr_stm_commit_transaction = ( - CFlexSymbolic('((long)&stm_commit_transaction)')) -adr_pypy_stm_start_transaction = ( - CFlexSymbolic('((long)&pypy_stm_start_transaction)')) adr_pypy__rewind_jmp_copy_stack_slice = ( 
CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)')) From noreply at buildbot.pypy.org Mon Aug 18 12:07:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 12:07:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: in-progress Message-ID: <20140818100701.BDBCE1C0EF5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72874:a279a16c839d Date: 2014-08-18 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/a279a16c839d/ Log: in-progress diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -74,9 +74,9 @@ self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery else: self.gc_minimal_size_in_nursery = 0 - if getattr(gc_ll_descr, 'gcheaderbuilder', None) is not None: + try: self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header - else: + except AttributeError: self.gc_size_of_header = WORD # for tests self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) # building the barriers needs to happen before these: diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -856,10 +856,6 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap return self.heap_tl(gcrootmap.get_root_stack_top_addr()) - def heap_rjthread(self): - """STM: Return an AddressLoc for '&stm_thread_local.rjthread'.""" - return self.heap_tl(rstm.adr_rjthread) - def heap_rjthread_head(self): """STM: Return an AddressLoc for '&stm_thread_local.rjthread.head'.""" return self.heap_tl(rstm.adr_rjthread_head) @@ -878,17 +874,17 @@ # MOV [ebx], ebp if self.cpu.gc_ll_descr.stm: # inlining stm_rewind_jmp_enterframe() - r11v = X86_64_SCRATCH_REG.value + r11 = X86_64_SCRATCH_REG rjh = self.heap_rjthread_head() - mc.ADD_ri8(ebx.value, 1) # ADD ebx, 1 - 
mc.MOV_rm(r11v, rjh) # MOV r11, [rjthread.head] + mc.ADD_ri(ebx.value, 1) # ADD ebx, 1 + mc.MOV(r11, rjh) # MOV r11, [rjthread.head] mc.MOV_sr(STM_SHADOWSTACK_BASE_OFS, ebx.value) # MOV [esp+ssbase], ebx - mc.ADD_ri8(ebx.value, WORD-1) # ADD ebx, 7 - mc.MOV_sr(STM_PREV_OFS, r11v) # MOV [esp+prev], r11 + mc.ADD_ri(ebx.value, WORD-1) # ADD ebx, 7 + mc.MOV_sr(STM_PREV_OFS, r11.value) # MOV [esp+prev], r11 mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx - mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] - mc.MOV_mr(rjh, r11v) # MOV [rjthread.head], r11 + mc.LEA_rs(r11.value, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] + mc.MOV(rjh, r11) # MOV [rjthread.head], r11 # else: mc.ADD_ri(ebx.value, WORD) # ADD ebx, WORD @@ -902,16 +898,16 @@ # that this occurs more than once. So we have to restore # the old shadowstack by looking up its original saved value. # The rest of this is inlining stm_rewind_jmp_leaveframe(). - r11v = X86_64_SCRATCH_REG.value + r11 = X86_64_SCRATCH_REG rjh = self.heap_rjthread_head() rjmovd_o_b = self.heap_rjthread_moved_off_base() - adr_rjthread_moved_off_base - mc.MOV_rs(r11v, STM_SHADOWSTACK_BASE_OFS) # MOV r11, [esp+ssbase] - mc.MOV_rs(ebx.value, STM_PREV_OFS) # MOV ebx, [esp+prev] - mc.MOV(self.heap_shadowstack_top(), r11v) # MOV [rootstacktop], r11 - mc.LEA_rs(r11v, STM_JMPBUF_OFS) # LEA r11, [esp+bufofs] - mc.MOV_mr(rjh, ebx.value) # MOV [rjthread.head], ebx - mc.CMP_rm(r11v, rjmovd_o_b) # CMP r11, [rjth.movd_o_b] + mc.MOV_rs(ebx.value, STM_SHADOWSTACK_BASE_OFS) + # MOV ebx, [esp+ssbase] + mc.MOV_rs(r11.value, STM_PREV_OFS) # MOV r11, [esp+prev] + mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx + mc.LEA_rs(ebx.value, STM_JMPBUF_OFS) # LEA ebx, [esp+bufofs] + mc.MOV(rjh, r11) # MOV [rjthread.head], r11 + mc.CMP(ebx, rjmovd_o_b) # CMP ebx, [rjth.movd_o_b] mc.J_il8(rx86.Conditions['NE'], 0) # JNE label_below jne_location = mc.get_relative_pos() # diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- 
a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -17,12 +17,10 @@ adr_nursery_top = CFlexSymbolic('((long)&STM_SEGMENT->nursery_end)') adr_pypy_stm_nursery_low_fill_mark = ( CFlexSymbolic('((long)&pypy_stm_nursery_low_fill_mark)')) -adr_rjthread = ( - CFlexSymbolic('((long)&stm_thread_local.rjthread')) adr_rjthread_head = ( - CFlexSymbolic('((long)&stm_thread_local.rjthread.head')) + CFlexSymbolic('((long)&stm_thread_local.rjthread.head)')) adr_rjthread_moved_off_base = ( - CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base')) + CFlexSymbolic('((long)&stm_thread_local.rjthread.moved_off_base)')) adr_transaction_read_version = ( CFlexSymbolic('((long)&STM_SEGMENT->transaction_read_version)')) adr_segment_base = ( @@ -74,7 +72,7 @@ @dont_look_inside def break_transaction(): - llop.stm_break_transaction(lltype.Void) + llop.stm_transaction_break(lltype.Void) @dont_look_inside def set_transaction_length(fraction): diff --git a/rpython/translator/backendopt/gilanalysis.py b/rpython/translator/backendopt/gilanalysis.py --- a/rpython/translator/backendopt/gilanalysis.py +++ b/rpython/translator/backendopt/gilanalysis.py @@ -24,7 +24,7 @@ return False def analyze_simple_operation(self, op, graphinfo): - if op.opname == 'stm_break_transaction': + if op.opname == 'stm_transaction_break': return True return False diff --git a/rpython/translator/stm/src_stm/extracode.h b/rpython/translator/stm/src_stm/extracode.h --- a/rpython/translator/stm/src_stm/extracode.h +++ b/rpython/translator/stm/src_stm/extracode.h @@ -77,16 +77,6 @@ long fnlen = 1, nlen = 1, line = 0; char *fn = "?", *name = "?"; -#ifdef RPY_STM_JIT - if (odd_number == STM_STACK_MARKER_NEW || - odd_number == STM_STACK_MARKER_OLD) { - assert(o); - /* XXX ji_jf_forward */ - /* XXX */ - o = NULL; - } -#endif - if (o) { co_filename =_fetch_rpsspace0(segment_base, o, g_co_filename_ofs); co_name =_fetch_rpsspace0(segment_base, o, g_co_name_ofs); From noreply at buildbot.pypy.org Mon Aug 18 13:41:23 2014 
From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Aug 2014 13:41:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: fix release build of random2 Message-ID: <20140818114123.73E051C059C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1317:b62545917bee Date: 2014-08-18 13:41 +0200 http://bitbucket.org/pypy/stmgc/changeset/b62545917bee/ Log: fix release build of random2 diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c --- a/c7/demo/demo_random2.c +++ b/c7/demo/demo_random2.c @@ -110,7 +110,7 @@ num = get_rand(ss_size); /* XXX: impl detail: there is already a "-1" on the SS -> +1 */ objptr_t r = (objptr_t)stm_thread_local.shadowstack_base[num+1].ss; - assert((((uintptr_t)r) & 3) == 0); + OPT_ASSERT((((uintptr_t)r) & 3) == 0); } if (num == 1 && td.active_roots_num > 0) { @@ -380,7 +380,7 @@ } } } - assert(roots_on_ss == td.roots_on_ss); + OPT_ASSERT(roots_on_ss == td.roots_on_ss); stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); } From noreply at buildbot.pypy.org Mon Aug 18 13:58:39 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Aug 2014 13:58:39 +0200 (CEST) Subject: [pypy-commit] stmgc default: add all demos to tests Message-ID: <20140818115839.F42101C347F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1318:ec5c149ff346 Date: 2014-08-18 13:58 +0200 http://bitbucket.org/pypy/stmgc/changeset/ec5c149ff346/ Log: add all demos to tests diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c --- a/c7/demo/demo_simple.c +++ b/c7/demo/demo_simple.c @@ -10,7 +10,7 @@ # include "stmgc.h" #endif -#define ITERS 1000000 +#define ITERS 100000 #define NTHREADS 2 @@ -59,14 +59,16 @@ void *demo2(void *arg) { int status; + rewind_jmp_buf rjbuf; stm_register_thread_local(&stm_thread_local); + stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf); char *org = (char *)stm_thread_local.shadowstack; tl_counter = 0; object_t *tmp; int i = 0; while (i < ITERS) { - 
stm_start_inevitable_transaction(&stm_thread_local); + stm_start_transaction(&stm_thread_local); tl_counter++; if (i % 500 < 250) STM_PUSH_ROOT(stm_thread_local, stm_allocate(16));//gl_counter++; @@ -76,8 +78,9 @@ i++; } - assert(org == (char *)stm_thread_local.shadowstack); + OPT_ASSERT(org == (char *)stm_thread_local.shadowstack); + stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); return NULL; diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py --- a/c7/test/test_demo.py +++ b/c7/test/test_demo.py @@ -15,6 +15,19 @@ def test_shadowstack(self): self.make_and_run("debug-test_shadowstack") - def test_demo2_debug(self): self.make_and_run("debug-demo2") + def test_demo_simple_build(self): self.make_and_run("build-demo_simple") + def test_demo_largemalloc_build(self): self.make_and_run("build-demo_largemalloc") + + + + # def test_demo2_debug(self): self.make_and_run("debug-demo2") def test_demo2_build(self): self.make_and_run("build-demo2") def test_demo2_release(self): self.make_and_run("release-demo2") + + # def test_demo_random_debug(self): self.make_and_run("debug-demo_random") + def test_demo_random_build(self): self.make_and_run("build-demo_random") + def test_demo_random_release(self): self.make_and_run("release-demo_random") + + # def test_demo_random2_debug(self): self.make_and_run("debug-demo_random2") + def test_demo_random2_build(self): self.make_and_run("build-demo_random2") + def test_demo_random2_release(self): self.make_and_run("release-demo_random2") From noreply at buildbot.pypy.org Mon Aug 18 14:01:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 14:01:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix Message-ID: <20140818120156.1FBC11C347F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72875:2eafe08987ef Date: 2014-08-18 13:52 +0200 
http://bitbucket.org/pypy/pypy/changeset/2eafe08987ef/ Log: Fix diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -904,6 +904,7 @@ mc.MOV_rs(ebx.value, STM_SHADOWSTACK_BASE_OFS) # MOV ebx, [esp+ssbase] mc.MOV_rs(r11.value, STM_PREV_OFS) # MOV r11, [esp+prev] + mc.SUB_ri(ebx.value, 1) # SUB ebx, 1 mc.MOV(self.heap_shadowstack_top(), ebx) # MOV [rootstacktop], ebx mc.LEA_rs(ebx.value, STM_JMPBUF_OFS) # LEA ebx, [esp+bufofs] mc.MOV(rjh, r11) # MOV [rjthread.head], r11 From noreply at buildbot.pypy.org Mon Aug 18 14:26:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 14:26:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for "assert isinstance(x, str)" in RPython, in case x is "str-or-None". Message-ID: <20140818122622.712401C0EF5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72876:a24d530761ce Date: 2014-08-18 14:25 +0200 http://bitbucket.org/pypy/pypy/changeset/a24d530761ce/ Log: Fix for "assert isinstance(x, str)" in RPython, in case x is "str- or-None". 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4301,6 +4301,38 @@ s = a.build_types(f, []) assert isinstance(s, annmodel.SomeString) + def test_isinstance_str_1(self): + def g(): + pass + def f(n): + if n > 5: + s = "foo" + else: + s = None + g() + return isinstance(s, str) + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s, annmodel.SomeBool) + assert not s.is_constant() + + def test_isinstance_str_2(self): + def g(): + pass + def f(n): + if n > 5: + s = "foo" + else: + s = None + g() + if isinstance(s, str): + return s + return "" + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s, annmodel.SomeString) + assert not s.can_be_none() + def g(n): return [0, 1, 2, n] diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -683,13 +683,14 @@ if hop.s_result.is_constant(): return hop.inputconst(lltype.Bool, hop.s_result.const) - if hop.args_s[1].is_constant() and hop.args_s[1].const == list: - if hop.args_s[0].knowntype != list: - raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") - rlist = hop.args_r[0] - vlist = hop.inputarg(rlist, arg=0) - cnone = hop.inputconst(rlist, None) - return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) + if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list): + if hop.args_s[0].knowntype not in (str, list): + raise TyperError("isinstance(x, str/list) expects x to be known" + " statically to be a str/list or None") + rstrlist = hop.args_r[0] + vstrlist = hop.inputarg(rstrlist, arg=0) + cnone = hop.inputconst(rstrlist, None) + return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool) assert isinstance(hop.args_r[0], rclass.InstanceRepr) return 
hop.args_r[0].rtype_isinstance(hop) diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -364,17 +364,35 @@ assert res == isinstance([A(), B(), C()][x-1], [A, B, C][y-1]) * 3 def test_isinstance_list(self): + def g(): + pass def f(i): if i == 0: l = [] else: l = None + g() return isinstance(l, list) res = self.interpret(f, [0]) assert res is True res = self.interpret(f, [1]) assert res is False + def test_isinstance_str(self): + def g(): + pass + def f(i): + if i == 0: + l = "foobar" + else: + l = None + g() + return isinstance(l, str) + res = self.interpret(f, [0]) + assert res is True + res = self.interpret(f, [1]) + assert res is False + def test_instantiate(self): class A: pass From noreply at buildbot.pypy.org Mon Aug 18 14:27:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 14:27:50 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix Message-ID: <20140818122750.568081C0EF5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72877:e7b26f5a2320 Date: 2014-08-18 14:27 +0200 http://bitbucket.org/pypy/pypy/changeset/e7b26f5a2320/ Log: Fix diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -44,8 +44,7 @@ pypyjitdriver = PyPyJitDriver(get_printable_location = get_printable_location, should_unroll_one_iteration = should_unroll_one_iteration, - name='pypyjit', - stm_do_transaction_breaks=True) + name='pypyjit') class __extend__(PyFrame): From noreply at buildbot.pypy.org Mon Aug 18 16:05:34 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 18 Aug 2014 16:05:34 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge default Message-ID: <20140818140534.A38811D22EF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: 
r72878:77c1babd513e Date: 2014-08-18 00:53 +0200 http://bitbucket.org/pypy/pypy/changeset/77c1babd513e/ Log: hg merge default diff too long, truncating to 2000 out of 10260 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,5 +1,4 @@ # Generated by tools/asdl_py.py -from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name @@ -11,7 +10,7 @@ def raise_attriberr(space, w_obj, name): raise oefmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", w_obj, name) + \"'%T' object has no attribute '%s'\", w_obj, name) def check_string(space, w_obj): @@ -21,11 +20,15 @@ 'AST string must be of type str or unicode')) return w_obj - -class AST(W_Root): - - w_dict = None - +def get_field(space, w_node, name, optional): + w_obj = w_node.getdictvalue(space, name) + if w_obj is None and not optional: + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %T", name, w_node) + return w_obj + + +class AST(object): __metaclass__ = extendabletype def walkabout(self, visitor): @@ -34,8 +37,23 @@ def mutate_over(self, visitor): raise AssertionError("mutate_over() implementation not provided") - def sync_app_attrs(self, space): - raise NotImplementedError + +class NodeVisitorNotImplemented(Exception): + pass + + +class _FieldsWrapper(W_Root): + "Hack around the fact we can't store tuples on a TypeDef." 
+ + def __init__(self, fields): + self.fields = fields + + def __spacebind__(self, space): + return space.newtuple([space.wrap(field) for field in self.fields]) + + +class W_AST(W_Root): + w_dict = None def getdict(self, space): if self.w_dict is None: @@ -47,7 +65,7 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = w_type.getdictvalue(space, "_fields") + w_fields = space.getattr(w_type, space.wrap("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, @@ -71,79 +89,94 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) - def missing_field(self, space, required, host): - "Find which required field is missing." - state = self.initialization_state - for i in range(len(required)): - if (state >> i) & 1: - continue # field is present - missing = required[i] - if missing is None: - continue # field is optional - w_obj = self.getdictvalue(space, missing) - if w_obj is None: - raise oefmt(space.w_TypeError, - "required field \"%s\" missing from %s", - missing, host) - else: - raise oefmt(space.w_TypeError, - "incorrect type for field \"%s\" in %s", - missing, host) - raise AssertionError("should not reach here") - - -class NodeVisitorNotImplemented(Exception): - pass - - -class _FieldsWrapper(W_Root): - "Hack around the fact we can't store tuples on a TypeDef." 
- - def __init__(self, fields): - self.fields = fields - - def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) - - -def get_AST_new(node_class): - def generic_AST_new(space, w_type, __args__): - node = space.allocate_instance(node_class, w_type) - node.initialization_state = 0 - return space.wrap(node) - return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__) - -def AST_init(space, w_self, __args__): +def W_AST_new(space, w_type, __args__): + node = space.allocate_instance(W_AST, w_type) + return space.wrap(node) + +def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() - if args_w and len(args_w) != 0: - w_err = space.wrap("_ast.AST constructor takes 0 positional arguments") - raise OperationError(space.w_TypeError, w_err) + fields_w = space.fixedview(space.getattr(space.type(w_self), + space.wrap("_fields"))) + num_fields = len(fields_w) if fields_w else 0 + if args_w and len(args_w) != num_fields: + if num_fields == 0: + raise oefmt(space.w_TypeError, + "%T constructor takes 0 positional arguments", w_self) + elif num_fields == 1: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional argument", w_self, num_fields) + else: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional arguments", w_self, num_fields) + if args_w: + for i, w_field in enumerate(fields_w): + space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("_ast.AST", + +W_AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __reduce__=interp2app(AST.reduce_w), - __setstate__=interp2app(AST.setstate_w), + __reduce__=interp2app(W_AST.reduce_w), + __setstate__=interp2app(W_AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, - typedef.descr_set_dict, cls=AST), - 
__new__=interp2app(get_AST_new(AST)), - __init__=interp2app(AST_init), + typedef.descr_set_dict, cls=W_AST), + __new__=interp2app(W_AST_new), + __init__=interp2app(W_AST_init), ) - - +class State: + AST_TYPES = [] + + @classmethod + def ast_type(cls, name, base, fields, attributes=None): + cls.AST_TYPES.append((name, base, fields, attributes)) + + def __init__(self, space): + self.w_AST = space.gettypeobject(W_AST.typedef) + for (name, base, fields, attributes) in self.AST_TYPES: + self.make_new_type(space, name, base, fields, attributes) + + def make_new_type(self, space, name, base, fields, attributes): + w_base = getattr(self, 'w_%s' % base) + w_dict = space.newdict() + space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + if fields is not None: + space.setitem_str(w_dict, "_fields", + space.newtuple([space.wrap(f) for f in fields])) + if attributes is not None: + space.setitem_str(w_dict, "_attributes", + space.newtuple([space.wrap(a) for a in attributes])) + w_type = space.call_function( + space.w_type, + space.wrap(name), space.newtuple([w_base]), w_dict) + setattr(self, 'w_%s' % name, w_type) + +def get(space): + return space.fromcache(State) class mod(AST): - pass + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_Module): + return Module.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Interactive): + return Interactive.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expression): + return Expression.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Suite): + return Suite.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected mod node, got %T", w_node) +State.ast_type('mod', 'AST', None, []) class Module(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Module(self) @@ 
-153,29 +186,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Module(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Module') + def to_object(self, space): + w_node = space.call_function(get(space).w_Module) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Module(_body) + +State.ast_type('Module', 'mod', ['body']) class Interactive(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -185,28 +219,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Interactive(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Interactive') + def to_object(self, space): + w_node = space.call_function(get(space).w_Interactive) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, 
space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Interactive(_body) + +State.ast_type('Interactive', 'mod', ['body']) class Expression(mod): def __init__(self, body): self.body = body - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Expression(self) @@ -215,20 +251,25 @@ self.body = self.body.mutate_over(visitor) return visitor.visit_Expression(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Expression') - else: - pass - self.body.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expression) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + _body = expr.from_object(space, w_body) + return Expression(_body) + +State.ast_type('Expression', 'mod', ['body']) class Suite(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Suite(self) @@ -238,21 +279,24 @@ visitor._mutate_sequence(self.body) return visitor.visit_Suite(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Suite') + def to_object(self, space): + w_node = space.call_function(get(space).w_Suite) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = 
[node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Suite(_body) + +State.ast_type('Suite', 'mod', ['body']) class stmt(AST): @@ -261,17 +305,68 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_FunctionDef): + return FunctionDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ClassDef): + return ClassDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Return): + return Return.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Delete): + return Delete.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assign): + return Assign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_AugAssign): + return AugAssign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Print): + return Print.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_For): + return For.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_While): + return While.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_If): + return If.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_With): + return With.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Raise): + return Raise.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryExcept): + return TryExcept.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryFinally): + return TryFinally.from_object(space, w_node) + if 
space.isinstance_w(w_node, get(space).w_Assert): + return Assert.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Import): + return Import.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ImportFrom): + return ImportFrom.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Exec): + return Exec.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Global): + return Global.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expr): + return Expr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Pass): + return Pass.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Break): + return Break.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Continue): + return Continue.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected stmt node, got %T", w_node) +State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset']) + class FunctionDef(stmt): def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -284,32 +379,49 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_FunctionDef(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_FunctionDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + w_args = self.args.to_object(space) # arguments + space.setattr(w_node, space.wrap('args'), w_args) + if self.body is None: + body_w = [] 
else: - pass - self.args.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_args = get_field(space, w_node, 'args', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.realstr_w(w_name) + _args = arguments.from_object(space, w_args) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in 
decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list']) class ClassDef(stmt): @@ -317,13 +429,9 @@ def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases - self.w_bases = None self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -337,41 +445,54 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_ClassDef(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_ClassDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + if self.bases is None: + bases_w = [] else: - pass - w_list = self.w_bases - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.bases = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.bases = None - if self.bases is not None: - for node in self.bases: - node.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - 
for node in self.decorator_list: - node.sync_app_attrs(space) + bases_w = [node.to_object(space) for node in self.bases] # expr + w_bases = space.newlist(bases_w) + space.setattr(w_node, space.wrap('bases'), w_bases) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_bases = get_field(space, w_node, 'bases', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.realstr_w(w_name) + bases_w = space.unpackiterable(w_bases) + _bases = [expr.from_object(space, w_item) for w_item in bases_w] + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list']) class Return(stmt): @@ -379,7 +500,6 @@ def 
__init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Return(self) @@ -389,23 +509,34 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Return(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None], 'Return') - else: - if not self.initialization_state & 4: - self.value = None - if self.value: - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Return) + w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Return(_value, _lineno, _col_offset) + +State.ast_type('Return', 'stmt', ['value']) class Delete(stmt): def __init__(self, targets, lineno, col_offset): self.targets = targets - self.w_targets = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Delete(self) @@ -415,31 +546,40 @@ visitor._mutate_sequence(self.targets) return visitor.visit_Delete(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') + def to_object(self, space): + w_node = 
space.call_function(get(space).w_Delete) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Delete(_targets, _lineno, _col_offset) + +State.ast_type('Delete', 'stmt', ['targets']) class Assign(stmt): def __init__(self, targets, value, lineno, col_offset): self.targets = targets - self.w_targets = None self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assign(self) @@ -450,22 +590,36 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Assign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') + def to_object(self, space): + w_node = space.call_function(get(space).w_Assign) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - 
if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) - self.value.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assign(_targets, _value, _lineno, _col_offset) + +State.ast_type('Assign', 'stmt', ['targets', 'value']) class AugAssign(stmt): @@ -475,7 +629,6 @@ self.op = op self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -485,13 +638,35 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_AugAssign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') - else: - pass - self.target.sync_app_attrs(space) - self.value.sync_app_attrs(space) + def 
to_object(self, space): + w_node = space.call_function(get(space).w_AugAssign) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_op = operator_to_class[self.op - 1]().to_object(space) # operator + space.setattr(w_node, space.wrap('op'), w_op) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_op = get_field(space, w_node, 'op', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _op = operator.from_object(space, w_op) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return AugAssign(_target, _op, _value, _lineno, _col_offset) + +State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) class Print(stmt): @@ -499,10 +674,8 @@ def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values - self.w_values = None self.nl = nl stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Print(self) @@ -514,24 +687,40 @@ visitor._mutate_sequence(self.values) return visitor.visit_Print(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 27: - self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') + def to_object(self, space): + w_node = space.call_function(get(space).w_Print) + w_dest = self.dest.to_object(space) if self.dest is not None 
else space.w_None # expr + space.setattr(w_node, space.wrap('dest'), w_dest) + if self.values is None: + values_w = [] else: - if not self.initialization_state & 4: - self.dest = None - if self.dest: - self.dest.sync_app_attrs(space) - w_list = self.w_values - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.values = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.values = None - if self.values is not None: - for node in self.values: - node.sync_app_attrs(space) + values_w = [node.to_object(space) for node in self.values] # expr + w_values = space.newlist(values_w) + space.setattr(w_node, space.wrap('values'), w_values) + w_nl = space.wrap(self.nl) # bool + space.setattr(w_node, space.wrap('nl'), w_nl) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_dest = get_field(space, w_node, 'dest', True) + w_values = get_field(space, w_node, 'values', False) + w_nl = get_field(space, w_node, 'nl', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _dest = expr.from_object(space, w_dest) + values_w = space.unpackiterable(w_values) + _values = [expr.from_object(space, w_item) for w_item in values_w] + _nl = space.bool_w(w_nl) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Print(_dest, _values, _nl, _lineno, _col_offset) + +State.ast_type('Print', 'stmt', ['dest', 'values', 'nl']) class For(stmt): @@ -540,11 +729,8 @@ self.target = target self.iter = iter self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_For(self) @@ -558,33 +744,49 @@ 
visitor._mutate_sequence(self.orelse) return visitor.visit_For(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') + def to_object(self, space): + w_node = space.call_function(get(space).w_For) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_iter = self.iter.to_object(space) # expr + space.setattr(w_node, space.wrap('iter'), w_iter) + if self.body is None: + body_w = [] else: - pass - self.target.sync_app_attrs(space) - self.iter.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_iter = get_field(space, w_node, 'iter', False) + w_body = get_field(space, w_node, 'body', False) + 
w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _iter = expr.from_object(space, w_iter) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return For(_target, _iter, _body, _orelse, _lineno, _col_offset) + +State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse']) class While(stmt): @@ -592,11 +794,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_While(self) @@ -609,32 +808,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_While(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') + def to_object(self, space): + w_node = space.call_function(get(space).w_While) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = 
None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return While(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('While', 'stmt', ['test', 'body', 'orelse']) class If(stmt): @@ -642,11 +854,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_If(self) @@ -659,32 +868,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_If(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 
'col_offset', 'test', 'body', 'orelse'], 'If') + def to_object(self, space): + w_node = space.call_function(get(space).w_If) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, 
w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return If(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('If', 'stmt', ['test', 'body', 'orelse']) class With(stmt): @@ -693,9 +915,7 @@ self.context_expr = context_expr self.optional_vars = optional_vars self.body = body - self.w_body = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_With(self) @@ -708,25 +928,40 @@ visitor._mutate_sequence(self.body) return visitor.visit_With(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 23: - self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') + def to_object(self, space): + w_node = space.call_function(get(space).w_With) + w_context_expr = self.context_expr.to_object(space) # expr + space.setattr(w_node, space.wrap('context_expr'), w_context_expr) + w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr + space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars) + if self.body is None: + body_w = [] else: - if not self.initialization_state & 8: - self.optional_vars = None - self.context_expr.sync_app_attrs(space) - if self.optional_vars: - self.optional_vars.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), 
w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_context_expr = get_field(space, w_node, 'context_expr', False) + w_optional_vars = get_field(space, w_node, 'optional_vars', True) + w_body = get_field(space, w_node, 'body', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _context_expr = expr.from_object(space, w_context_expr) + _optional_vars = expr.from_object(space, w_optional_vars) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return With(_context_expr, _optional_vars, _body, _lineno, _col_offset) + +State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body']) class Raise(stmt): @@ -736,7 +971,6 @@ self.inst = inst self.tback = tback stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Raise(self) @@ -750,35 +984,44 @@ self.tback = self.tback.mutate_over(visitor) return visitor.visit_Raise(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~28) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') - else: - if not self.initialization_state & 4: - self.type = None - if not self.initialization_state & 8: - self.inst = None - if not self.initialization_state & 16: - self.tback = None - if self.type: - self.type.sync_app_attrs(space) - if self.inst: - self.inst.sync_app_attrs(space) - if self.tback: - self.tback.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Raise) + w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr + space.setattr(w_node, space.wrap('type'), w_type) + w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr + space.setattr(w_node, space.wrap('inst'), w_inst) 
+ w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr + space.setattr(w_node, space.wrap('tback'), w_tback) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_type = get_field(space, w_node, 'type', True) + w_inst = get_field(space, w_node, 'inst', True) + w_tback = get_field(space, w_node, 'tback', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _type = expr.from_object(space, w_type) + _inst = expr.from_object(space, w_inst) + _tback = expr.from_object(space, w_tback) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Raise(_type, _inst, _tback, _lineno, _col_offset) + +State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback']) class TryExcept(stmt): def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body - self.w_body = None self.handlers = handlers - self.w_handlers = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -792,52 +1035,58 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_TryExcept(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryExcept) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - 
for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_handlers - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w] - else: - self.handlers = None - if self.handlers is not None: - for node in self.handlers: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.handlers is None: + handlers_w = [] + else: + handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler + w_handlers = space.newlist(handlers_w) + space.setattr(w_node, space.wrap('handlers'), w_handlers) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_handlers = get_field(space, w_node, 'handlers', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + handlers_w = space.unpackiterable(w_handlers) + _handlers = [excepthandler.from_object(space, w_item) 
for w_item in handlers_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) + +State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) class TryFinally(stmt): def __init__(self, body, finalbody, lineno, col_offset): self.body = body - self.w_body = None self.finalbody = finalbody - self.w_finalbody = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -849,31 +1098,41 @@ visitor._mutate_sequence(self.finalbody) return visitor.visit_TryFinally(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryFinally) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_finalbody - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.finalbody = None - if self.finalbody is not None: - for node in self.finalbody: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.finalbody is None: + finalbody_w = [] + else: + finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt + w_finalbody = space.newlist(finalbody_w) + space.setattr(w_node, space.wrap('finalbody'), 
w_finalbody) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_finalbody = get_field(space, w_node, 'finalbody', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + finalbody_w = space.unpackiterable(w_finalbody) + _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryFinally(_body, _finalbody, _lineno, _col_offset) + +State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) class Assert(stmt): @@ -882,7 +1141,6 @@ self.test = test self.msg = msg stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assert(self) @@ -893,24 +1151,38 @@ self.msg = self.msg.mutate_over(visitor) return visitor.visit_Assert(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') - else: - if not self.initialization_state & 8: - self.msg = None - self.test.sync_app_attrs(space) - if self.msg: - self.msg.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Assert) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr + space.setattr(w_node, space.wrap('msg'), w_msg) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = 
space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_msg = get_field(space, w_node, 'msg', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + _msg = expr.from_object(space, w_msg) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assert(_test, _msg, _lineno, _col_offset) + +State.ast_type('Assert', 'stmt', ['test', 'msg']) class Import(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Import(self) @@ -920,21 +1192,32 @@ visitor._mutate_sequence(self.names) return visitor.visit_Import(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') + def to_object(self, space): + w_node = space.call_function(get(space).w_Import) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = 
get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Import(_names, _lineno, _col_offset) + +State.ast_type('Import', 'stmt', ['names']) class ImportFrom(stmt): @@ -942,10 +1225,8 @@ def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names - self.w_names = None self.level = level stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_ImportFrom(self) @@ -955,24 +1236,40 @@ visitor._mutate_sequence(self.names) return visitor.visit_ImportFrom(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~20) ^ 11: - self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') + def to_object(self, space): + w_node = space.call_function(get(space).w_ImportFrom) + w_module = space.wrap(self.module) # identifier + space.setattr(w_node, space.wrap('module'), w_module) + if self.names is None: + names_w = [] else: - if not self.initialization_state & 4: - self.module = None - if not self.initialization_state & 16: - self.level = 0 - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_level = space.wrap(self.level) # int + space.setattr(w_node, space.wrap('level'), w_level) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), 
w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_module = get_field(space, w_node, 'module', True) + w_names = get_field(space, w_node, 'names', False) + w_level = get_field(space, w_node, 'level', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _module = space.str_or_None_w(w_module) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _level = space.int_w(w_level) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ImportFrom(_module, _names, _level, _lineno, _col_offset) + +State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level']) class Exec(stmt): @@ -982,7 +1279,6 @@ self.globals = globals self.locals = locals stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Exec(self) @@ -995,28 +1291,42 @@ self.locals = self.locals.mutate_over(visitor) return visitor.visit_Exec(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec') - else: - if not self.initialization_state & 8: - self.globals = None - if not self.initialization_state & 16: - self.locals = None - self.body.sync_app_attrs(space) - if self.globals: - self.globals.sync_app_attrs(space) - if self.locals: - self.locals.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Exec) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('globals'), w_globals) + w_locals = self.locals.to_object(space) if 
self.locals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('locals'), w_locals) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_globals = get_field(space, w_node, 'globals', True) + w_locals = get_field(space, w_node, 'locals', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _body = expr.from_object(space, w_body) + _globals = expr.from_object(space, w_globals) + _locals = expr.from_object(space, w_locals) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Exec(_body, _globals, _locals, _lineno, _col_offset) + +State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals']) class Global(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Global(self) @@ -1024,18 +1334,32 @@ def mutate_over(self, visitor): return visitor.visit_Global(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') + def to_object(self, space): + w_node = space.call_function(get(space).w_Global) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.realstr_w(w_obj) for w_obj in list_w] - else: - self.names = None + names_w = [space.wrap(node) for node in self.names] # identifier + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # 
int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.realstr_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Global(_names, _lineno, _col_offset) + +State.ast_type('Global', 'stmt', ['names']) class Expr(stmt): @@ -1043,7 +1367,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1052,19 +1375,33 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Expr(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') - else: - pass - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expr) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Expr(_value, 
_lineno, _col_offset) + +State.ast_type('Expr', 'stmt', ['value']) class Pass(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Pass(self) @@ -1072,18 +1409,29 @@ def mutate_over(self, visitor): return visitor.visit_Pass(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Pass') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Pass) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Pass(_lineno, _col_offset) + +State.ast_type('Pass', 'stmt', []) class Break(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Break(self) @@ -1091,18 +1439,29 @@ def mutate_over(self, visitor): return visitor.visit_Break(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Break') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Break) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = 
get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Break(_lineno, _col_offset) + +State.ast_type('Break', 'stmt', []) class Continue(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Continue(self) @@ -1110,11 +1469,23 @@ def mutate_over(self, visitor): return visitor.visit_Continue(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Continue') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Continue) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Continue(_lineno, _col_offset) + +State.ast_type('Continue', 'stmt', []) class expr(AST): @@ -1123,14 +1494,66 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_BoolOp): + return BoolOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_BinOp): + return BinOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_UnaryOp): + return UnaryOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Lambda): + return Lambda.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_IfExp): + return IfExp.from_object(space, w_node) + if space.isinstance_w(w_node, 
get(space).w_Dict): + return Dict.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Set): + return Set.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ListComp): + return ListComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_SetComp): + return SetComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_DictComp): + return DictComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_GeneratorExp): + return GeneratorExp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Yield): + return Yield.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Compare): + return Compare.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Call): + return Call.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Repr): + return Repr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Num): + return Num.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Str): + return Str.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Attribute): + return Attribute.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Subscript): + return Subscript.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Name): + return Name.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_List): + return List.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Tuple): + return Tuple.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Const): + return Const.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected expr node, got %T", w_node) +State.ast_type('expr', 'AST', None, ['lineno', 'col_offset']) + class BoolOp(expr): def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values - self.w_values = None expr.__init__(self, 
lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_BoolOp(self) @@ -1140,21 +1563,36 @@ visitor._mutate_sequence(self.values) return visitor.visit_BoolOp(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') + def to_object(self, space): + w_node = space.call_function(get(space).w_BoolOp) + w_op = boolop_to_class[self.op - 1]().to_object(space) # boolop + space.setattr(w_node, space.wrap('op'), w_op) + if self.values is None: + values_w = [] else: - pass From noreply at buildbot.pypy.org Mon Aug 18 16:05:36 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 18 Aug 2014 16:05:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix merge Message-ID: <20140818140536.0ECF61D22EF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r72879:0e073fccc124 Date: 2014-08-18 00:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0e073fccc124/ Log: Fix merge diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -10,7 +10,7 @@ def raise_attriberr(space, w_obj, name): raise oefmt(space.w_AttributeError, - \"'%T' object has no attribute '%s'\", w_obj, name) + "'%T' object has no attribute '%s'", w_obj, name) def check_string(space, w_obj): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -393,7 +393,7 @@ def raise_attriberr(space, w_obj, name): raise oefmt(space.w_AttributeError, - \"'%T' object has no attribute '%s'\", w_obj, name) + "'%T' object has no attribute '%s'", w_obj, name) def check_string(space, w_obj): From noreply at buildbot.pypy.org Mon Aug 18 16:05:37 2014 From: noreply at buildbot.pypy.org 
(amauryfa) Date: Mon, 18 Aug 2014 16:05:37 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix after merge. Message-ID: <20140818140537.337AC1D22EF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r72880:0da7325d54c6 Date: 2014-08-18 09:16 +0200 http://bitbucket.org/pypy/pypy/changeset/0da7325d54c6/ Log: Fix after merge. diff --git a/pypy/module/__builtin__/compiling.py b/pypy/module/__builtin__/compiling.py --- a/pypy/module/__builtin__/compiling.py +++ b/pypy/module/__builtin__/compiling.py @@ -59,9 +59,8 @@ "compile() expected string without null bytes")) if flags & consts.PyCF_ONLY_AST: - mode = ec.compiler.compile_to_ast(str_, filename, mode, flags) - w_node = node.to_object(space) - return w_node + node = ec.compiler.compile_to_ast(source, filename, mode, flags) + return node.to_object(space) else: code = ec.compiler.compile(source, filename, mode, flags) return space.wrap(code) From noreply at buildbot.pypy.org Mon Aug 18 16:05:38 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 18 Aug 2014 16:05:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge branch split-ast-classes: Message-ID: <20140818140538.4C25F1D22EF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r72881:e3c463bd6f19 Date: 2014-08-18 15:59 +0200 http://bitbucket.org/pypy/pypy/changeset/e3c463bd6f19/ Log: Merge branch split-ast-classes: classes in the ast modules are now distinct from the node types used by the compiler. This removes all the hacks to keep attributes in sync, and will reduce memory needed to compile a module. 
From noreply at buildbot.pypy.org Mon Aug 18 16:05:39 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Mon, 18 Aug 2014 16:05:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Add doc for the new merged branch Message-ID: <20140818140539.6CE861D22EF@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r72882:4ebcd372fbbc Date: 2014-08-18 16:02 +0200 http://bitbucket.org/pypy/pypy/changeset/4ebcd372fbbc/ Log: Add doc for the new merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,6 @@ .. branch: pytest-25 Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, respectively. + +.. branch: split-ast-classes +Classes in the ast module are now distinct from structures used by the compiler. From noreply at buildbot.pypy.org Mon Aug 18 16:10:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:10 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Attempt to fix CALL_RELEASE_GIL with stm Message-ID: <20140818141010.9B4631C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72883:e9aa2fdab146 Date: 2014-08-18 15:31 +0200 http://bitbucket.org/pypy/pypy/changeset/e9aa2fdab146/ Log: Attempt to fix CALL_RELEASE_GIL with stm diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -361,6 +361,8 @@ lltype.Void)) def _build_release_gil(self, gcrootmap): + if self.gc_ll_descr.stm: + return if gcrootmap is None or gcrootmap.is_shadow_stack: reacqgil_func = llhelper(self._REACQGIL0_FUNC, self._reacquire_gil_shadowstack) diff --git a/rpython/jit/backend/llsupport/callbuilder.py b/rpython/jit/backend/llsupport/callbuilder.py --- a/rpython/jit/backend/llsupport/callbuilder.py +++ 
b/rpython/jit/backend/llsupport/callbuilder.py @@ -1,5 +1,5 @@ from rpython.rlib.clibffi import FFI_DEFAULT_ABI -from rpython.rlib import rgil +from rpython.rlib import rgc, rgil from rpython.rtyper.lltypesystem import lltype, rffi @@ -45,7 +45,10 @@ def emit_call_release_gil(self): """Emit a CALL_RELEASE_GIL, including calls to releasegil_addr and reacqgil_addr.""" - fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) + if rgc.stm_is_enabled(): + fastgil = 0 + else: + fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) self.select_call_release_gil_mode() self.prepare_arguments() self.push_gcmap_for_call_release_gil() diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -95,6 +95,10 @@ def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap # + if self.asm.cpu.gc_ll_descr.stm: + self.call_stm_before_ex_call() + return + # if not self.asm._is_asmgcc(): # shadowstack: change 'rpy_fastgil' to 0 (it should be # non-zero right now). @@ -132,6 +136,10 @@ def move_real_result_and_call_reacqgil_addr(self, fastgil): from rpython.jit.backend.x86 import rx86 # + if self.asm.cpu.gc_ll_descr.stm: + self.call_stm_after_ex_call() + return + # # check if we need to call the reacqgil() function or not # (to acquiring the GIL, remove the asmgcc head from # the chained list, etc.) @@ -482,6 +490,41 @@ assert self.restype == INT self.mc.MOV_rs(eax.value, 0) + def call_stm_before_ex_call(self): + # XXX slowish: before any CALL_RELEASE_GIL, invoke the + # pypy_stm_commit_if_not_atomic() function. Messy because + # we need to save the register arguments first. 
+ # + n = min(self.next_arg_gpr, len(self.ARGUMENTS_GPR)) + for i in range(n): + self.mc.PUSH_r(self.ARGUMENTS_GPR[i].value) # PUSH gpr arg + m = min(self.next_arg_xmm, len(self.ARGUMENTS_XMM)) + extra = m + ((n + m) & 1) + # in total the stack is moved down by (n + extra) words, + # which needs to be an even value for alignment: + assert ((n + extra) & 1) == 0 + if extra > 0: + self.mc.SUB_ri(esp.value, extra * WORD) # SUB rsp, extra + for i in range(m): + self.mc.MOVSD_sx(i * WORD, self.ARGUMENTS_XMM[i].value) + # MOVSD [rsp+..], xmm + # + self.mc.CALL(imm(rstm.adr_pypy_stm_commit_if_not_atomic)) + # + if extra > 0: + for i in range(m): + self.mc.MOVSD_xs(self.ARGUMENTS_XMM[i].value, i * WORD) + self.mc.ADD_ri(esp.value, extra * WORD) + for i in range(n-1, -1, -1): + self.mc.POP_r(self.ARGUMENTS_GPR[i].value) + + def call_stm_after_ex_call(self): + # after any CALL_RELEASE_GIL, invoke the + # pypy_stm_start_if_not_atomic() function + self.save_result_value_reacq() + self.mc.CALL(imm(rstm.adr_pypy_stm_start_if_not_atomic)) + self.restore_result_value_reacq() + if IS_X86_32: CallBuilder = CallBuilder32 diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -35,6 +35,10 @@ adr_pypy__rewind_jmp_copy_stack_slice = ( CFlexSymbolic('((long)&pypy__rewind_jmp_copy_stack_slice)')) +adr_pypy_stm_commit_if_not_atomic = ( + CFlexSymbolic('((long)&pypy_stm_commit_if_not_atomic)')) +adr_pypy_stm_start_if_not_atomic = ( + CFlexSymbolic('((long)&pypy_stm_start_if_not_atomic)')) def rewind_jmp_frame(): From noreply at buildbot.pypy.org Mon Aug 18 16:10:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Improve the test Message-ID: <20140818141011.D73021C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72884:7a0f310a4651 Date: 2014-08-18 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/7a0f310a4651/ Log: Improve the 
test diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -261,6 +261,7 @@ if isinstance(TP, lltype.Ptr) and TP.TO._gckind == 'gc': assert all_addrs[counter] == frame_adr + jitframe.getofs(name) counter += 1 + assert counter == 5 # gcpattern assert all_addrs[5] == indexof(0) assert all_addrs[6] == indexof(1) @@ -269,13 +270,18 @@ assert all_addrs[9] == indexof(7) if sys.maxint == 2**31 - 1: assert all_addrs[10] == indexof(31) - assert all_addrs[11] == indexof(33 + 32) + assert all_addrs[11] == indexof(65) + assert all_addrs[12] == indexof(68) + assert all_addrs[13] == indexof(69) + assert all_addrs[14] == indexof(71) else: assert all_addrs[10] == indexof(63) - assert all_addrs[11] == indexof(65 + 64) + assert all_addrs[11] == indexof(129) + assert all_addrs[12] == indexof(132) + assert all_addrs[13] == indexof(133) + assert all_addrs[14] == indexof(135) - assert len(all_addrs) == 5 + 6 + 4 - # 5 static fields, 4 addresses from gcmap, 2 from gcpattern + assert len(all_addrs) == 15 lltype.free(frame_info, flavor='raw') lltype.free(frame.jf_gcmap, flavor='raw') From noreply at buildbot.pypy.org Mon Aug 18 16:10:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:13 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: hg merge default Message-ID: <20140818141013.1F6B71C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72885:ed6d5bd73896 Date: 2014-08-18 16:03 +0200 http://bitbucket.org/pypy/pypy/changeset/ed6d5bd73896/ Log: hg merge default diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4301,6 +4301,38 @@ s = a.build_types(f, []) assert isinstance(s, annmodel.SomeString) + def 
test_isinstance_str_1(self): + def g(): + pass + def f(n): + if n > 5: + s = "foo" + else: + s = None + g() + return isinstance(s, str) + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s, annmodel.SomeBool) + assert not s.is_constant() + + def test_isinstance_str_2(self): + def g(): + pass + def f(n): + if n > 5: + s = "foo" + else: + s = None + g() + if isinstance(s, str): + return s + return "" + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s, annmodel.SomeString) + assert not s.can_be_none() + def g(n): return [0, 1, 2, n] diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -263,6 +263,7 @@ if isinstance(TP, lltype.Ptr) and TP.TO._gckind == 'gc': assert all_addrs[counter] == frame_adr + jitframe.getofs(name) counter += 1 + assert counter == 5 # gcpattern assert all_addrs[5] == indexof(0) assert all_addrs[6] == indexof(1) @@ -271,13 +272,18 @@ assert all_addrs[9] == indexof(7) if sys.maxint == 2**31 - 1: assert all_addrs[10] == indexof(31) - assert all_addrs[11] == indexof(33 + 32) + assert all_addrs[11] == indexof(65) + assert all_addrs[12] == indexof(68) + assert all_addrs[13] == indexof(69) + assert all_addrs[14] == indexof(71) else: assert all_addrs[10] == indexof(63) - assert all_addrs[11] == indexof(65 + 64) + assert all_addrs[11] == indexof(129) + assert all_addrs[12] == indexof(132) + assert all_addrs[13] == indexof(133) + assert all_addrs[14] == indexof(135) - assert len(all_addrs) == 5 + 6 + 4 - # 5 static fields, 4 addresses from gcmap, 2 from gcpattern + assert len(all_addrs) == 15 lltype.free(frame_info, flavor='raw') lltype.free(frame.jf_gcmap, flavor='raw') diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -693,13 +693,14 @@ if 
hop.s_result.is_constant(): return hop.inputconst(lltype.Bool, hop.s_result.const) - if hop.args_s[1].is_constant() and hop.args_s[1].const == list: - if hop.args_s[0].knowntype != list: - raise TyperError("isinstance(x, list) expects x to be known statically to be a list or None") - rlist = hop.args_r[0] - vlist = hop.inputarg(rlist, arg=0) - cnone = hop.inputconst(rlist, None) - return hop.genop('ptr_ne', [vlist, cnone], resulttype=lltype.Bool) + if hop.args_s[1].is_constant() and hop.args_s[1].const in (str, list): + if hop.args_s[0].knowntype not in (str, list): + raise TyperError("isinstance(x, str/list) expects x to be known" + " statically to be a str/list or None") + rstrlist = hop.args_r[0] + vstrlist = hop.inputarg(rstrlist, arg=0) + cnone = hop.inputconst(rstrlist, None) + return hop.genop('ptr_ne', [vstrlist, cnone], resulttype=lltype.Bool) assert isinstance(hop.args_r[0], rclass.InstanceRepr) return hop.args_r[0].rtype_isinstance(hop) diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -364,17 +364,35 @@ assert res == isinstance([A(), B(), C()][x-1], [A, B, C][y-1]) * 3 def test_isinstance_list(self): + def g(): + pass def f(i): if i == 0: l = [] else: l = None + g() return isinstance(l, list) res = self.interpret(f, [0]) assert res is True res = self.interpret(f, [1]) assert res is False + def test_isinstance_str(self): + def g(): + pass + def f(i): + if i == 0: + l = "foobar" + else: + l = None + g() + return isinstance(l, str) + res = self.interpret(f, [0]) + assert res is True + res = self.interpret(f, [1]) + assert res is False + def test_instantiate(self): class A: pass From noreply at buildbot.pypy.org Mon Aug 18 16:10:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:14 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix Message-ID: 
<20140818141014.4EF7D1C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72886:f7f9ae07a05b Date: 2014-08-18 16:07 +0200 http://bitbucket.org/pypy/pypy/changeset/f7f9ae07a05b/ Log: Fix diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -114,7 +114,8 @@ self._build_cond_call_slowpath(True, True)] self._build_stack_check_slowpath() - self._build_release_gil(gc_ll_descr.gcrootmap) + if not gc_ll_descr.stm: + self._build_release_gil(gc_ll_descr.gcrootmap) if not self._debug: # if self._debug is already set it means that someone called # set_debug by hand before initializing the assembler. Leave it @@ -361,8 +362,6 @@ lltype.Void)) def _build_release_gil(self, gcrootmap): - if self.gc_ll_descr.stm: - return if gcrootmap is None or gcrootmap.is_shadow_stack: reacqgil_func = llhelper(self._REACQGIL0_FUNC, self._reacquire_gil_shadowstack) From noreply at buildbot.pypy.org Mon Aug 18 16:10:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix test Message-ID: <20140818141015.7DDFD1C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72887:0c92aa1a24e2 Date: 2014-08-18 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/0c92aa1a24e2/ Log: Fix test diff --git a/rpython/jit/backend/llsupport/test/test_gc.py b/rpython/jit/backend/llsupport/test/test_gc.py --- a/rpython/jit/backend/llsupport/test/test_gc.py +++ b/rpython/jit/backend/llsupport/test/test_gc.py @@ -2,7 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import llhelper -from rpython.jit.backend.llsupport import jitframe, gc, descr +from rpython.jit.backend.llsupport import jitframe, gc, 
descr, gcmap from rpython.jit.backend.llsupport import symbolic from rpython.jit.metainterp.gc import get_description from rpython.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr @@ -242,7 +242,8 @@ frame_info = lltype.malloc(jitframe.JITFRAMEINFO, zero=True, flavor='raw') frame = lltype.malloc(jitframe.JITFRAME, 200, zero=True) frame.jf_frame_info = frame_info - frame.jf_gcmap = lltype.malloc(jitframe.GCMAP, 4, flavor='raw') + frame.jf_gcmap = lltype.malloc(jitframe.GCMAP, 4 + gcmap.GCMAP_STM_LOCATION, + flavor='raw') if sys.maxint == 2**31 - 1: max = r_uint(2 ** 31) else: From noreply at buildbot.pypy.org Mon Aug 18 16:10:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: merge heads Message-ID: <20140818141016.A1EDD1C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72888:11d083257426 Date: 2014-08-18 16:08 +0200 http://bitbucket.org/pypy/pypy/changeset/11d083257426/ Log: merge heads diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -114,7 +114,8 @@ self._build_cond_call_slowpath(True, True)] self._build_stack_check_slowpath() - self._build_release_gil(gc_ll_descr.gcrootmap) + if not gc_ll_descr.stm: + self._build_release_gil(gc_ll_descr.gcrootmap) if not self._debug: # if self._debug is already set it means that someone called # set_debug by hand before initializing the assembler. 
Leave it @@ -361,8 +362,6 @@ lltype.Void)) def _build_release_gil(self, gcrootmap): - if self.gc_ll_descr.stm: - return if gcrootmap is None or gcrootmap.is_shadow_stack: reacqgil_func = llhelper(self._REACQGIL0_FUNC, self._reacquire_gil_shadowstack) From noreply at buildbot.pypy.org Mon Aug 18 16:10:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:10:18 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140818141018.0C7141C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72889:16d07ec7276a Date: 2014-08-18 16:09 +0200 http://bitbucket.org/pypy/pypy/changeset/16d07ec7276a/ Log: merge heads diff too long, truncating to 2000 out of 10269 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,6 @@ .. branch: pytest-25 Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, respectively. + +.. branch: split-ast-classes +Classes in the ast module are now distinct from structures used by the compiler. 
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,5 +1,4 @@ # Generated by tools/asdl_py.py -from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name @@ -21,11 +20,15 @@ 'AST string must be of type str or unicode')) return w_obj - -class AST(W_Root): - - w_dict = None - +def get_field(space, w_node, name, optional): + w_obj = w_node.getdictvalue(space, name) + if w_obj is None and not optional: + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %T", name, w_node) + return w_obj + + +class AST(object): __metaclass__ = extendabletype def walkabout(self, visitor): @@ -34,8 +37,23 @@ def mutate_over(self, visitor): raise AssertionError("mutate_over() implementation not provided") - def sync_app_attrs(self, space): - raise NotImplementedError + +class NodeVisitorNotImplemented(Exception): + pass + + +class _FieldsWrapper(W_Root): + "Hack around the fact we can't store tuples on a TypeDef." + + def __init__(self, fields): + self.fields = fields + + def __spacebind__(self, space): + return space.newtuple([space.wrap(field) for field in self.fields]) + + +class W_AST(W_Root): + w_dict = None def getdict(self, space): if self.w_dict is None: @@ -47,7 +65,7 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = w_type.getdictvalue(space, "_fields") + w_fields = space.getattr(w_type, space.wrap("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, @@ -71,79 +89,94 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) - def missing_field(self, space, required, host): - "Find which required field is missing." 
- state = self.initialization_state - for i in range(len(required)): - if (state >> i) & 1: - continue # field is present - missing = required[i] - if missing is None: - continue # field is optional - w_obj = self.getdictvalue(space, missing) - if w_obj is None: - raise oefmt(space.w_TypeError, - "required field \"%s\" missing from %s", - missing, host) - else: - raise oefmt(space.w_TypeError, - "incorrect type for field \"%s\" in %s", - missing, host) - raise AssertionError("should not reach here") - - -class NodeVisitorNotImplemented(Exception): - pass - - -class _FieldsWrapper(W_Root): - "Hack around the fact we can't store tuples on a TypeDef." - - def __init__(self, fields): - self.fields = fields - - def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) - - -def get_AST_new(node_class): - def generic_AST_new(space, w_type, __args__): - node = space.allocate_instance(node_class, w_type) - node.initialization_state = 0 - return space.wrap(node) - return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__) - -def AST_init(space, w_self, __args__): +def W_AST_new(space, w_type, __args__): + node = space.allocate_instance(W_AST, w_type) + return space.wrap(node) + +def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() - if args_w and len(args_w) != 0: - w_err = space.wrap("_ast.AST constructor takes 0 positional arguments") - raise OperationError(space.w_TypeError, w_err) + fields_w = space.fixedview(space.getattr(space.type(w_self), + space.wrap("_fields"))) + num_fields = len(fields_w) if fields_w else 0 + if args_w and len(args_w) != num_fields: + if num_fields == 0: + raise oefmt(space.w_TypeError, + "%T constructor takes 0 positional arguments", w_self) + elif num_fields == 1: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional argument", w_self, num_fields) + else: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d 
positional arguments", w_self, num_fields) + if args_w: + for i, w_field in enumerate(fields_w): + space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("_ast.AST", + +W_AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __reduce__=interp2app(AST.reduce_w), - __setstate__=interp2app(AST.setstate_w), + __reduce__=interp2app(W_AST.reduce_w), + __setstate__=interp2app(W_AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, - typedef.descr_set_dict, cls=AST), - __new__=interp2app(get_AST_new(AST)), - __init__=interp2app(AST_init), + typedef.descr_set_dict, cls=W_AST), + __new__=interp2app(W_AST_new), + __init__=interp2app(W_AST_init), ) - - +class State: + AST_TYPES = [] + + @classmethod + def ast_type(cls, name, base, fields, attributes=None): + cls.AST_TYPES.append((name, base, fields, attributes)) + + def __init__(self, space): + self.w_AST = space.gettypeobject(W_AST.typedef) + for (name, base, fields, attributes) in self.AST_TYPES: + self.make_new_type(space, name, base, fields, attributes) + + def make_new_type(self, space, name, base, fields, attributes): + w_base = getattr(self, 'w_%s' % base) + w_dict = space.newdict() + space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + if fields is not None: + space.setitem_str(w_dict, "_fields", + space.newtuple([space.wrap(f) for f in fields])) + if attributes is not None: + space.setitem_str(w_dict, "_attributes", + space.newtuple([space.wrap(a) for a in attributes])) + w_type = space.call_function( + space.w_type, + space.wrap(name), space.newtuple([w_base]), w_dict) + setattr(self, 'w_%s' % name, w_type) + +def get(space): + return space.fromcache(State) class mod(AST): - pass + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, 
get(space).w_Module): + return Module.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Interactive): + return Interactive.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expression): + return Expression.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Suite): + return Suite.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected mod node, got %T", w_node) +State.ast_type('mod', 'AST', None, []) class Module(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Module(self) @@ -153,29 +186,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Module(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Module') + def to_object(self, space): + w_node = space.call_function(get(space).w_Module) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Module(_body) + +State.ast_type('Module', 'mod', ['body']) class Interactive(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -185,28 +219,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Interactive(self) - 
def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Interactive') + def to_object(self, space): + w_node = space.call_function(get(space).w_Interactive) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Interactive(_body) + +State.ast_type('Interactive', 'mod', ['body']) class Expression(mod): def __init__(self, body): self.body = body - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Expression(self) @@ -215,20 +251,25 @@ self.body = self.body.mutate_over(visitor) return visitor.visit_Expression(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Expression') - else: - pass - self.body.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expression) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + _body = expr.from_object(space, w_body) + return Expression(_body) + +State.ast_type('Expression', 'mod', ['body']) class Suite(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): 
visitor.visit_Suite(self) @@ -238,21 +279,24 @@ visitor._mutate_sequence(self.body) return visitor.visit_Suite(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Suite') + def to_object(self, space): + w_node = space.call_function(get(space).w_Suite) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Suite(_body) + +State.ast_type('Suite', 'mod', ['body']) class stmt(AST): @@ -261,17 +305,68 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_FunctionDef): + return FunctionDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ClassDef): + return ClassDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Return): + return Return.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Delete): + return Delete.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assign): + return Assign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_AugAssign): + return AugAssign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Print): + return Print.from_object(space, w_node) + if 
space.isinstance_w(w_node, get(space).w_For): + return For.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_While): + return While.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_If): + return If.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_With): + return With.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Raise): + return Raise.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryExcept): + return TryExcept.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryFinally): + return TryFinally.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assert): + return Assert.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Import): + return Import.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ImportFrom): + return ImportFrom.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Exec): + return Exec.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Global): + return Global.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expr): + return Expr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Pass): + return Pass.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Break): + return Break.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Continue): + return Continue.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected stmt node, got %T", w_node) +State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset']) + class FunctionDef(stmt): def __init__(self, name, args, body, decorator_list, lineno, col_offset): self.name = name self.args = args self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def 
walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -284,32 +379,49 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_FunctionDef(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_FunctionDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + w_args = self.args.to_object(space) # arguments + space.setattr(w_node, space.wrap('args'), w_args) + if self.body is None: + body_w = [] else: - pass - self.args.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def 
from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_args = get_field(space, w_node, 'args', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.realstr_w(w_name) + _args = arguments.from_object(space, w_args) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list']) class ClassDef(stmt): @@ -317,13 +429,9 @@ def __init__(self, name, bases, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases - self.w_bases = None self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -337,41 +445,54 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_ClassDef(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_ClassDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + if self.bases is None: + bases_w = [] else: - pass - w_list = self.w_bases - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.bases = 
[space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.bases = None - if self.bases is not None: - for node in self.bases: - node.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) + bases_w = [node.to_object(space) for node in self.bases] # expr + w_bases = space.newlist(bases_w) + space.setattr(w_node, space.wrap('bases'), w_bases) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_bases = get_field(space, w_node, 'bases', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) 
+ _name = space.realstr_w(w_name) + bases_w = space.unpackiterable(w_bases) + _bases = [expr.from_object(space, w_item) for w_item in bases_w] + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list']) class Return(stmt): @@ -379,7 +500,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Return(self) @@ -389,23 +509,34 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Return(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None], 'Return') - else: - if not self.initialization_state & 4: - self.value = None - if self.value: - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Return) + w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + 
_col_offset = space.int_w(w_col_offset) + return Return(_value, _lineno, _col_offset) + +State.ast_type('Return', 'stmt', ['value']) class Delete(stmt): def __init__(self, targets, lineno, col_offset): self.targets = targets - self.w_targets = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Delete(self) @@ -415,31 +546,40 @@ visitor._mutate_sequence(self.targets) return visitor.visit_Delete(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') + def to_object(self, space): + w_node = space.call_function(get(space).w_Delete) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Delete(_targets, _lineno, _col_offset) + +State.ast_type('Delete', 'stmt', ['targets']) class Assign(stmt): def 
__init__(self, targets, value, lineno, col_offset): self.targets = targets - self.w_targets = None self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assign(self) @@ -450,22 +590,36 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Assign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') + def to_object(self, space): + w_node = space.call_function(get(space).w_Assign) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) - self.value.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return 
Assign(_targets, _value, _lineno, _col_offset) + +State.ast_type('Assign', 'stmt', ['targets', 'value']) class AugAssign(stmt): @@ -475,7 +629,6 @@ self.op = op self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -485,13 +638,35 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_AugAssign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') - else: - pass - self.target.sync_app_attrs(space) - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_AugAssign) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_op = operator_to_class[self.op - 1]().to_object(space) # operator + space.setattr(w_node, space.wrap('op'), w_op) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_op = get_field(space, w_node, 'op', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _op = operator.from_object(space, w_op) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return AugAssign(_target, _op, _value, _lineno, _col_offset) + +State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) class Print(stmt): @@ 
-499,10 +674,8 @@ def __init__(self, dest, values, nl, lineno, col_offset): self.dest = dest self.values = values - self.w_values = None self.nl = nl stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Print(self) @@ -514,24 +687,40 @@ visitor._mutate_sequence(self.values) return visitor.visit_Print(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 27: - self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print') + def to_object(self, space): + w_node = space.call_function(get(space).w_Print) + w_dest = self.dest.to_object(space) if self.dest is not None else space.w_None # expr + space.setattr(w_node, space.wrap('dest'), w_dest) + if self.values is None: + values_w = [] else: - if not self.initialization_state & 4: - self.dest = None - if self.dest: - self.dest.sync_app_attrs(space) - w_list = self.w_values - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.values = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.values = None - if self.values is not None: - for node in self.values: - node.sync_app_attrs(space) + values_w = [node.to_object(space) for node in self.values] # expr + w_values = space.newlist(values_w) + space.setattr(w_node, space.wrap('values'), w_values) + w_nl = space.wrap(self.nl) # bool + space.setattr(w_node, space.wrap('nl'), w_nl) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_dest = get_field(space, w_node, 'dest', True) + w_values = get_field(space, w_node, 'values', False) + w_nl = get_field(space, w_node, 'nl', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _dest = 
expr.from_object(space, w_dest) + values_w = space.unpackiterable(w_values) + _values = [expr.from_object(space, w_item) for w_item in values_w] + _nl = space.bool_w(w_nl) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Print(_dest, _values, _nl, _lineno, _col_offset) + +State.ast_type('Print', 'stmt', ['dest', 'values', 'nl']) class For(stmt): @@ -540,11 +729,8 @@ self.target = target self.iter = iter self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_For(self) @@ -558,33 +744,49 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_For(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') + def to_object(self, space): + w_node = space.call_function(get(space).w_For) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_iter = self.iter.to_object(space) # expr + space.setattr(w_node, space.wrap('iter'), w_iter) + if self.body is None: + body_w = [] else: - pass - self.target.sync_app_attrs(space) - self.iter.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, 
space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_iter = get_field(space, w_node, 'iter', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _iter = expr.from_object(space, w_iter) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return For(_target, _iter, _body, _orelse, _lineno, _col_offset) + +State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse']) class While(stmt): @@ -592,11 +794,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_While(self) @@ -609,32 +808,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_While(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') + def to_object(self, space): + w_node 
= space.call_function(get(space).w_While) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = 
space.int_w(w_col_offset) + return While(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('While', 'stmt', ['test', 'body', 'orelse']) class If(stmt): @@ -642,11 +854,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_If(self) @@ -659,32 +868,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_If(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') + def to_object(self, space): + w_node = space.call_function(get(space).w_If) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + 
w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return If(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('If', 'stmt', ['test', 'body', 'orelse']) class With(stmt): @@ -693,9 +915,7 @@ self.context_expr = context_expr self.optional_vars = optional_vars self.body = body - self.w_body = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_With(self) @@ -708,25 +928,40 @@ visitor._mutate_sequence(self.body) return visitor.visit_With(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 23: - self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') + def to_object(self, space): + w_node = space.call_function(get(space).w_With) + w_context_expr = self.context_expr.to_object(space) # expr + space.setattr(w_node, space.wrap('context_expr'), w_context_expr) + w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr + space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars) + if self.body is None: + body_w = [] else: - if not self.initialization_state & 8: - self.optional_vars = None - self.context_expr.sync_app_attrs(space) - if 
self.optional_vars: - self.optional_vars.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_context_expr = get_field(space, w_node, 'context_expr', False) + w_optional_vars = get_field(space, w_node, 'optional_vars', True) + w_body = get_field(space, w_node, 'body', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _context_expr = expr.from_object(space, w_context_expr) + _optional_vars = expr.from_object(space, w_optional_vars) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return With(_context_expr, _optional_vars, _body, _lineno, _col_offset) + +State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body']) class Raise(stmt): @@ -736,7 +971,6 @@ self.inst = inst self.tback = tback stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Raise(self) @@ -750,35 +984,44 @@ self.tback = self.tback.mutate_over(visitor) return visitor.visit_Raise(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~28) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise') - else: - if not 
self.initialization_state & 4: - self.type = None - if not self.initialization_state & 8: - self.inst = None - if not self.initialization_state & 16: - self.tback = None - if self.type: - self.type.sync_app_attrs(space) - if self.inst: - self.inst.sync_app_attrs(space) - if self.tback: - self.tback.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Raise) + w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr + space.setattr(w_node, space.wrap('type'), w_type) + w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr + space.setattr(w_node, space.wrap('inst'), w_inst) + w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr + space.setattr(w_node, space.wrap('tback'), w_tback) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_type = get_field(space, w_node, 'type', True) + w_inst = get_field(space, w_node, 'inst', True) + w_tback = get_field(space, w_node, 'tback', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _type = expr.from_object(space, w_type) + _inst = expr.from_object(space, w_inst) + _tback = expr.from_object(space, w_tback) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Raise(_type, _inst, _tback, _lineno, _col_offset) + +State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback']) class TryExcept(stmt): def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body - self.w_body = None self.handlers = handlers - self.w_handlers = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 
def walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -792,52 +1035,58 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_TryExcept(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryExcept) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_handlers - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w] - else: - self.handlers = None - if self.handlers is not None: - for node in self.handlers: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.handlers is None: + handlers_w = [] + else: + handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler + w_handlers = space.newlist(handlers_w) + space.setattr(w_node, space.wrap('handlers'), w_handlers) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), 
w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_handlers = get_field(space, w_node, 'handlers', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + handlers_w = space.unpackiterable(w_handlers) + _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) + +State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) class TryFinally(stmt): def __init__(self, body, finalbody, lineno, col_offset): self.body = body - self.w_body = None self.finalbody = finalbody - self.w_finalbody = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -849,31 +1098,41 @@ visitor._mutate_sequence(self.finalbody) return visitor.visit_TryFinally(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryFinally) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - 
node.sync_app_attrs(space) - w_list = self.w_finalbody - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.finalbody = None - if self.finalbody is not None: - for node in self.finalbody: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.finalbody is None: + finalbody_w = [] + else: + finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt + w_finalbody = space.newlist(finalbody_w) + space.setattr(w_node, space.wrap('finalbody'), w_finalbody) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_finalbody = get_field(space, w_node, 'finalbody', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + finalbody_w = space.unpackiterable(w_finalbody) + _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryFinally(_body, _finalbody, _lineno, _col_offset) + +State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) class Assert(stmt): @@ -882,7 +1141,6 @@ self.test = test self.msg = msg stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assert(self) @@ -893,24 +1151,38 @@ self.msg = self.msg.mutate_over(visitor) return visitor.visit_Assert(self) - def sync_app_attrs(self, space): - if 
(self.initialization_state & ~8) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') - else: - if not self.initialization_state & 8: - self.msg = None - self.test.sync_app_attrs(space) - if self.msg: - self.msg.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Assert) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr + space.setattr(w_node, space.wrap('msg'), w_msg) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_msg = get_field(space, w_node, 'msg', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + _msg = expr.from_object(space, w_msg) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assert(_test, _msg, _lineno, _col_offset) + +State.ast_type('Assert', 'stmt', ['test', 'msg']) class Import(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Import(self) @@ -920,21 +1192,32 @@ visitor._mutate_sequence(self.names) return visitor.visit_Import(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') + def to_object(self, space): + w_node = space.call_function(get(space).w_Import) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not 
None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Import(_names, _lineno, _col_offset) + +State.ast_type('Import', 'stmt', ['names']) class ImportFrom(stmt): @@ -942,10 +1225,8 @@ def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names - self.w_names = None self.level = level stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_ImportFrom(self) @@ -955,24 +1236,40 @@ visitor._mutate_sequence(self.names) return visitor.visit_ImportFrom(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~20) ^ 11: - self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') + def to_object(self, space): + w_node = space.call_function(get(space).w_ImportFrom) + w_module = space.wrap(self.module) # identifier + space.setattr(w_node, space.wrap('module'), w_module) + if self.names is None: + names_w = [] else: - if not self.initialization_state & 4: - 
self.module = None - if not self.initialization_state & 16: - self.level = 0 - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_level = space.wrap(self.level) # int + space.setattr(w_node, space.wrap('level'), w_level) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_module = get_field(space, w_node, 'module', True) + w_names = get_field(space, w_node, 'names', False) + w_level = get_field(space, w_node, 'level', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _module = space.str_or_None_w(w_module) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _level = space.int_w(w_level) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ImportFrom(_module, _names, _level, _lineno, _col_offset) + +State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level']) class Exec(stmt): @@ -982,7 +1279,6 @@ self.globals = globals self.locals = locals stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_Exec(self) @@ -995,28 +1291,42 @@ self.locals = self.locals.mutate_over(visitor) return visitor.visit_Exec(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~24) ^ 7: - self.missing_field(space, ['lineno', 
'col_offset', 'body', None, None], 'Exec') - else: - if not self.initialization_state & 8: - self.globals = None - if not self.initialization_state & 16: - self.locals = None - self.body.sync_app_attrs(space) - if self.globals: - self.globals.sync_app_attrs(space) - if self.locals: - self.locals.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Exec) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('globals'), w_globals) + w_locals = self.locals.to_object(space) if self.locals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('locals'), w_locals) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_globals = get_field(space, w_node, 'globals', True) + w_locals = get_field(space, w_node, 'locals', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _body = expr.from_object(space, w_body) + _globals = expr.from_object(space, w_globals) + _locals = expr.from_object(space, w_locals) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Exec(_body, _globals, _locals, _lineno, _col_offset) + +State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals']) class Global(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Global(self) @@ -1024,18 +1334,32 @@ def mutate_over(self, visitor): 
return visitor.visit_Global(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') + def to_object(self, space): + w_node = space.call_function(get(space).w_Global) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.realstr_w(w_obj) for w_obj in list_w] - else: - self.names = None + names_w = [space.wrap(node) for node in self.names] # identifier + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.realstr_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Global(_names, _lineno, _col_offset) + +State.ast_type('Global', 'stmt', ['names']) class Expr(stmt): @@ -1043,7 +1367,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1052,19 +1375,33 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Expr(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') - else: - pass - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expr) + 
w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Expr(_value, _lineno, _col_offset) + +State.ast_type('Expr', 'stmt', ['value']) class Pass(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Pass(self) @@ -1072,18 +1409,29 @@ def mutate_over(self, visitor): return visitor.visit_Pass(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Pass') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Pass) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Pass(_lineno, _col_offset) + +State.ast_type('Pass', 'stmt', []) class Break(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): 
visitor.visit_Break(self) @@ -1091,18 +1439,29 @@ def mutate_over(self, visitor): return visitor.visit_Break(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Break') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Break) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Break(_lineno, _col_offset) + +State.ast_type('Break', 'stmt', []) class Continue(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Continue(self) @@ -1110,11 +1469,23 @@ def mutate_over(self, visitor): return visitor.visit_Continue(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Continue') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Continue) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Continue(_lineno, _col_offset) + +State.ast_type('Continue', 'stmt', []) class 
expr(AST): @@ -1123,14 +1494,66 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_BoolOp): + return BoolOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_BinOp): + return BinOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_UnaryOp): + return UnaryOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Lambda): + return Lambda.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_IfExp): + return IfExp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Dict): + return Dict.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Set): + return Set.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ListComp): + return ListComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_SetComp): + return SetComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_DictComp): + return DictComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_GeneratorExp): + return GeneratorExp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Yield): + return Yield.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Compare): + return Compare.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Call): + return Call.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Repr): + return Repr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Num): + return Num.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Str): + return Str.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Attribute): + return Attribute.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Subscript): + return 
Subscript.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Name): + return Name.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_List): + return List.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Tuple): + return Tuple.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Const): + return Const.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected expr node, got %T", w_node) +State.ast_type('expr', 'AST', None, ['lineno', 'col_offset']) + class BoolOp(expr): def __init__(self, op, values, lineno, col_offset): self.op = op self.values = values - self.w_values = None expr.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_BoolOp(self) @@ -1140,21 +1563,36 @@ visitor._mutate_sequence(self.values) return visitor.visit_BoolOp(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp') + def to_object(self, space): + w_node = space.call_function(get(space).w_BoolOp) + w_op = boolop_to_class[self.op - 1]().to_object(space) # boolop + space.setattr(w_node, space.wrap('op'), w_op) + if self.values is None: + values_w = [] else: From noreply at buildbot.pypy.org Mon Aug 18 16:55:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 16:55:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Fix Message-ID: <20140818145525.7E2AB1C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72890:eb63f04c42a5 Date: 2014-08-18 16:54 +0200 http://bitbucket.org/pypy/pypy/changeset/eb63f04c42a5/ Log: Fix diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -491,6 +491,7 @@ self.mc.MOV_rs(eax.value, 0) def 
call_stm_before_ex_call(self): + from rpython.rlib import rstm # XXX slowish: before any CALL_RELEASE_GIL, invoke the # pypy_stm_commit_if_not_atomic() function. Messy because # we need to save the register arguments first. @@ -519,6 +520,7 @@ self.mc.POP_r(self.ARGUMENTS_GPR[i].value) def call_stm_after_ex_call(self): + from rpython.rlib import rstm # after any CALL_RELEASE_GIL, invoke the # pypy_stm_start_if_not_atomic() function self.save_result_value_reacq() From noreply at buildbot.pypy.org Mon Aug 18 17:04:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 17:04:23 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Silence C warnings Message-ID: <20140818150423.069871C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72891:c872c9937900 Date: 2014-08-18 17:03 +0200 http://bitbucket.org/pypy/pypy/changeset/c872c9937900/ Log: Silence C warnings diff --git a/rpython/translator/c/src/threadlocal.h b/rpython/translator/c/src/threadlocal.h --- a/rpython/translator/c/src/threadlocal.h +++ b/rpython/translator/c/src/threadlocal.h @@ -24,10 +24,15 @@ #ifdef USE___THREAD -#define RPyThreadStaticTLS __thread long +#ifdef RPY_STM +# define RPY_THREAD_LOCAL_TYPE pypy_object0_t * +#else +# define RPY_THREAD_LOCAL_TYPE void * +#endif +#define RPyThreadStaticTLS __thread RPY_THREAD_LOCAL_TYPE #define RPyThreadStaticTLS_Create(tls) (void)0 #define RPyThreadStaticTLS_Get(tls) tls -#define RPyThreadStaticTLS_Set(tls, value) tls = (long)value +#define RPyThreadStaticTLS_Set(tls, value) tls = (RPY_THREAD_LOCAL_TYPE)value #define OP_THREADLOCALREF_GETADDR(tlref, ptr) ptr = tlref #endif From noreply at buildbot.pypy.org Mon Aug 18 17:48:34 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Mon, 18 Aug 2014 17:48:34 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: fix. 
we now need to turn inevitable before all frees Message-ID: <20140818154834.D5B0F1D2AE7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7-rewindjmp Changeset: r72892:9c727ae67f53 Date: 2014-08-18 17:48 +0200 http://bitbucket.org/pypy/pypy/changeset/9c727ae67f53/ Log: fix. we now need to turn inevitable before all frees diff --git a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -37,7 +37,7 @@ # ____________________________________________________________ -def should_turn_inevitable_getter_setter(op, fresh_mallocs): +def should_turn_inevitable_getter_setter(op): # Getters and setters are allowed if their first argument is a GC pointer. # If it is a RAW pointer, and it is a read from a non-immutable place, # and it doesn't use the hint 'stm_dont_track_raw_accesses', then they @@ -52,7 +52,7 @@ return False if S._hints.get('stm_dont_track_raw_accesses', False): return False - return not fresh_mallocs.is_fresh_malloc(op.args[0]) + return True def should_turn_inevitable_call(op): if op.opname == 'direct_call': @@ -77,7 +77,7 @@ assert False -def should_turn_inevitable(op, block, fresh_mallocs): +def should_turn_inevitable(op, block): # Always-allowed operations never cause a 'turn inevitable' if op.opname in ALWAYS_ALLOW_OPERATIONS: return False @@ -86,22 +86,17 @@ if op.opname in GETTERS: if op.result.concretetype is lltype.Void: return False - return should_turn_inevitable_getter_setter(op, fresh_mallocs) + return should_turn_inevitable_getter_setter(op) if op.opname in SETTERS: if op.args[-1].concretetype is lltype.Void: return False - return should_turn_inevitable_getter_setter(op, fresh_mallocs) + return should_turn_inevitable_getter_setter(op) # # Mallocs & Frees if op.opname in MALLOCS: return False if op.opname in FREES: - # We can only run a CFG in non-inevitable mode from start - # to end in one transaction (every free gets 
called once - # for every fresh malloc). No need to turn inevitable. - # If the transaction is splitted, the remaining parts of the - # CFG will always run in inevitable mode anyways. - return not fresh_mallocs.is_fresh_malloc(op.args[0]) + return True # # Function calls if op.opname == 'direct_call' or op.opname == 'indirect_call': @@ -117,12 +112,10 @@ varoftype(lltype.Void)) def insert_turn_inevitable(graph): - from rpython.translator.backendopt.writeanalyze import FreshMallocs - fresh_mallocs = FreshMallocs(graph) for block in graph.iterblocks(): for i in range(len(block.operations)-1, -1, -1): op = block.operations[i] - inev = should_turn_inevitable(op, block, fresh_mallocs) + inev = should_turn_inevitable(op, block) if inev: if not isinstance(inev, str): inev = op.opname diff --git a/rpython/translator/stm/test/test_inevitable.py b/rpython/translator/stm/test/test_inevitable.py --- a/rpython/translator/stm/test/test_inevitable.py +++ b/rpython/translator/stm/test/test_inevitable.py @@ -120,7 +120,7 @@ lltype.free(p, flavor='raw') res = self.interpret_inevitable(f1, []) - assert res is None + assert res == 'free' def test_raw_malloc_2(self): X = lltype.Struct('X', ('foo', lltype.Signed)) @@ -130,7 +130,7 @@ llmemory.raw_free(addr) res = self.interpret_inevitable(f1, []) - assert res is None + assert res == 'raw_free' def test_unknown_raw_free(self): X = lltype.Struct('X', ('foo', lltype.Signed)) From noreply at buildbot.pypy.org Mon Aug 18 18:39:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 18:39:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: I think this operation should not be placed in the "nosideeffect" group... 
Message-ID: <20140818163947.E02391C059C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72893:5e74e4d23364 Date: 2014-08-18 18:39 +0200 http://bitbucket.org/pypy/pypy/changeset/5e74e4d23364/ Log: I think this operation should not be placed in the "nosideeffect" group... diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -495,7 +495,6 @@ 'VIRTUAL_REF/2', # removed before it's passed to the backend 'READ_TIMESTAMP/0', 'STM_SHOULD_BREAK_TRANSACTION/0', - 'STM_HINT_COMMIT_SOON/0', 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway @@ -522,6 +521,7 @@ 'RECORD_KNOWN_CLASS/2', # [objptr, clsptr] 'KEEPALIVE/1', 'STM_READ/1', + 'STM_HINT_COMMIT_SOON/0', '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From noreply at buildbot.pypy.org Mon Aug 18 19:54:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 19:54:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7-rewindjmp: Ready for merge Message-ID: <20140818175456.507B91D2AC1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7-rewindjmp Changeset: r72894:8fbadacba2a7 Date: 2014-08-18 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/8fbadacba2a7/ Log: Ready for merge From noreply at buildbot.pypy.org Mon Aug 18 19:55:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 19:55:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Merge branch stmgc-c7-rewindjmp Message-ID: <20140818175502.242111D2AC1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72895:baea23f84952 Date: 2014-08-18 19:54 +0200 http://bitbucket.org/pypy/pypy/changeset/baea23f84952/ Log: Merge branch stmgc-c7-rewindjmp Transactions can now continue even across any number of function 
returns without becoming inevitable. diff too long, truncating to 2000 out of 19427 lines diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. 
+You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' 
not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. """) group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if 
sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. + if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. 
-if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. 
To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. 
May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. 
+ return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. 
expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will + be suppressed in the help. addoption('--twowords', '--two-words') + results in help showing '--two-words' only, but --twowords gets + accepted **and** the automatic destination is in args.twowords + """ + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=False) def _addoption(self, *optnames, **attrs): - option = py.std.optparse.Option(*optnames, **attrs) + option = Argument(*optnames, **attrs) self._addoption_instance(option, shortupper=True) From noreply at buildbot.pypy.org Mon Aug 18 22:25:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 22:25:47 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test fix Message-ID: <20140818202547.1F86C1D2AE7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72896:7d12f0c90a20 Date: 2014-08-18 22:12 +0200 http://bitbucket.org/pypy/pypy/changeset/7d12f0c90a20/ Log: Test fix diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -74,10 +74,11 @@ self.gc_minimal_size_in_nursery = gc_ll_descr.minimal_size_in_nursery else: self.gc_minimal_size_in_nursery = 0 - try: - 
self.gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header - except AttributeError: - self.gc_size_of_header = WORD # for tests + gc_size_of_header = WORD # for tests + if hasattr(gc_ll_descr, 'gcheaderbuilder'): + if hasattr(gc_ll_descr.gcheaderbuilder, 'size_gc_header'): + gc_size_of_header = gc_ll_descr.gcheaderbuilder.size_gc_header + self.gc_size_of_header = gc_size_of_header self.memcpy_addr = self.cpu.cast_ptr_to_int(memcpy_fn) # building the barriers needs to happen before these: self._build_failure_recovery(False, withfloats=False) From noreply at buildbot.pypy.org Mon Aug 18 22:25:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 18 Aug 2014 22:25:48 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Try to make non-stm jit backend tests passing again, but not done yet Message-ID: <20140818202548.5D4BF1D2AE7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72897:dd825891d2a8 Date: 2014-08-18 22:24 +0200 http://bitbucket.org/pypy/pypy/changeset/dd825891d2a8/ Log: Try to make non-stm jit backend tests passing again, but not done yet diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2607,6 +2607,7 @@ # so if it is followed with a JB, it will follow the jump if # we should break the transaction now. 
# + assert self.cpu.gc_ll_descr.stm if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" psnlfm_adr = rstm.adr_pypy_stm_nursery_low_fill_mark @@ -2631,6 +2632,7 @@ self.implement_guard(guard_token, 'AE') # JAE goes to "no, don't" def genop_discard_stm_read(self, op, arglocs): + assert self.cpu.gc_ll_descr.stm if not IS_X86_64: todo() # "needed for X86_64_SCRATCH_REG" mc = self.mc diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -310,6 +310,8 @@ line = meth(self, op) elif op.opname.startswith('stm_'): if not self._is_stm(): + if op.opname in ('stm_ignored_start', 'stm_ignored_stop'): + return raise AssertionError("STM transformation not applied. " "You need '--stm'") from rpython.translator.stm import funcgen From noreply at buildbot.pypy.org Tue Aug 19 08:50:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 08:50:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1848: patch by paulie4 Message-ID: <20140819065041.B5D4C1C0250@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72898:8bc429151c06 Date: 2014-08-19 08:50 +0200 http://bitbucket.org/pypy/pypy/changeset/8bc429151c06/ Log: Issue #1848: patch by paulie4 diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -93,7 +93,7 @@ st = {} for c in map(unichr, range(256)): st[c] = SYNTAX_SYMBOL - for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + for c in [a for a in map(unichr, range(256)) if a.isalnum()]: st[c] = SYNTAX_WORD st[u'\n'] = st[u' '] = SYNTAX_WHITESPACE return st From noreply at buildbot.pypy.org Tue Aug 19 09:32:15 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 09:32:15 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: fix for restoring atomic state in start_if_not_atomic Message-ID: 
<20140819073215.1F9E31C0250@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72899:2315ec757d57 Date: 2014-08-19 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/2315ec757d57/ Log: fix for restoring atomic state in start_if_not_atomic diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -159,7 +159,7 @@ pypy_stm_nursery_low_fill_mark = _stm_nursery_start + limit; } -static long _pypy_stm_start_transaction(void) +long _pypy_stm_start_transaction(void) { pypy_stm_nursery_low_fill_mark = 1; /* will be set to a correct value below */ long counter = stm_start_transaction(&stm_thread_local); diff --git a/rpython/translator/stm/src_stm/stmgcintf.h b/rpython/translator/stm/src_stm/stmgcintf.h --- a/rpython/translator/stm/src_stm/stmgcintf.h +++ b/rpython/translator/stm/src_stm/stmgcintf.h @@ -26,6 +26,7 @@ void _pypy_stm_initialize_nursery_low_fill_mark(long v_counter); void _pypy_stm_inev_state(void); +long _pypy_stm_start_transaction(void); void _pypy_stm_become_inevitable(const char *); void pypy_stm_become_globally_unique_transaction(void); @@ -56,9 +57,7 @@ static inline void pypy_stm_start_if_not_atomic(void) { if (pypy_stm_ready_atomic == 1) { int e = errno; - stm_start_transaction(&stm_thread_local); - _pypy_stm_initialize_nursery_low_fill_mark(0); - _pypy_stm_inev_state(); + _pypy_stm_start_transaction(); errno = e; } } From noreply at buildbot.pypy.org Tue Aug 19 10:42:54 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 10:42:54 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: Merge default into gc-incminimark-pinning Message-ID: <20140819084254.3E7901C0250@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72900:907a486e6ace Date: 2014-08-18 22:19 +0200 
http://bitbucket.org/pypy/pypy/changeset/907a486e6ace/ Log: Merge default into gc-incminimark-pinning gc-incminimark-pinning based on default now, instead of release-2.3.x diff too long, truncating to 2000 out of 35830 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -10,3 +10,7 @@ 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 0000000000000000000000000000000000000000 release-2.3.0 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). 
+ +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' 
not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. """) group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. 
+ if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if 
sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. + if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. 
-if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. 
To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. 
May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. + pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. 
+ return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. 
expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: - """ Parser for command line arguments. """ + """ Parser for command line arguments and ini-file values. """ def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) @@ -35,15 +101,17 @@ if option.dest: self._processopt(option) - def addnote(self, note): - self._notes.append(note) - def getgroup(self, name, description="", after=None): """ get (or create) a named option Group. - :name: unique name of the option group. + :name: name of the option group. :description: long description for --help output. :after: name of other group, used for ordering --help output. + + The returned group object has an ``addoption`` method with the same + signature as :py:func:`parser.addoption + <_pytest.config.Parser.addoption>` but will be shown in the + respective group in the output of ``pytest. --help``. """ for group in self._groups: if group.name == name: @@ -57,33 +125,222 @@ return group def addoption(self, *opts, **attrs): - """ add an optparse-style option. """ + """ register a command line option. + + :opts: option names, can be short or long options. + :attrs: same attributes which the ``add_option()`` function of the + `argparse library + `_ + accepts. + + After command line parsing options are available on the pytest config + object via ``config.option.NAME`` where ``NAME`` is usually set + by passing a ``dest`` attribute, for example + ``addoption("--long", dest="NAME", ...)``. 
+ """ self._anonymous.addoption(*opts, **attrs) def parse(self, args): - self.optparser = optparser = MyOptionParser(self) + from _pytest._argcomplete import try_argcomplete + self.optparser = self._getparser() + try_argcomplete(self.optparser) + return self.optparser.parse_args([str(x) for x in args]) + + def _getparser(self): + from _pytest._argcomplete import filescompleter + optparser = MyOptionParser(self) groups = self._groups + [self._anonymous] for group in groups: if group.options: desc = group.description or group.name - optgroup = py.std.optparse.OptionGroup(optparser, desc) - optgroup.add_options(group.options) - optparser.add_option_group(optgroup) - return self.optparser.parse_args([str(x) for x in args]) + arggroup = optparser.add_argument_group(desc) + for option in group.options: + n = option.names() + a = option.attrs() + arggroup.add_argument(*n, **a) + # bash like autocompletion for dirs (appending '/') + optparser.add_argument(FILE_OR_DIR, nargs='*' + ).completer=filescompleter + return optparser def parse_setoption(self, args, option): - parsedoption, args = self.parse(args) + parsedoption = self.parse(args) for name, value in parsedoption.__dict__.items(): setattr(option, name, value) - return args + return getattr(parsedoption, FILE_OR_DIR) + + def parse_known_args(self, args): + optparser = self._getparser() + args = [str(x) for x in args] + return optparser.parse_known_args(args)[0] def addini(self, name, help, type=None, default=None): - """ add an ini-file option with the given name and description. """ + """ register an ini-file option. + + :name: name of the ini-variable + :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``. + :default: default value if no ini-file option exists but is queried. + + The value of ini-variables can be retrieved via a call to + :py:func:`config.getini(name) <_pytest.config.Config.getini>`. 
+ """ assert type in (None, "pathlist", "args", "linelist") self._inidict[name] = (help, type, default) self._ininames.append(name) +class ArgumentError(Exception): + """ + Raised if an Argument instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + + +class Argument: + """class that mimics the necessary behaviour of py.std.optparse.Option """ + _typ_map = { + 'int': int, + 'string': str, + } + # enable after some grace period for plugin writers + TYPE_WARN = False + + def __init__(self, *names, **attrs): + """store parms in private vars for use in add_argument""" + self._attrs = attrs + self._short_opts = [] + self._long_opts = [] + self.dest = attrs.get('dest') + if self.TYPE_WARN: + try: + help = attrs['help'] + if '%default' in help: + py.std.warnings.warn( + 'pytest now uses argparse. "%default" should be' + ' changed to "%(default)s" ', + FutureWarning, + stacklevel=3) + except KeyError: + pass + try: + typ = attrs['type'] + except KeyError: + pass + else: + # this might raise a keyerror as well, don't want to catch that + if isinstance(typ, py.builtin._basestring): + if typ == 'choice': + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this is optional and when supplied ' + ' should be a type.' + ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + # argparse expects a type here take it from + # the type of the first element + attrs['type'] = type(attrs['choices'][0]) + else: + if self.TYPE_WARN: + py.std.warnings.warn( + 'type argument to addoption() is a string %r.' + ' For parsearg this should be a type.' 
+ ' (options: %s)' % (typ, names), + FutureWarning, + stacklevel=3) + attrs['type'] = Argument._typ_map[typ] + # used in test_parseopt -> test_parse_defaultgetter + self.type = attrs['type'] + else: + self.type = typ + try: + # attribute existence is tested in Config._processopt + self.default = attrs['default'] + except KeyError: + pass + self._set_opt_strings(names) + if not self.dest: + if self._long_opts: + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + try: + self.dest = self._short_opts[0][1:] + except IndexError: + raise ArgumentError( + 'need a long or short option', self) + + def names(self): + return self._short_opts + self._long_opts + + def attrs(self): + # update any attributes set by processopt + attrs = 'default dest help'.split() + if self.dest: + attrs.append(self.dest) + for attr in attrs: + try: + self._attrs[attr] = getattr(self, attr) + except AttributeError: + pass + if self._attrs.get('help'): + a = self._attrs['help'] + a = a.replace('%default', '%(default)s') + #a = a.replace('%prog', '%(prog)s') + self._attrs['help'] = a + return self._attrs + + def _set_opt_strings(self, opts): + """directly from optparse + + might not be necessary as this is passed to argparse later on""" + for opt in opts: + if len(opt) < 2: + raise ArgumentError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise ArgumentError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise ArgumentError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def __repr__(self): + retval = 'Argument(' + if self._short_opts: + retval += '_short_opts: ' + repr(self._short_opts) + ', ' + if self._long_opts: + retval += '_long_opts: ' + 
repr(self._long_opts) + ', ' + retval += 'dest: ' + repr(self.dest) + ', ' + if hasattr(self, 'type'): + retval += 'type: ' + repr(self.type) + ', ' + if hasattr(self, 'default'): + retval += 'default: ' + repr(self.default) + ', ' + if retval[-2:] == ', ': # always long enough to test ("Argument(" ) + retval = retval[:-2] + retval += ')' + return retval + + class OptionGroup: def __init__(self, name, description="", parser=None): self.name = name @@ -92,12 +349,18 @@ self.parser = parser def addoption(self, *optnames, **attrs): - """ add an option to this group. """ - option = py.std.optparse.Option(*optnames, **attrs) + """ add an option to this group. + + if a shortened version of a long option is specified it will From noreply at buildbot.pypy.org Tue Aug 19 10:42:55 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 10:42:55 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: check for array size in tests Message-ID: <20140819084255.764771C0250@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72901:3c4167af9ea2 Date: 2014-08-19 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/3c4167af9ea2/ Log: check for array size in tests diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py @@ -130,6 +130,7 @@ p1 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 0, descr=ptr_array_descr) i0 = getfield_gc(p1, descr=pinned_obj_my_int_descr) """) + assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 def test_simple_getfield_twice(self): self.check_rewrite(""" @@ -145,3 +146,4 @@ p2 = getarrayitem_gc(ConstPtr(ptr_array_gcref), 1, descr=ptr_array_descr) i2 = getfield_gc(p2, descr=pinned_obj_my_int_descr) """) + assert 
len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 From noreply at buildbot.pypy.org Tue Aug 19 10:42:56 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 10:42:56 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add test with pinned and not pinned objects Message-ID: <20140819084256.963C41C0250@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72902:21551a1f7e19 Date: 2014-08-19 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/21551a1f7e19/ Log: add test with pinned and not pinned objects diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.rlib.jit import elidable, unroll_safe +from rpython.rlib.jit import promote from rpython.jit.backend.llsupport.gc import GcLLDescr_framework from rpython.tool.udir import udir from rpython.config.translationoption import DEFL_GC @@ -780,7 +781,6 @@ self.run('compile_framework_call_assembler') def define_pinned_simple(cls): - from rpython.rlib.jit import promote class H: inst = None helper = H() @@ -808,7 +808,6 @@ self.run('pinned_simple') def define_pinned_unpin(cls): - from rpython.rlib.jit import promote class H: inst = None pinned = False @@ -852,3 +851,49 @@ def test_pinned_unpin(self): self.run('pinned_unpin') + def define_multiple_pinned(cls): + class H: + inst1 = None + inst2 = None + inst3 = None + initialised = False + helper = H() + + @dont_look_inside + def get_instances(): + if not helper.initialised: + helper.inst1 = X() + helper.inst1.x = 101 + check(rgc.pin(helper.inst1)) + # + helper.inst2 = X() + helper.inst2.x = 102 + # + helper.inst3 = X() + helper.inst3.x = 103 + check(rgc.pin(helper.inst3)) + # + 
helper.initialised = True + # + check(rgc._is_pinned(helper.inst1)) + check(not rgc._is_pinned(helper.inst2)) + check(rgc._is_pinned(helper.inst3)) + return (helper.inst1, helper.inst2, helper.inst3) + + def fn(n, x, *args): + inst1, inst2, inst3 = get_instances() + promote(inst1) + promote(inst2) + promote(inst3) + # + check(inst1.x == 101) + check(inst2.x == 102) + check(inst3.x == 103) + # + n -= 1 + return (n, x) + args + + return None, fn, None + + def test_multiple_pinned(self): + self.run('multiple_pinned') From noreply at buildbot.pypy.org Tue Aug 19 13:08:25 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 13:08:25 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: remove most should_break_transaction guards; also add rstm.is_inevitable() Message-ID: <20140819110825.A1D111D36A6@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72903:1a671d185d5f Date: 2014-08-19 13:08 +0200 http://bitbucket.org/pypy/pypy/changeset/1a671d185d5f/ Log: remove most should_break_transaction guards; also add rstm.is_inevitable() diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -152,7 +152,7 @@ ec.bytecode_only_trace(self) else: ec.bytecode_trace(self) - rstm.possible_transaction_break() + rstm.possible_transaction_break(0) next_instr = r_uint(self.last_instr) opcode = ord(co_code[next_instr]) next_instr += 1 diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -85,7 +85,7 @@ ec.bytecode_trace(self, decr_by) jumpto = r_uint(self.last_instr) if self.space.threadlocals.threads_running: # quasi-immutable field - rstm.possible_transaction_break() + rstm.possible_transaction_break(1) # pypyjitdriver.can_enter_jit(frame=self, ec=ec, next_instr=jumpto, pycode=self.getcode(), diff --git a/rpython/jit/metainterp/blackhole.py 
b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -908,10 +908,10 @@ return False - @arguments(returns="i") - def bhimpl_stm_should_break_transaction(): + @arguments("i", returns="i") + def bhimpl_stm_should_break_transaction(keep): from rpython.rlib import rstm - return rstm.should_break_transaction() + return rstm.should_break_transaction(0) @arguments() def bhimpl_stm_hint_commit_soon(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -187,14 +187,34 @@ # ------------------------------ - @arguments() - def opimpl_stm_should_break_transaction(self): - # XXX make it return BoxInt(1) instead of BoxInt(0) if there - # is an inevitable transaction, because it's likely that there - # will always be an inevitable transaction here - resbox = history.BoxInt(0) - mi = self.metainterp - mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) + @arguments("int") + def opimpl_stm_should_break_transaction(self, keep): + # from rpython.rlib import rstm + + record_break = False + resbox = history.ConstInt(0) + + if bool(keep): + # always keep (i.c. end of loops) + resbox = history.BoxInt(0) + record_break = True + + ## XXX: not working yet. 
we are always inevitable when tracing + # if we_are_translated() and rstm.is_inevitable(): + # # return BoxInt(1) if there is an inevitable + # # transaction, because it's likely that there + # # will always be an inevitable transaction here + # resbox = history.BoxInt(1) + # record_break = True + + if record_break: + mi = self.metainterp + mi.history.record(rop.STM_SHOULD_BREAK_TRANSACTION, [], resbox) + else: + # don't record the should_break_transaction and optimize + # the guard away + pass + return resbox @arguments() diff --git a/rpython/jit/metainterp/test/test_stm.py b/rpython/jit/metainterp/test/test_stm.py --- a/rpython/jit/metainterp/test/test_stm.py +++ b/rpython/jit/metainterp/test/test_stm.py @@ -11,7 +11,7 @@ class STMTests: def test_simple(self): def g(): - return rstm.should_break_transaction() + return rstm.should_break_transaction(1) res = self.interp_operations(g, [], translationoptions={"stm":True}) assert res == False self.check_operations_history(stm_should_break_transaction=1) diff --git a/rpython/jit/tl/tlc.py b/rpython/jit/tl/tlc.py --- a/rpython/jit/tl/tlc.py +++ b/rpython/jit/tl/tlc.py @@ -15,12 +15,12 @@ def int_o(self): raise TypeError def to_string(self): raise TypeError - + def add(self, other): raise TypeError def sub(self, other): raise TypeError def mul(self, other): raise TypeError def div(self, other): raise TypeError - + def eq(self, other): raise TypeError def lt(self, other): raise TypeError @@ -92,7 +92,7 @@ self.methods = {} for methname, pc in descr.methods: self.methods[methname] = pc - + class InstanceObj(Obj): def __init__(self, cls): @@ -226,7 +226,7 @@ self.pc = pc self.stack = [] - + def make_interp(supports_call, jitted=True): myjitdriver = JitDriver(greens = ['pc', 'code'], reds = ['frame', 'pool']) @@ -234,7 +234,7 @@ def interp(code='', pc=0, inputarg=0, pool=None): if not isinstance(code,str): raise TypeError("code '%s' should be a string" % str(code)) - + if pool is None: pool = ConstantPool() args = 
[IntObj(inputarg)] @@ -255,7 +255,7 @@ if opcode == NOP: pass - + elif opcode == NIL: stack.append(nil) @@ -268,7 +268,7 @@ elif opcode == CDR: stack.append(stack.pop().cdr()) - + elif opcode == PUSH: stack.append(IntObj(char2int(code[pc]))) pc += 1 @@ -349,32 +349,32 @@ pc += char2int(code[pc]) pc += 1 if jitted and old_pc > pc: - rstm.possible_transaction_break() + rstm.possible_transaction_break(1) myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) - + elif opcode == BR_COND: cond = stack.pop() if cond.t(): old_pc = pc pc += char2int(code[pc]) + 1 if jitted and old_pc > pc: - rstm.possible_transaction_break() + rstm.possible_transaction_break(1) myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) else: pc += 1 - + elif opcode == BR_COND_STK: offset = stack.pop().int_o() if stack.pop().t(): old_pc = pc pc += offset if jitted and old_pc > pc: - rstm.possible_transaction_break() + rstm.possible_transaction_break(1) myjitdriver.can_enter_jit(code=code, pc=pc, frame=frame, pool=pool) - + elif supports_call and opcode == CALL: offset = char2int(code[pc]) @@ -451,7 +451,7 @@ return frame.stack[-1] else: return None - + return interp, interp_eval diff --git a/rpython/rlib/rstm.py b/rpython/rlib/rstm.py --- a/rpython/rlib/rstm.py +++ b/rpython/rlib/rstm.py @@ -49,9 +49,13 @@ # special-cased below: the emitted operation must be placed # directly in the caller's graph -def possible_transaction_break(): + at specialize.arg(0) +def possible_transaction_break(keep): + """ keep: should be True for checks that are absolutely + needed. 
False means the JIT only keeps the check if it + thinks that it helps """ if stm_is_enabled(): - if llop.stm_should_break_transaction(lltype.Bool): + if llop.stm_should_break_transaction(lltype.Bool, keep): break_transaction() def hint_commit_soon(): @@ -70,9 +74,10 @@ def partial_commit_and_resume_other_threads(): pass # for now -def should_break_transaction(): + at specialize.arg(0) +def should_break_transaction(keep): return we_are_translated() and ( - llop.stm_should_break_transaction(lltype.Bool)) + llop.stm_should_break_transaction(lltype.Bool, keep)) @dont_look_inside def break_transaction(): @@ -95,6 +100,10 @@ return llop.stm_get_atomic(lltype.Signed) @dont_look_inside +def is_inevitable(): + return llop.stm_is_inevitable(lltype.Signed) + + at dont_look_inside def abort_and_retry(): llop.stm_abort_and_retry(lltype.Void) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -956,6 +956,7 @@ op_stm_enter_callback_call = _stm_not_implemented op_stm_leave_callback_call = _stm_not_implemented op_stm_get_atomic = _stm_not_implemented + op_stm_is_inevitable = _stm_not_implemented op_stm_change_atomic = _stm_not_implemented op_stm_set_transaction_length = _stm_not_implemented op_stm_hash = _stm_not_implemented @@ -970,7 +971,7 @@ op_stm_stop_all_other_threads = _stm_not_implemented op_stm_partial_commit_and_resume_other_threads = _stm_not_implemented - def op_stm_should_break_transaction(self): + def op_stm_should_break_transaction(self, keep): return False def op_threadlocalref_set(self, key, value): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -445,6 +445,8 @@ 'stm_decrement_atomic': LLOp(), 'stm_get_atomic': LLOp(sideeffects=False), + 'stm_is_inevitable': LLOp(sideeffects=False), + 'stm_ignored_start': LLOp(canrun=True), 
'stm_ignored_stop': LLOp(canrun=True), diff --git a/rpython/translator/stm/funcgen.py b/rpython/translator/stm/funcgen.py --- a/rpython/translator/stm/funcgen.py +++ b/rpython/translator/stm/funcgen.py @@ -190,6 +190,10 @@ result = funcgen.expr(op.result) return '%s = pypy_stm_get_atomic();' % (result,) +def stm_is_inevitable(funcgen, op): + result = funcgen.expr(op.result) + return '%s = stm_is_inevitable();' % (result,) + def stm_abort_and_retry(funcgen, op): return 'stm_abort_transaction();' From noreply at buildbot.pypy.org Tue Aug 19 13:16:24 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 13:16:24 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: add is_atomic to atomic.py and transaction.py Message-ID: <20140819111624.BA9631C1482@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72904:0e51cd5f2023 Date: 2014-08-19 13:16 +0200 http://bitbucket.org/pypy/pypy/changeset/0e51cd5f2023/ Log: add is_atomic to atomic.py and transaction.py diff --git a/lib_pypy/atomic.py b/lib_pypy/atomic.py --- a/lib_pypy/atomic.py +++ b/lib_pypy/atomic.py @@ -5,7 +5,8 @@ try: from __pypy__ import thread as _thread - from __pypy__.thread import atomic, getsegmentlimit, hint_commit_soon + from __pypy__.thread import (atomic, getsegmentlimit, + hint_commit_soon, is_atomic) except ImportError: # Not a STM-enabled PyPy. We can still provide a version of 'atomic' # that is good enough for our purposes. With this limited version, @@ -22,6 +23,10 @@ def hint_commit_soon(): pass + def is_atomic(): + return atomic.locked() + + else: import re, sys, linecache diff --git a/lib_pypy/transaction.py b/lib_pypy/transaction.py --- a/lib_pypy/transaction.py +++ b/lib_pypy/transaction.py @@ -15,13 +15,15 @@ import sys, thread, collections, cStringIO, linecache try: - from __pypy__.thread import atomic + from __pypy__.thread import atomic, is_atomic except ImportError: # Not a STM-enabled PyPy. 
We can use a regular lock for 'atomic', # which is good enough for our purposes. With this limited version, # an atomic block in thread X will not prevent running thread Y, if # thread Y is not within an atomic block at all. atomic = thread.allocate_lock() + def is_atomic(): + return atomic.locked() try: from __pypy__.thread import signals_enabled From noreply at buildbot.pypy.org Tue Aug 19 13:26:39 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 13:26:39 +0200 (CEST) Subject: [pypy-commit] benchmarks default: removing weird use of atomic in quick_sort.py (actually faster now without it) Message-ID: <20140819112639.989851C1482@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r274:0ff2a5c67f95 Date: 2014-08-19 13:26 +0200 http://bitbucket.org/pypy/benchmarks/changeset/0ff2a5c67f95/ Log: removing weird use of atomic in quick_sort.py (actually faster now without it) diff --git a/multithread/quick_sort/quick_sort.py b/multithread/quick_sort/quick_sort.py --- a/multithread/quick_sort/quick_sort.py +++ b/multithread/quick_sort/quick_sort.py @@ -8,6 +8,7 @@ atomic, Future, set_thread_pool, ThreadPool, hint_commit_soon, print_abort_info) + import itertools from collections import deque @@ -50,59 +51,52 @@ l = l0 r = l + n - 1 while l <= r: - with atomic: - xl = xs[l] - if xl < pivot: - l += 1 - continue - xr = xs[r] - if xr > pivot: - r -= 1 - continue - xs[l], xs[r] = xr, xl + xl = xs[l] + if xl < pivot: l += 1 + continue + xr = xs[r] + if xr > pivot: r -= 1 + continue + xs[l], xs[r] = xr, xl + l += 1 + r -= 1 fs = [] - # only start futures on a single level: - do_futures = level == 4 + do_futures = level >= 4 largs = (xs, l0, r - l0 + 1, level+1) rargs = (xs, l, l0 + n - l, level+1) leftf, rightf = False, False if do_futures: - if largs[2] > 2000: + if largs[2] > 1500: fs.append(Future(qsort_f, *largs)) leftf = True - if rargs[2] > 2000: + if rargs[2] > 1500: fs.append(Future(qsort_f, *rargs)) rightf = True if not leftf: - if 
level >= 4 and largs[2] < 500: - with atomic: - fs.extend(qsort_f(*largs)) - else: - fs.extend(qsort_f(*largs)) + fs.extend(qsort_f(*largs)) if not rightf: - if level >= 4 and rargs[2] < 500: - with atomic: - fs.extend(qsort_f(*rargs)) - else: - fs.extend(qsort_f(*rargs)) + fs.extend(qsort_f(*rargs)) #print_abort_info(0.0000001) return fs def wait_for_futures(fs): + c = 0 while fs: f = fs.pop() fs.extend(f()) + c += 1 + print "Futures:", c -def run(threads=2, n=20000): +def run(threads=2, n=60000): threads = int(threads) n = int(n) @@ -110,11 +104,10 @@ to_sort = range(n) t = 0 - for i in range(20): - with atomic: - random.seed(i) - random.shuffle(to_sort) - s = deque(to_sort) + for i in range(5): + random.seed(i+32) + random.shuffle(to_sort) + s = deque(to_sort) # qsort(s, 0, len(s)) t -= time.time() From noreply at buildbot.pypy.org Tue Aug 19 13:26:41 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 13:26:41 +0200 (CEST) Subject: [pypy-commit] benchmarks default: miscellaneous changes Message-ID: <20140819112641.0B7BB1C1482@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r275:c45103668651 Date: 2014-08-19 13:26 +0200 http://bitbucket.org/pypy/benchmarks/changeset/c45103668651/ Log: miscellaneous changes diff --git a/multithread/bench.py b/multithread/bench.py --- a/multithread/bench.py +++ b/multithread/bench.py @@ -86,6 +86,7 @@ finally: if not args.q: print "== times ==\n", "\n".join(map(str, times)) + print "== times short ==\n", str(times[-min(5, len(times)):]) print "== reported results ==\n", "\n".join( map(str, filter(None, results))) diff --git a/multithread/common/abstract_threading.py b/multithread/common/abstract_threading.py --- a/multithread/common/abstract_threading.py +++ b/multithread/common/abstract_threading.py @@ -4,7 +4,7 @@ try: from atomic import (atomic, getsegmentlimit, print_abort_info, - hint_commit_soon) + hint_commit_soon, is_atomic) except: atomic = RLock() def getsegmentlimit(): @@ -13,6 
+13,8 @@ pass def hint_commit_soon(): pass + def is_atomic(): + return atomic._RLock__count > 0 class TLQueue_concurrent(object): diff --git a/multithread/raytrace/raytrace.py b/multithread/raytrace/raytrace.py --- a/multithread/raytrace/raytrace.py +++ b/multithread/raytrace/raytrace.py @@ -7,15 +7,15 @@ print_abort_info, hint_commit_soon) import time -import platform -if platform.python_implementation() == "Jython": - # be fair to jython and don't use a lock where none is required: - class fakeatomic: - def __enter__(self): - pass - def __exit__(self,*args): - pass - atomic = fakeatomic() +#import platform +#if platform.python_implementation() == "Jython": +# # be fair to jython and don't use a lock where none is required: +# class fakeatomic: +# def __enter__(self): +# pass +# def __exit__(self,*args): +# pass +# atomic = fakeatomic() AMBIENT = 0.1 From noreply at buildbot.pypy.org Tue Aug 19 13:51:56 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 19 Aug 2014 13:51:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: found a new TODO which would allow pypy-stm to outperform pypy on quick_sort.py Message-ID: <20140819115156.2CD421C1486@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72905:29fff73e1c07 Date: 2014-08-19 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/29fff73e1c07/ Log: found a new TODO which would allow pypy-stm to outperform pypy on quick_sort.py diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -28,6 +28,14 @@ ------------------------------------------------------------ +stm_read() should also be optimized (moved out of loops). E.g. +quick_sort.py suffers a 30% slowdown because it uses deque.locate() +extensively. This method happens to have a *very* tight loop +where the read barrier has a big negative effect and could +actually be moved out. 
+ +------------------------------------------------------------ + __pypy__.thread.getsegmentlimit(): XXX This limit is so far a compile time option (STM_NB_SEGMENTS in From noreply at buildbot.pypy.org Tue Aug 19 15:14:49 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 15:14:49 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: failing test: malloc object of type A, pin it, do GC collect and try to malloc object of type B. Message-ID: <20140819131449.A2B171C148A@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72906:20eefbef7004 Date: 2014-08-19 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/20eefbef7004/ Log: failing test: malloc object of type A, pin it, do GC collect and try to malloc object of type B. diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -4,11 +4,15 @@ from test_direct import BaseDirectGCTest S = lltype.GcForwardReference() -S.become(lltype.GcStruct('pinning_test_struct', +S.become(lltype.GcStruct('pinning_test_struct1', ('someInt', lltype.Signed), ('next', lltype.Ptr(S)), ('data', lltype.Ptr(S)))) +T = lltype.GcForwardReference() +T.become(lltype.GcStruct('pinning_test_struct2', + ('someInt', lltype.Signed))) + class PinningGCTest(BaseDirectGCTest): def test_pin_can_move(self): @@ -545,6 +549,21 @@ self.pin_shadow_1(self.gc.collect) + def test_malloc_different_types(self): + # scenario: malloc two objects of different type and pin them. Do a + # minor and major collection in between. This test showed a bug that was + # present in a previous implementation of pinning. 
+ obj1 = self.malloc(S) + self.stackroots.append(obj1) + assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1)) + # + self.gc.collect() + # + obj2 = self.malloc(T) + self.stackroots.append(obj2) + assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2)) + + def pin_shadow_2(self, collect_func): ptr = self.malloc(S) adr = llmemory.cast_ptr_to_adr(ptr) From noreply at buildbot.pypy.org Tue Aug 19 16:35:46 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 16:35:46 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: fix for tests 'test_malloc_different_types' and 'test_tagged_id'. fijal solved it. Message-ID: <20140819143546.E46681C1482@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72907:887cbc99da9b Date: 2014-08-19 16:00 +0200 http://bitbucket.org/pypy/pypy/changeset/887cbc99da9b/ Log: fix for tests 'test_malloc_different_types' and 'test_tagged_id'. fijal solved it. diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -617,7 +617,7 @@ # Get the memory from the nursery. If there is not enough space # there, do a collect first. 
result = self.nursery_free - self.nursery_free = result + totalsize + self.nursery_free = result + rawtotalsize if self.nursery_free > self.nursery_top: result = self.collect_and_reserve(result, totalsize) # From noreply at buildbot.pypy.org Tue Aug 19 16:35:48 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 16:35:48 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: don't allow objects to be pinned that can contain GC pointers Message-ID: <20140819143548.3CBD41C1482@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72908:e87564c16230 Date: 2014-08-19 16:24 +0200 http://bitbucket.org/pypy/pypy/changeset/e87564c16230/ Log: don't allow objects to be pinned that can contain GC pointers diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -988,6 +988,12 @@ # to check if can_move(obj) already returns True in which # case a call to pin() is unnecessary. return False + if self.has_gcptr(self.get_type_id(obj)): + # objects containing GC pointers can't be pinned. If we would add + # it, we would have to track all pinned objects and trace them + # every minor collection to make sure the referenced object are + # kept alive. + return False if self._is_pinned(obj): # Already pinned, we do not allow to pin it again. 
# Reason: It would be possible that the first caller unpins diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -3,29 +3,29 @@ from rpython.memory.gc.incminimark import IncrementalMiniMarkGC, WORD from test_direct import BaseDirectGCTest +T = lltype.GcForwardReference() +T.become(lltype.GcStruct('pinning_test_struct2', + ('someInt', lltype.Signed))) + S = lltype.GcForwardReference() S.become(lltype.GcStruct('pinning_test_struct1', ('someInt', lltype.Signed), - ('next', lltype.Ptr(S)), - ('data', lltype.Ptr(S)))) - -T = lltype.GcForwardReference() -T.become(lltype.GcStruct('pinning_test_struct2', - ('someInt', lltype.Signed))) + ('next', lltype.Ptr(T)), + ('data', lltype.Ptr(T)))) class PinningGCTest(BaseDirectGCTest): def test_pin_can_move(self): # even a pinned object is considered to be movable. Only the caller # of pin() knows if it is currently movable or not. 
- ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) assert self.gc.can_move(adr) assert self.gc.pin(adr) assert self.gc.can_move(adr) def test_pin_twice(self): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) assert self.gc.pin(adr) assert not self.gc.pin(adr) @@ -37,7 +37,7 @@ self.gc.unpin, llmemory.cast_ptr_to_adr(ptr)) def test__is_pinned(self): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) assert not self.gc._is_pinned(adr) assert self.gc.pin(adr) @@ -46,7 +46,7 @@ assert not self.gc._is_pinned(adr) def test_prebuilt_not_pinnable(self): - ptr = lltype.malloc(S, immortal=True) + ptr = lltype.malloc(T, immortal=True) self.consider_constant(ptr) assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr)) self.gc.collect() @@ -59,6 +59,13 @@ from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass from rpython.memory.gc.incminimark import STATE_SCANNING + def test_try_pin_gcref_containing_type(self): + # scenario: incminimark's object pinning can't pin objects that may + # contain GC pointers + obj = self.malloc(S) + assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj)) + + def test_pin_old(self): # scenario: try pinning an old object. This should be not possible and # we want to make sure everything stays as it is. @@ -78,11 +85,11 @@ def pin_pin_pinned_object_count(self, collect_func): # scenario: pin two objects that are referenced from stackroots. Check # if the pinned objects count is correct, even after an other collection - pinned1_ptr = self.malloc(S) + pinned1_ptr = self.malloc(T) pinned1_ptr.someInt = 100 self.stackroots.append(pinned1_ptr) # - pinned2_ptr = self.malloc(S) + pinned2_ptr = self.malloc(T) pinned2_ptr.someInt = 200 self.stackroots.append(pinned2_ptr) # @@ -105,7 +112,7 @@ def pin_unpin_pinned_object_count(self, collect_func): # scenario: pin an object and check the pinned object count. Unpin it # and check the count again. 
- pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.stackroots.append(pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -131,7 +138,7 @@ # scenario: a pinned object that is part of the stack roots. Check if # it is not moved # - ptr = self.malloc(S) + ptr = self.malloc(T) ptr.someInt = 100 self.stackroots.append(ptr) assert self.stackroots[0] == ptr # validate our assumption @@ -162,7 +169,7 @@ # that we do stepwise major collection and check in each step for # a correct state # - ptr = self.malloc(S) + ptr = self.malloc(T) ptr.someInt = 100 self.stackroots.append(ptr) assert self.stackroots[0] == ptr # validate our assumption @@ -199,7 +206,7 @@ # scenario: test if the pinned object is moved after being unpinned. # the second part of the scenario is the tested one. The first part # is already tests by other tests. - ptr = self.malloc(S) + ptr = self.malloc(T) ptr.someInt = 100 self.stackroots.append(ptr) assert self.stackroots[0] == ptr # validate our assumption @@ -246,7 +253,7 @@ assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr)) # # create young pinned one and let the old one reference the young one - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(old_ptr, 'next', pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -283,7 +290,7 @@ assert not self.gc.is_in_nursery(old_adr) # # create young pinned one and let the old one reference the young one - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(old_ptr, 'next', pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -329,7 +336,7 @@ collect_func() # make it old old_ptr = self.stackroots[0] # - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(old_ptr, 'next', pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -342,7 +349,7 @@ assert self.gc.is_in_nursery(pinned_adr) assert 
self.gc._is_pinned(pinned_adr) # remove the reference - self.write(old_ptr, 'next', lltype.nullptr(S)) + self.write(old_ptr, 'next', lltype.nullptr(T)) # from now on the pinned object is dead. Do a collection and make sure # old object still there and the pinned one is gone. collect_func() @@ -377,7 +384,7 @@ collect_func() old_ptr = self.stackroots[0] # - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(old_ptr, 'next', pinned_ptr) assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) @@ -424,7 +431,7 @@ self.stackroots.append(root_ptr) assert self.stackroots[0] == root_ptr # validate assumption # - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(root_ptr, 'next', pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -467,7 +474,7 @@ prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr) collect_func() # - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 self.write(prebuilt_ptr, 'next', pinned_ptr) pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) @@ -506,7 +513,7 @@ old2_ptr.someInt = 800 self.stackroots.append(old2_ptr) - pinned_ptr = self.malloc(S) + pinned_ptr = self.malloc(T) pinned_ptr.someInt = 100 assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr)) @@ -528,7 +535,7 @@ def pin_shadow_1(self, collect_func): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) self.stackroots.append(ptr) ptr.someInt = 100 @@ -553,7 +560,7 @@ # scenario: malloc two objects of different type and pin them. Do a # minor and major collection in between. This test showed a bug that was # present in a previous implementation of pinning. 
- obj1 = self.malloc(S) + obj1 = self.malloc(T) self.stackroots.append(obj1) assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1)) # @@ -565,7 +572,7 @@ def pin_shadow_2(self, collect_func): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) self.stackroots.append(ptr) ptr.someInt = 100 @@ -587,19 +594,19 @@ def test_pin_nursery_top_scenario1(self): - ptr1 = self.malloc(S) + ptr1 = self.malloc(T) adr1 = llmemory.cast_ptr_to_adr(ptr1) ptr1.someInt = 101 self.stackroots.append(ptr1) assert self.gc.pin(adr1) - ptr2 = self.malloc(S) + ptr2 = self.malloc(T) adr2 = llmemory.cast_ptr_to_adr(ptr2) ptr2.someInt = 102 self.stackroots.append(ptr2) assert self.gc.pin(adr2) - ptr3 = self.malloc(S) + ptr3 = self.malloc(T) adr3 = llmemory.cast_ptr_to_adr(ptr3) ptr3.someInt = 103 self.stackroots.append(ptr3) @@ -625,19 +632,19 @@ def test_pin_nursery_top_scenario2(self): - ptr1 = self.malloc(S) + ptr1 = self.malloc(T) adr1 = llmemory.cast_ptr_to_adr(ptr1) ptr1.someInt = 101 self.stackroots.append(ptr1) assert self.gc.pin(adr1) - ptr2 = self.malloc(S) + ptr2 = self.malloc(T) adr2 = llmemory.cast_ptr_to_adr(ptr2) ptr2.someInt = 102 self.stackroots.append(ptr2) assert self.gc.pin(adr2) - ptr3 = self.malloc(S) + ptr3 = self.malloc(T) adr3 = llmemory.cast_ptr_to_adr(ptr3) ptr3.someInt = 103 self.stackroots.append(ptr3) @@ -665,19 +672,19 @@ def test_pin_nursery_top_scenario3(self): - ptr1 = self.malloc(S) + ptr1 = self.malloc(T) adr1 = llmemory.cast_ptr_to_adr(ptr1) ptr1.someInt = 101 self.stackroots.append(ptr1) assert self.gc.pin(adr1) - ptr2 = self.malloc(S) + ptr2 = self.malloc(T) adr2 = llmemory.cast_ptr_to_adr(ptr2) ptr2.someInt = 102 self.stackroots.append(ptr2) assert self.gc.pin(adr2) - ptr3 = self.malloc(S) + ptr3 = self.malloc(T) adr3 = llmemory.cast_ptr_to_adr(ptr3) ptr3.someInt = 103 self.stackroots.append(ptr3) @@ -707,19 +714,19 @@ def test_pin_nursery_top_scenario4(self): - ptr1 = self.malloc(S) + ptr1 = self.malloc(T) adr1 = 
llmemory.cast_ptr_to_adr(ptr1) ptr1.someInt = 101 self.stackroots.append(ptr1) assert self.gc.pin(adr1) - ptr2 = self.malloc(S) + ptr2 = self.malloc(T) adr2 = llmemory.cast_ptr_to_adr(ptr2) ptr2.someInt = 102 self.stackroots.append(ptr2) assert self.gc.pin(adr2) - ptr3 = self.malloc(S) + ptr3 = self.malloc(T) adr3 = llmemory.cast_ptr_to_adr(ptr3) ptr3.someInt = 103 self.stackroots.append(ptr3) @@ -750,19 +757,19 @@ def test_pin_nursery_top_scenario5(self): - ptr1 = self.malloc(S) + ptr1 = self.malloc(T) adr1 = llmemory.cast_ptr_to_adr(ptr1) ptr1.someInt = 101 self.stackroots.append(ptr1) assert self.gc.pin(adr1) - ptr2 = self.malloc(S) + ptr2 = self.malloc(T) adr2 = llmemory.cast_ptr_to_adr(ptr2) ptr2.someInt = 102 self.stackroots.append(ptr2) assert self.gc.pin(adr2) - ptr3 = self.malloc(S) + ptr3 = self.malloc(T) adr3 = llmemory.cast_ptr_to_adr(ptr3) ptr3.someInt = 103 self.stackroots.append(ptr3) @@ -811,12 +818,12 @@ def fill_nursery_with_pinned_objects(self): - typeid = self.get_type_id(S) + typeid = self.get_type_id(T) size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header raw_size = llmemory.raw_malloc_usage(size) object_mallocs = self.gc.nursery_size // raw_size for instance_nr in xrange(object_mallocs): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) ptr.someInt = 100 + instance_nr self.stackroots.append(ptr) @@ -824,9 +831,9 @@ def test_full_pinned_nursery_pin_fail(self): self.fill_nursery_with_pinned_objects() - # nursery should be full now, at least no space for another `S`. + # nursery should be full now, at least no space for another `T`. # Next malloc should fail. 
- py.test.raises(Exception, self.malloc, S) + py.test.raises(Exception, self.malloc, T) def test_full_pinned_nursery_arena_reset(self): # there were some bugs regarding the 'arena_reset()' calls at @@ -836,21 +843,21 @@ def test_pinning_limit(self): for instance_nr in xrange(self.gc.max_number_of_pinned_objects): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) ptr.someInt = 100 + instance_nr self.stackroots.append(ptr) self.gc.pin(adr) # # now we reached the maximum amount of pinned objects - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) self.stackroots.append(ptr) assert not self.gc.pin(adr) test_pinning_limit.GC_PARAMS = {'max_number_of_pinned_objects': 5} def test_full_pinned_nursery_pin_fail(self): - typeid = self.get_type_id(S) + typeid = self.get_type_id(T) size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header raw_size = llmemory.raw_malloc_usage(size) object_mallocs = self.gc.nursery_size // raw_size @@ -858,15 +865,15 @@ # but rather the case of a nursery full with pinned objects. assert object_mallocs < self.gc.max_number_of_pinned_objects for instance_nr in xrange(object_mallocs): - ptr = self.malloc(S) + ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) ptr.someInt = 100 + instance_nr self.stackroots.append(ptr) self.gc.pin(adr) # - # nursery should be full now, at least no space for another `S`. + # nursery should be full now, at least no space for another `T`. # Next malloc should fail. - py.test.raises(Exception, self.malloc, S) + py.test.raises(Exception, self.malloc, T) test_full_pinned_nursery_pin_fail.GC_PARAMS = \ {'max_number_of_pinned_objects': 50} From noreply at buildbot.pypy.org Tue Aug 19 17:48:53 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 17:48:53 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: fix test to use class without gc pointers. 
Message-ID: <20140819154853.B8D721C1482@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72910:ea91bbc58941 Date: 2014-08-19 17:47 +0200 http://bitbucket.org/pypy/pypy/changeset/ea91bbc58941/ Log: fix test to use class without gc pointers. As of changeset e87564c1623075847f63d55586c837db7f188f4c objects with gc pointers can't be pinned. diff --git a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py --- a/rpython/jit/backend/llsupport/test/zrpy_gc_test.py +++ b/rpython/jit/backend/llsupport/test/zrpy_gc_test.py @@ -23,6 +23,12 @@ next = None +class Y(object): + # for pinning tests we need an object without references to other + # objects + def __init__(self, x=0): + self.x = x + class CheckError(Exception): pass @@ -788,7 +794,7 @@ @dont_look_inside def get_y(): if not helper.inst: - helper.inst = X() + helper.inst = Y() helper.inst.x = 101 check(rgc.pin(helper.inst)) else: @@ -818,7 +824,7 @@ @dont_look_inside def get_y(n): if not helper.inst: - helper.inst = X() + helper.inst = Y() helper.inst.x = 101 helper.pinned = True check(rgc.pin(helper.inst)) @@ -862,14 +868,14 @@ @dont_look_inside def get_instances(): if not helper.initialised: - helper.inst1 = X() + helper.inst1 = Y() helper.inst1.x = 101 check(rgc.pin(helper.inst1)) # - helper.inst2 = X() + helper.inst2 = Y() helper.inst2.x = 102 # - helper.inst3 = X() + helper.inst3 = Y() helper.inst3.x = 103 check(rgc.pin(helper.inst3)) # From noreply at buildbot.pypy.org Tue Aug 19 19:05:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 19:05:23 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add stm_call_on_commit(), for implementing free() of raw memory Message-ID: <20140819170523.7553C1C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1319:1d2c771f29c4 Date: 2014-08-19 17:29 +0200 http://bitbucket.org/pypy/stmgc/changeset/1d2c771f29c4/ Log: Add stm_call_on_commit(), 
for implementing free() of raw memory from non-inevitable transactions. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -375,7 +375,8 @@ assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); - assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0])); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); #ifndef NDEBUG @@ -850,7 +851,7 @@ STM_PSEGMENT->overflow_number_has_been_used = false; } - clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(0); /* for commit */ /* send what is hopefully the correct signals */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { @@ -1044,7 +1045,7 @@ memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); /* invoke the callbacks */ - invoke_and_clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(1); /* for abort */ int attribute_to = STM_TIME_RUN_ABORTED_OTHER; @@ -1101,7 +1102,7 @@ wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); - clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(0); /* for commit */ } else { assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -132,8 +132,9 @@ weakrefs never point to young objects and never contain NULL. 
*/ struct list_s *old_weakrefs; - /* Tree of 'key->callback' associations from stm_call_on_abort() */ - struct tree_s *callbacks_on_abort; + /* Tree of 'key->callback' associations from stm_call_on_commit() + and stm_call_on_abort() (respectively, array items 0 and 1) */ + struct tree_s *callbacks_on_commit_and_abort[2]; /* Start time: to know approximately for how long a transaction has been running, in contention management */ diff --git a/c7/stm/extra.c b/c7/stm/extra.c --- a/c7/stm/extra.c +++ b/c7/stm/extra.c @@ -3,55 +3,76 @@ #endif -void stm_call_on_abort(stm_thread_local_t *tl, - void *key, void callback(void *)) +static bool register_callbacks(stm_thread_local_t *tl, + void *key, void callback(void *), long index) { if (!_stm_in_transaction(tl)) { /* check that the current thread-local is really running a transaction, and do nothing otherwise. */ - return; + return false; } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction (which cannot abort) */ - return; + return false; } + struct tree_s *callbacks; + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; + if (callback == NULL) { /* ignore the return value: unregistered keys can be "deleted" again */ - tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key); + tree_delete_item(callbacks, (uintptr_t)key); } else { /* double-registering the same key will crash */ - tree_insert(STM_PSEGMENT->callbacks_on_abort, - (uintptr_t)key, (uintptr_t)callback); + tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback); + } + return true; +} + +void stm_call_on_commit(stm_thread_local_t *tl, + void *key, void callback(void *)) +{ + if (!register_callbacks(tl, key, callback, 0)) { + /* no regular transaction running, invoke the callback + immediately */ + callback(key); } } -static void clear_callbacks_on_abort(void) +void stm_call_on_abort(stm_thread_local_t *tl, + void *key, void callback(void *)) { - if 
(!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)) - tree_clear(STM_PSEGMENT->callbacks_on_abort); + register_callbacks(tl, key, callback, 1); } -static void invoke_and_clear_callbacks_on_abort(void) +static void invoke_and_clear_user_callbacks(long index) { - wlog_t *item; - struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort; + struct tree_s *callbacks; + + /* clear the callbacks that we don't want to invoke at all */ + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[1 - index]; + if (!tree_is_cleared(callbacks)) + tree_clear(callbacks); + + /* invoke the callbacks from the other group */ + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; if (tree_is_cleared(callbacks)) return; - STM_PSEGMENT->callbacks_on_abort = tree_create(); + STM_PSEGMENT->callbacks_on_commit_and_abort[index] = tree_create(); + wlog_t *item; TREE_LOOP_FORWARD(*callbacks, item) { void *key = (void *)item->addr; void (*callback)(void *) = (void(*)(void *))item->val; assert(key != NULL); assert(callback != NULL); - /* The callback may call stm_call_on_abort(key, NULL). It is - ignored, because 'callbacks_on_abort' was cleared already. */ + /* The callback may call stm_call_on_abort(key, NULL). It is ignored, + because 'callbacks_on_commit_and_abort' was cleared already. 
*/ callback(key); } TREE_LOOP_END; diff --git a/c7/stm/extra.h b/c7/stm/extra.h --- a/c7/stm/extra.h +++ b/c7/stm/extra.h @@ -1,3 +1,3 @@ -static void clear_callbacks_on_abort(void); -static void invoke_and_clear_callbacks_on_abort(void); +static void invoke_and_clear_user_callbacks(long index); +/* 0 = for commit, 1 = for abort */ diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -126,7 +126,8 @@ pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); - pr->callbacks_on_abort = tree_create(); + pr->callbacks_on_commit_and_abort[0] = tree_create(); + pr->callbacks_on_commit_and_abort[1] = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -166,7 +167,8 @@ list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); - tree_free(pr->callbacks_on_abort); + tree_free(pr->callbacks_on_commit_and_abort[0]); + tree_free(pr->callbacks_on_commit_and_abort[1]); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -416,6 +416,12 @@ Note: 'key' must be aligned to a multiple of 8 bytes. */ void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +/* If the current transaction commits later, invoke 'callback(key)'. If + the current transaction aborts, then the callback is forgotten. Same + restrictions as stm_call_on_abort(). If the transaction is or becomes + inevitable, 'callback(key)' is called immediately. */ +void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); + /* Similar to stm_become_inevitable(), but additionally suspend all other threads. 
A very heavy-handed way to make sure that no other diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -111,6 +111,7 @@ int stm_can_move(object_t *); void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); #define STM_TIME_OUTSIDE_TRANSACTION ... #define STM_TIME_RUN_CURRENT ... diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -81,6 +81,85 @@ self.abort_transaction() assert seen == [] + def test_call_on_commit(self): + p0 = ffi_new_aligned("aaa") + p1 = ffi_new_aligned("hello") + p2 = ffi_new_aligned("removed") + p3 = ffi_new_aligned("world") + # + @ffi.callback("void(void *)") + def clear_me(p): + p = ffi.cast("char *", p) + p[0] = chr(ord(p[0]) + 1) + # + self.start_transaction() + lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + # the registered callbacks are not called on abort + self.abort_transaction() + assert ffi.string(p0) == "aaa" + # + self.start_transaction() + lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me) + lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me) + lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me) + lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL) + assert ffi.string(p0) == "aaa" + assert ffi.string(p1) == "hello" + assert ffi.string(p2) == "removed" + assert ffi.string(p3) == "world" + self.commit_transaction() + # + assert ffi.string(p0) == "aaa" + assert ffi.string(p1) == "iello" + assert ffi.string(p2) == "removed" + assert ffi.string(p3) == "xorld" + + def test_call_on_commit_immediately_if_inevitable(self): + p0 = ffi_new_aligned("aaa") + self.start_transaction() + self.become_inevitable() + # + @ffi.callback("void(void *)") + def clear_me(p): + p = ffi.cast("char *", p) + p[0] = chr(ord(p[0]) + 1) + # + 
lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + assert ffi.string(p0) == "baa" + self.commit_transaction() + assert ffi.string(p0) == "baa" + + def test_call_on_commit_as_soon_as_inevitable(self): + p0 = ffi_new_aligned("aaa") + self.start_transaction() + # + @ffi.callback("void(void *)") + def clear_me(p): + p = ffi.cast("char *", p) + p[0] = chr(ord(p[0]) + 1) + # + lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + assert ffi.string(p0) == "aaa" + self.become_inevitable() + assert ffi.string(p0) == "baa" + self.commit_transaction() + assert ffi.string(p0) == "baa" + + def test_call_on_commit_immediately_if_outside_transaction(self): + p0 = ffi_new_aligned("aaa") + # + @ffi.callback("void(void *)") + def clear_me(p): + p = ffi.cast("char *", p) + p[0] = chr(ord(p[0]) + 1) + # + lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + assert ffi.string(p0) == "baa" + self.start_transaction() + assert ffi.string(p0) == "baa" + self.commit_transaction() + assert ffi.string(p0) == "baa" + def test_stm_become_globally_unique_transaction(self): self.start_transaction() # From noreply at buildbot.pypy.org Tue Aug 19 19:05:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 19:05:24 +0200 (CEST) Subject: [pypy-commit] stmgc default: Add a return value to stm_call_on_xxx() to know if a call with NULL Message-ID: <20140819170524.AC4661C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1320:bea13491352f Date: 2014-08-19 18:52 +0200 http://bitbucket.org/pypy/stmgc/changeset/bea13491352f/ Log: Add a return value to stm_call_on_xxx() to know if a call with NULL really cancelled something or not. 
diff --git a/c7/stm/extra.c b/c7/stm/extra.c --- a/c7/stm/extra.c +++ b/c7/stm/extra.c @@ -3,50 +3,51 @@ #endif -static bool register_callbacks(stm_thread_local_t *tl, +static long register_callbacks(stm_thread_local_t *tl, void *key, void callback(void *), long index) { if (!_stm_in_transaction(tl)) { /* check that the current thread-local is really running a transaction, and do nothing otherwise. */ - return false; + return -1; } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction (which cannot abort) */ - return false; + return -1; } struct tree_s *callbacks; callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; if (callback == NULL) { - /* ignore the return value: unregistered keys can be - "deleted" again */ - tree_delete_item(callbacks, (uintptr_t)key); + /* double-unregistering works, but return 0 */ + return tree_delete_item(callbacks, (uintptr_t)key); } else { /* double-registering the same key will crash */ tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback); + return 1; } - return true; } -void stm_call_on_commit(stm_thread_local_t *tl, +long stm_call_on_commit(stm_thread_local_t *tl, void *key, void callback(void *)) { - if (!register_callbacks(tl, key, callback, 0)) { + long result = register_callbacks(tl, key, callback, 0); + if (result < 0 && callback != NULL) { /* no regular transaction running, invoke the callback immediately */ callback(key); } + return result; } -void stm_call_on_abort(stm_thread_local_t *tl, +long stm_call_on_abort(stm_thread_local_t *tl, void *key, void callback(void *)) { - register_callbacks(tl, key, callback, 1); + return register_callbacks(tl, key, callback, 1); } static void invoke_and_clear_user_callbacks(long index) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -412,15 +412,16 @@ /* If the current transaction aborts later, invoke 'callback(key)'. If the current transaction commits, then the callback is forgotten. 
You can only register one callback per key. You can call - 'stm_call_on_abort(key, NULL)' to cancel an existing callback. + 'stm_call_on_abort(key, NULL)' to cancel an existing callback + (returns 0 if there was no existing callback to cancel). Note: 'key' must be aligned to a multiple of 8 bytes. */ -void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); /* If the current transaction commits later, invoke 'callback(key)'. If the current transaction aborts, then the callback is forgotten. Same restrictions as stm_call_on_abort(). If the transaction is or becomes inevitable, 'callback(key)' is called immediately. */ -void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); +long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); /* Similar to stm_become_inevitable(), but additionally suspend all diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -109,9 +109,9 @@ long stm_id(object_t *obj); void stm_set_prebuilt_identityhash(object_t *obj, uint64_t hash); -int stm_can_move(object_t *); -void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); -void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); +long stm_can_move(object_t *); +long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); #define STM_TIME_OUTSIDE_TRANSACTION ... #define STM_TIME_RUN_CURRENT ... 
diff --git a/c7/test/test_extra.py b/c7/test/test_extra.py --- a/c7/test/test_extra.py +++ b/c7/test/test_extra.py @@ -32,6 +32,7 @@ p1 = ffi_new_aligned("hello") p2 = ffi_new_aligned("removed") p3 = ffi_new_aligned("world") + p4 = ffi_new_aligned("00") # @ffi.callback("void(void *)") def clear_me(p): @@ -39,17 +40,26 @@ p[0] = chr(ord(p[0]) + 1) # self.start_transaction() - lib.stm_call_on_abort(self.get_stm_thread_local(), p0, clear_me) + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p0, clear_me) + assert x != 0 # the registered callbacks are removed on # successful commit self.commit_transaction() assert ffi.string(p0) == "aaa" # self.start_transaction() - lib.stm_call_on_abort(self.get_stm_thread_local(), p1, clear_me) - lib.stm_call_on_abort(self.get_stm_thread_local(), p2, clear_me) - lib.stm_call_on_abort(self.get_stm_thread_local(), p3, clear_me) - lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL) + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p1, clear_me) + assert x != 0 + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, clear_me) + assert x != 0 + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p3, clear_me) + assert x != 0 + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL) + assert x != 0 + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p2, ffi.NULL) + assert x == 0 + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p4, ffi.NULL) + assert x == 0 assert ffi.string(p0) == "aaa" assert ffi.string(p1) == "hello" assert ffi.string(p2) == "removed" @@ -68,6 +78,7 @@ assert ffi.string(p1) == "iello" assert ffi.string(p2) == "removed" assert ffi.string(p3) == "xorld" + assert ffi.string(p4) == "00" def test_ignores_if_outside_transaction(self): @ffi.callback("void(void *)") @@ -76,7 +87,8 @@ # seen = [] p0 = ffi_new_aligned("aaa") - lib.stm_call_on_abort(self.get_stm_thread_local(), p0, dont_see_me) + x = lib.stm_call_on_abort(self.get_stm_thread_local(), p0, dont_see_me) + 
assert x != 0 self.start_transaction() self.abort_transaction() assert seen == [] @@ -86,6 +98,7 @@ p1 = ffi_new_aligned("hello") p2 = ffi_new_aligned("removed") p3 = ffi_new_aligned("world") + p4 = ffi_new_aligned("00") # @ffi.callback("void(void *)") def clear_me(p): @@ -93,16 +106,25 @@ p[0] = chr(ord(p[0]) + 1) # self.start_transaction() - lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p0, clear_me) + assert x != 0 # the registered callbacks are not called on abort self.abort_transaction() assert ffi.string(p0) == "aaa" # self.start_transaction() - lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me) - lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me) - lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me) - lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL) + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p1, clear_me) + assert x != 0 + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, clear_me) + assert x != 0 + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p3, clear_me) + assert x != 0 + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL) + assert x != 0 + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p2, ffi.NULL) + assert x == 0 + x = lib.stm_call_on_commit(self.get_stm_thread_local(), p4, ffi.NULL) + assert x == 0 assert ffi.string(p0) == "aaa" assert ffi.string(p1) == "hello" assert ffi.string(p2) == "removed" @@ -113,6 +135,7 @@ assert ffi.string(p1) == "iello" assert ffi.string(p2) == "removed" assert ffi.string(p3) == "xorld" + assert ffi.string(p4) == "00" def test_call_on_commit_immediately_if_inevitable(self): p0 = ffi_new_aligned("aaa") From noreply at buildbot.pypy.org Tue Aug 19 19:05:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 19:05:55 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/1d2c771f29c4 Message-ID: 
<20140819170555.2EEEE1C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72911:adc9972b698b Date: 2014-08-19 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/adc9972b698b/ Log: import stmgc/1d2c771f29c4 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -e85ce411f190 +1d2c771f29c4 diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c --- a/rpython/translator/stm/src_stm/stm/core.c +++ b/rpython/translator/stm/src_stm/stm/core.c @@ -376,7 +376,8 @@ assert(list_is_empty(STM_PSEGMENT->young_weakrefs)); assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(tree_is_cleared(STM_PSEGMENT->nursery_objects_shadows)); - assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[0])); + assert(tree_is_cleared(STM_PSEGMENT->callbacks_on_commit_and_abort[1])); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); #ifndef NDEBUG @@ -851,7 +852,7 @@ STM_PSEGMENT->overflow_number_has_been_used = false; } - clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(0); /* for commit */ /* send what is hopefully the correct signals */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { @@ -1045,7 +1046,7 @@ memset(tl->mem_clear_on_abort, 0, tl->mem_bytes_to_clear_on_abort); /* invoke the callbacks */ - invoke_and_clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(1); /* for abort */ int attribute_to = STM_TIME_RUN_ABORTED_OTHER; @@ -1102,7 +1103,7 @@ wait_for_end_of_inevitable_transaction(NULL); STM_PSEGMENT->transaction_state = TS_INEVITABLE; stm_rewind_jmp_forget(STM_SEGMENT->running_thread); - clear_callbacks_on_abort(); + invoke_and_clear_user_callbacks(0); /* for commit */ } else { 
assert(STM_PSEGMENT->transaction_state == TS_INEVITABLE); diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h --- a/rpython/translator/stm/src_stm/stm/core.h +++ b/rpython/translator/stm/src_stm/stm/core.h @@ -133,8 +133,9 @@ weakrefs never point to young objects and never contain NULL. */ struct list_s *old_weakrefs; - /* Tree of 'key->callback' associations from stm_call_on_abort() */ - struct tree_s *callbacks_on_abort; + /* Tree of 'key->callback' associations from stm_call_on_commit() + and stm_call_on_abort() (respectively, array items 0 and 1) */ + struct tree_s *callbacks_on_commit_and_abort[2]; /* Start time: to know approximately for how long a transaction has been running, in contention management */ diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c --- a/rpython/translator/stm/src_stm/stm/extra.c +++ b/rpython/translator/stm/src_stm/stm/extra.c @@ -4,55 +4,76 @@ #endif -void stm_call_on_abort(stm_thread_local_t *tl, - void *key, void callback(void *)) +static bool register_callbacks(stm_thread_local_t *tl, + void *key, void callback(void *), long index) { if (!_stm_in_transaction(tl)) { /* check that the current thread-local is really running a transaction, and do nothing otherwise. 
*/ - return; + return false; } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction (which cannot abort) */ - return; + return false; } + struct tree_s *callbacks; + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; + if (callback == NULL) { /* ignore the return value: unregistered keys can be "deleted" again */ - tree_delete_item(STM_PSEGMENT->callbacks_on_abort, (uintptr_t)key); + tree_delete_item(callbacks, (uintptr_t)key); } else { /* double-registering the same key will crash */ - tree_insert(STM_PSEGMENT->callbacks_on_abort, - (uintptr_t)key, (uintptr_t)callback); + tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback); + } + return true; +} + +void stm_call_on_commit(stm_thread_local_t *tl, + void *key, void callback(void *)) +{ + if (!register_callbacks(tl, key, callback, 0)) { + /* no regular transaction running, invoke the callback + immediately */ + callback(key); } } -static void clear_callbacks_on_abort(void) +void stm_call_on_abort(stm_thread_local_t *tl, + void *key, void callback(void *)) { - if (!tree_is_cleared(STM_PSEGMENT->callbacks_on_abort)) - tree_clear(STM_PSEGMENT->callbacks_on_abort); + register_callbacks(tl, key, callback, 1); } -static void invoke_and_clear_callbacks_on_abort(void) +static void invoke_and_clear_user_callbacks(long index) { - wlog_t *item; - struct tree_s *callbacks = STM_PSEGMENT->callbacks_on_abort; + struct tree_s *callbacks; + + /* clear the callbacks that we don't want to invoke at all */ + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[1 - index]; + if (!tree_is_cleared(callbacks)) + tree_clear(callbacks); + + /* invoke the callbacks from the other group */ + callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; if (tree_is_cleared(callbacks)) return; - STM_PSEGMENT->callbacks_on_abort = tree_create(); + STM_PSEGMENT->callbacks_on_commit_and_abort[index] = tree_create(); + wlog_t *item; TREE_LOOP_FORWARD(*callbacks, 
item) { void *key = (void *)item->addr; void (*callback)(void *) = (void(*)(void *))item->val; assert(key != NULL); assert(callback != NULL); - /* The callback may call stm_call_on_abort(key, NULL). It is - ignored, because 'callbacks_on_abort' was cleared already. */ + /* The callback may call stm_call_on_abort(key, NULL). It is ignored, + because 'callbacks_on_commit_and_abort' was cleared already. */ callback(key); } TREE_LOOP_END; diff --git a/rpython/translator/stm/src_stm/stm/extra.h b/rpython/translator/stm/src_stm/stm/extra.h --- a/rpython/translator/stm/src_stm/stm/extra.h +++ b/rpython/translator/stm/src_stm/stm/extra.h @@ -1,4 +1,4 @@ /* Imported by rpython/translator/stm/import_stmgc.py */ -static void clear_callbacks_on_abort(void); -static void invoke_and_clear_callbacks_on_abort(void); +static void invoke_and_clear_user_callbacks(long index); +/* 0 = for commit, 1 = for abort */ diff --git a/rpython/translator/stm/src_stm/stm/setup.c b/rpython/translator/stm/src_stm/stm/setup.c --- a/rpython/translator/stm/src_stm/stm/setup.c +++ b/rpython/translator/stm/src_stm/stm/setup.c @@ -127,7 +127,8 @@ pr->old_weakrefs = list_create(); pr->young_outside_nursery = tree_create(); pr->nursery_objects_shadows = tree_create(); - pr->callbacks_on_abort = tree_create(); + pr->callbacks_on_commit_and_abort[0] = tree_create(); + pr->callbacks_on_commit_and_abort[1] = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * i; highest_overflow_number = pr->overflow_number; pr->pub.transaction_read_version = 0xff; @@ -167,7 +168,8 @@ list_free(pr->old_weakrefs); tree_free(pr->young_outside_nursery); tree_free(pr->nursery_objects_shadows); - tree_free(pr->callbacks_on_abort); + tree_free(pr->callbacks_on_commit_and_abort[0]); + tree_free(pr->callbacks_on_commit_and_abort[1]); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ 
b/rpython/translator/stm/src_stm/stmgc.h @@ -417,6 +417,12 @@ Note: 'key' must be aligned to a multiple of 8 bytes. */ void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +/* If the current transaction commits later, invoke 'callback(key)'. If + the current transaction aborts, then the callback is forgotten. Same + restrictions as stm_call_on_abort(). If the transaction is or becomes + inevitable, 'callback(key)' is called immediately. */ +void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); + /* Similar to stm_become_inevitable(), but additionally suspend all other threads. A very heavy-handed way to make sure that no other From noreply at buildbot.pypy.org Tue Aug 19 19:05:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 19:05:56 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/bea13491352f Message-ID: <20140819170556.55D281C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72912:ffabe32cdcb9 Date: 2014-08-19 18:52 +0200 http://bitbucket.org/pypy/pypy/changeset/ffabe32cdcb9/ Log: import stmgc/bea13491352f diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -1d2c771f29c4 +bea13491352f diff --git a/rpython/translator/stm/src_stm/stm/extra.c b/rpython/translator/stm/src_stm/stm/extra.c --- a/rpython/translator/stm/src_stm/stm/extra.c +++ b/rpython/translator/stm/src_stm/stm/extra.c @@ -4,50 +4,51 @@ #endif -static bool register_callbacks(stm_thread_local_t *tl, +static long register_callbacks(stm_thread_local_t *tl, void *key, void callback(void *), long index) { if (!_stm_in_transaction(tl)) { /* check that the current thread-local is really running a transaction, and do nothing otherwise. 
*/ - return false; + return -1; } if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* ignore callbacks if we're in an inevitable transaction (which cannot abort) */ - return false; + return -1; } struct tree_s *callbacks; callbacks = STM_PSEGMENT->callbacks_on_commit_and_abort[index]; if (callback == NULL) { - /* ignore the return value: unregistered keys can be - "deleted" again */ - tree_delete_item(callbacks, (uintptr_t)key); + /* double-unregistering works, but return 0 */ + return tree_delete_item(callbacks, (uintptr_t)key); } else { /* double-registering the same key will crash */ tree_insert(callbacks, (uintptr_t)key, (uintptr_t)callback); + return 1; } - return true; } -void stm_call_on_commit(stm_thread_local_t *tl, +long stm_call_on_commit(stm_thread_local_t *tl, void *key, void callback(void *)) { - if (!register_callbacks(tl, key, callback, 0)) { + long result = register_callbacks(tl, key, callback, 0); + if (result < 0 && callback != NULL) { /* no regular transaction running, invoke the callback immediately */ callback(key); } + return result; } -void stm_call_on_abort(stm_thread_local_t *tl, +long stm_call_on_abort(stm_thread_local_t *tl, void *key, void callback(void *)) { - register_callbacks(tl, key, callback, 1); + return register_callbacks(tl, key, callback, 1); } static void invoke_and_clear_user_callbacks(long index) diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -413,15 +413,16 @@ /* If the current transaction aborts later, invoke 'callback(key)'. If the current transaction commits, then the callback is forgotten. You can only register one callback per key. You can call - 'stm_call_on_abort(key, NULL)' to cancel an existing callback. + 'stm_call_on_abort(key, NULL)' to cancel an existing callback + (returns 0 if there was no existing callback to cancel). 
Note: 'key' must be aligned to a multiple of 8 bytes. */ -void stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); +long stm_call_on_abort(stm_thread_local_t *, void *key, void callback(void *)); /* If the current transaction commits later, invoke 'callback(key)'. If the current transaction aborts, then the callback is forgotten. Same restrictions as stm_call_on_abort(). If the transaction is or becomes inevitable, 'callback(key)' is called immediately. */ -void stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); +long stm_call_on_commit(stm_thread_local_t *, void *key, void callback(void *)); /* Similar to stm_become_inevitable(), but additionally suspend all From noreply at buildbot.pypy.org Tue Aug 19 19:05:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 19 Aug 2014 19:05:57 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Use stm_call_on_commit() to delay the raw free(). Message-ID: <20140819170557.88F151C14FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72913:0febb0d1b4e7 Date: 2014-08-19 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/0febb0d1b4e7/ Log: Use stm_call_on_commit() to delay the raw free(). diff --git a/rpython/translator/c/src/mem.c b/rpython/translator/c/src/mem.c --- a/rpython/translator/c/src/mem.c +++ b/rpython/translator/c/src/mem.c @@ -12,7 +12,7 @@ # else # define try_pypy_debug_alloc_stop(p) /* nothing */ # endif -void _pypy_stm_free(void *ptr) +void _pypy_stm_cb_free(void *ptr) { /* This is called by src_stm/et.c when the transaction is aborted and the 'ptr' was malloced but not freed. We have first to @@ -24,6 +24,22 @@ PyObject_Free(ptr); COUNT_FREE; } +void _pypy_stm_op_free(void *ptr) +{ + /* Called when RPython code contains OP_FREE or OP_RAW_FREE. 
+ */ + if (stm_call_on_abort(&stm_thread_local, ptr, NULL) == 0) { + /* There is a running non-inevitable transaction, but the object + was not registered during it, which means that it was created + before. In this case, we cannot immediately free it, but + only when a commit follows. */ + stm_call_on_commit(&stm_thread_local, ptr, _pypy_stm_cb_free); + } + else { + /* In all other cases, free the object immediately. */ + _pypy_stm_cb_free(ptr); + } +} #endif diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h --- a/rpython/translator/c/src/mem.h +++ b/rpython/translator/c/src/mem.h @@ -16,13 +16,14 @@ #ifdef RPY_STM -void _pypy_stm_free(void *); +void _pypy_stm_cb_free(void *); +void _pypy_stm_op_free(void *); #define _OP_RAW_MALLOCED(r) stm_call_on_abort(&stm_thread_local, r, \ - _pypy_stm_free) -#define _OP_RAW_STM_UNREGISTER(r) stm_call_on_abort(&stm_thread_local, r, NULL) + _pypy_stm_cb_free) +#define OP_FREE(p) _pypy_stm_op_free(p) #else -#define _OP_RAW_MALLOCED(r) /* nothing */ -#define _OP_RAW_STM_UNREGISTER(r) /* nothing */ +#define _OP_RAW_MALLOCED(r) /* nothing */ +#define OP_FREE(p) PyObject_Free(p); COUNT_FREE #endif @@ -34,8 +35,7 @@ } \ } -#define OP_RAW_FREE(p, r) PyObject_Free(p); COUNT_FREE; \ - _OP_RAW_STM_UNREGISTER(p); +#define OP_RAW_FREE(p, r) OP_FREE(p) #define OP_RAW_MEMCLEAR(p, size, r) memset((void*)p, 0, size) @@ -54,8 +54,6 @@ /************************************************************/ -#define OP_FREE(p) OP_RAW_FREE(p, do_not_use) - #ifndef COUNT_OP_MALLOCS #define COUNT_MALLOC /* nothing */ @@ -87,7 +85,11 @@ # define OP_TRACK_ALLOC_START(addr, r) pypy_debug_alloc_start(addr, \ __FUNCTION__) -# define OP_TRACK_ALLOC_STOP(addr, r) pypy_debug_alloc_stop(addr) +# ifdef RPY_STM +# define OP_TRACK_ALLOC_STOP(addr, r) /* nothing */ +# else +# define OP_TRACK_ALLOC_STOP(addr, r) pypy_debug_alloc_stop(addr) +# endif void pypy_debug_alloc_start(void*, const char*); void pypy_debug_alloc_stop(void*); diff --git 
a/rpython/translator/stm/inevitable.py b/rpython/translator/stm/inevitable.py --- a/rpython/translator/stm/inevitable.py +++ b/rpython/translator/stm/inevitable.py @@ -96,7 +96,7 @@ if op.opname in MALLOCS: return False if op.opname in FREES: - return True + return False # # Function calls if op.opname == 'direct_call' or op.opname == 'indirect_call': From noreply at buildbot.pypy.org Tue Aug 19 21:15:55 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 21:15:55 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: reworked comments in incminimark Message-ID: <20140819191555.DCB621C3CB0@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72914:93b928f5cf40 Date: 2014-08-19 21:04 +0200 http://bitbucket.org/pypy/pypy/changeset/93b928f5cf40/ Log: reworked comments in incminimark diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -71,15 +71,15 @@ # * young objects: allocated in the nursery if they are not too large, or # raw-malloced otherwise. The nursery is a fixed-size memory buffer of # 4MB by default. When full, we do a minor collection; -# the surviving objects from the nursery are moved outside, and the -# non-surviving raw-malloced objects are freed. All surviving objects -# become old. +# - surviving objects from the nursery are moved outside and become old, +# - non-surviving raw-malloced objects are freed, +# - and pinned objects are kept at their place inside the nursery and stay +# young. # # * old objects: never move again. These objects are either allocated by # minimarkpage.py (if they are small), or raw-malloced (if they are not # small). Collected by regular mark-n-sweep during major collections. # -# XXX update doc string to contain object pinning (groggi) WORD = LONG_BIT // 8 @@ -132,8 +132,8 @@ # a minor collection. 
GCFLAG_VISITED_RMY = first_gcflag << 8 -# The following flag is set on nursery objects of which we expect not to -# move. This means that a young object with this flag is not moved out +# The following flag is set on nursery objects to keep them in the nursery. +# This means that a young object with this flag is not moved out # of the nursery during a minor collection. See pin()/unpin() for further # details. GCFLAG_PINNED = first_gcflag << 9 @@ -264,7 +264,7 @@ # nursery. Has to fit at least one large object "nursery_cleanup": 32768 * WORD, - # Number of objects that are allowed to be pinned in the nursery + # Number of objects that are allowed to be pinned in the nursery # at the same time. Must be lesser than or equal to the chunk size # of an AddressStack. "max_number_of_pinned_objects": 100, @@ -379,21 +379,19 @@ # minor collection. self.nursery_objects_shadows = self.AddressDict() # - # A sorted deque containing all pinned objects *before* the last - # minor collection. This deque must be consulted when considering - # next nursery ceiling. + # A sorted deque containing addresses of pinned objects. + # This collection is used to make sure we don't overwrite pinned objects. + # Each minor collection creates a new deque containing the active pinned + # objects. The addresses are used to set the next 'nursery_top'. self.nursery_barriers = self.AddressDeque() # # Counter tracking how many pinned objects currently reside inside # the nursery. self.pinned_objects_in_nursery = 0 # - # Keeps track of objects pointing to pinned objects. These objects - # must be revisited every minor collection. Without this list - # any old object inside this list would only be visited in case a - # write barrier was triggered, which would result in not visiting - # the young pinned object and would therefore result in removing - # the pinned object. + # Keeps track of old objects pointing to pinned objects. These objects + # must be traced every minor collection. 
Without tracing them the + # referenced pinned object wouldn't be visited and therefore collected. self.old_objects_pointing_to_pinned = self.AddressStack() # # Allocate a nursery. In case of auto_nursery_size, start by @@ -708,8 +706,9 @@ is needed.""" # in general we always move 'self.nursery_top' by 'self.nursery_cleanup'. - # However, because of the presence of pinned objects there are cases where - # the GC can't move by 'self.nursery_cleanup' without overflowing the arena. + # However, because of the presence of pinned objects there are cases + # where the GC can't move by 'self.nursery_cleanup' without overflowing + # the arena. # For such a case we use the space left in the nursery. size = min(self.nursery_cleanup, self.nursery_real_top - self.nursery_top) if llmemory.raw_malloc_usage(totalsize) > size: @@ -749,14 +748,14 @@ self.nursery_free = self.nursery_top + pinned_obj_size self.nursery_top = self.nursery_barriers.popleft() # - # because we encountered a barrier, we also have to fix - # 'prev_result' as the one provided as a method parameter - # can't be used as there is no space between 'prev_result' - # and the barrier for 'totalsize'. + # because we encountered a barrier, we have to fix 'prev_result'. + # The one provided as parameter can't be used further as there + # is not enough space between 'prev_result' and and the barrier + # for an object of 'totalsize' size. prev_result = self.nursery_free else: # - # no barriers (i.e. pinned objects) after 'nursery_free'. + # no barriers (i.e. no pinned objects) after 'nursery_free'. # If possible just enlarge the used part of the nursery. # Otherwise we are forced to clean up the nursery. if self.try_move_nursery_top(totalsize): @@ -783,8 +782,8 @@ # if self.nursery_free + totalsize > self.nursery_real_top: # still not enough space, we need to collect. - # maybe nursery contains too many pinned objects (see - # assert below). + # maybe nursery contains too many pinned objects + # (see assert below). 
self.minor_collection() else: # execute loop one more time. This should find @@ -803,7 +802,6 @@ break # if self.debug_tiny_nursery >= 0: # for debugging - # XXX solution for this assert? (groggi) ll_assert(not self.nursery_barriers.non_empty(), "no support for nursery debug and pinning") if self.nursery_top - self.nursery_free > self.debug_tiny_nursery: @@ -983,19 +981,18 @@ if self.pinned_objects_in_nursery >= self.max_number_of_pinned_objects: return False if not self.is_in_nursery(obj): - # Old objects are already non-moving, therefore pinning + # old objects are already non-moving, therefore pinning # makes no sense. If you run into this case, you may forgot - # to check if can_move(obj) already returns True in which - # case a call to pin() is unnecessary. + # to check can_move(obj). return False if self.has_gcptr(self.get_type_id(obj)): # objects containing GC pointers can't be pinned. If we would add # it, we would have to track all pinned objects and trace them # every minor collection to make sure the referenced object are - # kept alive. + # kept alive. Right now this is not a use case that's needed. return False if self._is_pinned(obj): - # Already pinned, we do not allow to pin it again. + # already pinned, we do not allow to pin it again. # Reason: It would be possible that the first caller unpins # while the second caller thinks it's still pinned. return False @@ -1193,9 +1190,6 @@ else: ll_assert(self.is_in_nursery(obj), "pinned object not in nursery") - # XXX check if we can support that or if it makes no sense (groggi) - ll_assert(not self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS, - "pinned nursery object with GCFLAG_TRACK_YOUNG_PTRS") if self.gc_state == STATE_SCANNING: self._debug_check_object_scanning(obj) @@ -1544,9 +1538,9 @@ # where this stack is filled. Pinning an object only prevents it from # being moved, not from being collected if it is not reachable anymore. 
self.surviving_pinned_objects = self.AddressStack() - # - # The following counter keeps track of the amount of alive and pinned - # objects inside the nursery. + # The following counter keeps track of alive and pinned young objects + # inside the nursery. We reset it here and increace it in + # '_trace_drag_out()'. self.pinned_objects_in_nursery = 0 # # Before everything else, remove from 'old_objects_pointing_to_young' @@ -1578,14 +1572,12 @@ # with pinned object that are (only) visible from an old # object. # Additionally we create a new list as it may be that an old object - # no longer points to a pinned one and we want them to remove from - # the list. + # no longer points to a pinned one. Such old objects won't be added + # again to 'old_objects_pointing_to_pinned'. if self.old_objects_pointing_to_pinned.non_empty(): current_old_objects_pointing_to_pinned = \ self.old_objects_pointing_to_pinned - # self.old_objects_pointing_to_pinned = self.AddressStack() - # visit the ones we know of current_old_objects_pointing_to_pinned.foreach( self._visit_old_objects_pointing_to_pinned, None) current_old_objects_pointing_to_pinned.delete() @@ -1639,7 +1631,7 @@ self.free_young_rawmalloced_objects() # # All live nursery objects are out of the nursery or pinned inside - # the nursery. Create nursery barriers to protect the pinned object, + # the nursery. Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery # pointer. size_gc_header = self.gcheaderbuilder.size_gc_header @@ -1673,10 +1665,11 @@ llarena.arena_reset(prev, self.nursery_real_top - prev, 0) # # We assume that there are only a few pinned objects. Therefore, if there - # is 'self.nursery_cleanup' space between the nursery's start ('self.nursery') - # and the last pinned object ('prev'), we conclude that there is enough zeroed - # space inside the arena to use for new allocation. 
Otherwise we fill - # the nursery with zeros for 'self.nursery_cleanup' of space. + # is 'self.nursery_cleanup' space between the nursery's start + # ('self.nursery') and the last pinned object ('prev'), we conclude that + # there is enough zeroed space inside the arena to use for new + # allocation. Otherwise we fill the nursery with zeros for + # 'self.nursery_cleanup' of space. if prev - self.nursery >= self.nursery_cleanup: nursery_barriers.append(prev) else: @@ -1884,21 +1877,25 @@ # elif self._is_pinned(obj): hdr = self.header(obj) - # track parent of pinned object specially + # + # track parent of pinned object specially. This mus be done before + # checking for GCFLAG_VISITED: it may be that the same pinned object + # is reachable from multiple sources (e.g. two old objects pointing + # to the same pinned object). In such a case we need all parents + # of the pinned object in the list. Otherwise he pinned object could + # become dead and be removed just because the first parent of it + # is dead and collected. 
if parent != llmemory.NULL and \ not self.header(parent).tid & GCFLAG_PINNED_OBJECT_PARENT_KNOWN: # self.old_objects_pointing_to_pinned.append(parent) self.header(parent).tid |= GCFLAG_PINNED - + # if hdr.tid & GCFLAG_VISITED: - # already visited and keeping track of the object return + # hdr.tid |= GCFLAG_VISITED # - # XXX add additional checks for unsupported pinned objects (groggi) - ll_assert(not self.header(obj).tid & GCFLAG_HAS_CARDS, - "pinned object with GCFLAG_HAS_CARDS not supported") self.surviving_pinned_objects.append( llarena.getfakearenaaddress(obj - size_gc_header)) self.pinned_objects_in_nursery += 1 @@ -2124,7 +2121,7 @@ # Light finalizers if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() - #objects_to_trace processed fully, can move on to sweeping + # objects_to_trace processed fully, can move on to sweeping self.ac.mass_free_prepare() self.start_free_rawmalloc_objects() # @@ -2136,7 +2133,8 @@ self._sweep_old_objects_pointing_to_pinned, new_old_objects_pointing_to_pinned) self.old_objects_pointing_to_pinned.delete() - self.old_objects_pointing_to_pinned = new_old_objects_pointing_to_pinned + self.old_objects_pointing_to_pinned = \ + new_old_objects_pointing_to_pinned self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: @@ -2333,9 +2331,10 @@ # flag set, then the object should be in 'prebuilt_root_objects', # and the GCFLAG_VISITED will be reset at the end of the # collection. + # Objects with GCFLAG_PINNED can't have gcptrs (see pin()), they can be + # ignored. hdr = self.header(obj) if hdr.tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS | GCFLAG_PINNED): - # XXX ^^^ update doc in any way because of GCFLAG_PINNED addition? (groggi) return 0 # # It's the first time. We set the flag VISITED. 
The trick is From noreply at buildbot.pypy.org Tue Aug 19 21:15:57 2014 From: noreply at buildbot.pypy.org (groggi) Date: Tue, 19 Aug 2014 21:15:57 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: remove outdated comments Message-ID: <20140819191557.1E6831C3CB0@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72915:d704858f1a7d Date: 2014-08-19 21:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d704858f1a7d/ Log: remove outdated comments diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -20,7 +20,6 @@ # for test purposes we allow objects to be pinned and use # the following list to keep track of the pinned objects -# XXX think about possible unexpected behavior (groggi) if not we_are_translated(): pinned_objects = [] diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -803,14 +803,6 @@ """ Either free a non-moving buffer or keep the original storage alive. """ - # We cannot rely on rgc.can_move(data) here, because its result - # might have changed since get_nonmovingbuffer(). Instead we check - # if 'buf' points inside 'data'. This is only possible if we - # followed the 2nd case in get_nonmovingbuffer(); in the first case, - # 'buf' points to its own raw-malloced memory. 
- # XXX fix comment (groggi) - - if is_pinned: rgc.unpin(data) if is_raw: From noreply at buildbot.pypy.org Tue Aug 19 22:14:05 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Tue, 19 Aug 2014 22:14:05 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-fixes2: remove time.accept2dyear (removed in pythong 3.3) Message-ID: <20140819201405.2EE2C1C14FF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3-fixes2 Changeset: r72916:df851de79c11 Date: 2014-08-19 20:20 +0200 http://bitbucket.org/pypy/pypy/changeset/df851de79c11/ Log: remove time.accept2dyear (removed in pythong 3.3) diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py --- a/pypy/module/rctime/__init__.py +++ b/pypy/module/rctime/__init__.py @@ -39,5 +39,3 @@ from pypy.module.rctime import interp_time interp_time._init_timezone(space) - interp_time._init_accept2dyear(space) - diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -198,13 +198,6 @@ c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P], rffi.SIZE_T) -def _init_accept2dyear(space): - if os.environ.get("PYTHONY2K"): - accept2dyear = 0 - else: - accept2dyear = 1 - _set_module_object(space, "accept2dyear", space.wrap(accept2dyear)) - def _init_timezone(space): timezone = daylight = altzone = 0 tzname = ["", ""] @@ -435,21 +428,6 @@ glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO) rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0) - if y < 1000: - w_accept2dyear = _get_module_object(space, "accept2dyear") - accept2dyear = space.is_true(w_accept2dyear) - - if accept2dyear: - if 69 <= y <= 99: - y += 1900 - elif 0 <= y <= 68: - y += 2000 - else: - raise OperationError(space.w_ValueError, - space.wrap("year out of range")) - space.warn(space.wrap("Century info guessed for a 2-digit year."), - space.w_DeprecationWarning) - # tm_wday does not need checking of its upper-bound 
since taking "% # 7" in _gettmarg() automatically restricts the range. if rffi.getintfield(glob_buf, 'c_tm_wday') < -1: diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -5,7 +5,6 @@ def test_attributes(self): import time as rctime - assert isinstance(rctime.accept2dyear, int) assert isinstance(rctime.altzone, int) assert isinstance(rctime.daylight, int) assert isinstance(rctime.timezone, int) @@ -101,22 +100,16 @@ res = rctime.mktime(rctime.localtime()) assert isinstance(res, float) + # year cannot be -1 ltime = rctime.localtime() - rctime.accept2dyear == 0 ltime = list(ltime) ltime[0] = -1 - raises(ValueError, rctime.mktime, tuple(ltime)) - rctime.accept2dyear == 1 + raises(OverflowError, rctime.mktime, tuple(ltime)) - ltime = list(ltime) - ltime[0] = 67 - ltime = tuple(ltime) - if os.name != "nt" and sys.maxsize < 1<<32: # time_t may be 64bit - raises(OverflowError, rctime.mktime, ltime) - + # year cannot be 100 ltime = list(ltime) ltime[0] = 100 - raises(ValueError, rctime.mktime, tuple(ltime)) + raises(OverflowError, rctime.mktime, tuple(ltime)) t = rctime.time() assert int(rctime.mktime(rctime.localtime(t))) == int(t) @@ -169,28 +162,6 @@ assert asc[-len(str(bigyear)):] == str(bigyear) raises(OverflowError, rctime.asctime, (bigyear + 1,) + (0,)*8) - def test_accept2dyear_access(self): - import time as rctime - - accept2dyear = rctime.accept2dyear - del rctime.accept2dyear - try: - # with year >= 1900 this shouldn't need to access accept2dyear - assert rctime.asctime((2000,) + (0,) * 8).split()[-1] == '2000' - finally: - rctime.accept2dyear = accept2dyear - - def test_accept2dyear_bad(self): - import time as rctime - class X: - def __bool__(self): - raise RuntimeError('boo') - orig, rctime.accept2dyear = rctime.accept2dyear, X() - try: - raises(RuntimeError, rctime.asctime, (200,) + (0,) * 8) - finally: - rctime.accept2dyear = 
orig - def test_struct_time(self): import time as rctime raises(TypeError, rctime.struct_time) @@ -281,7 +252,7 @@ raises(TypeError, rctime.strftime, ()) raises(TypeError, rctime.strftime, (1,)) raises(TypeError, rctime.strftime, range(8)) - exp = '2000 01 01 00 00 00 1 001' + exp = '0 01 01 00 00 00 1 001' assert rctime.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) == exp # Guard against invalid/non-supported format string @@ -296,6 +267,23 @@ else: assert rctime.strftime('%f') == '%f' + def test_strftime_y2k(self): + '''Port of cpython's datetimetester.test_strftime_y2k.''' + import time as rctime + + ltime = list(rctime.gmtime()) + for y in (1, 49, 70, 99, 100, 999, 1000, 1970): + ltime[0] = y + + def fmt(template): + return rctime.strftime(template, tuple(ltime)) + + if fmt('%Y') != '%04d' % y: + # Year 42 returns '42', not padded + assert fmt("%Y") == '%d' % y + # '0042' is obtained anyway + assert fmt("%4Y") == '%04d' % y + def test_strftime_ext(self): import time as rctime @@ -314,9 +302,6 @@ # of the time tuple. 
# check year - if rctime.accept2dyear: - raises(ValueError, rctime.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1)) - raises(ValueError, rctime.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1)) rctime.strftime('', (1899, 1, 1, 0, 0, 0, 0, 1, -1)) rctime.strftime('', (0, 1, 1, 0, 0, 0, 0, 1, -1)) From noreply at buildbot.pypy.org Tue Aug 19 22:14:06 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Tue, 19 Aug 2014 22:14:06 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in numerodix/pypy/py3.3-fixes2 (pull request #271) Message-ID: <20140819201406.9D6961C14FF@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72917:6fcd3a9ba6e2 Date: 2014-08-19 22:13 +0200 http://bitbucket.org/pypy/pypy/changeset/6fcd3a9ba6e2/ Log: Merged in numerodix/pypy/py3.3-fixes2 (pull request #271) remove time.accept2dyear (removed in python 3.3) diff --git a/pypy/module/rctime/__init__.py b/pypy/module/rctime/__init__.py --- a/pypy/module/rctime/__init__.py +++ b/pypy/module/rctime/__init__.py @@ -39,5 +39,3 @@ from pypy.module.rctime import interp_time interp_time._init_timezone(space) - interp_time._init_accept2dyear(space) - diff --git a/pypy/module/rctime/interp_time.py b/pypy/module/rctime/interp_time.py --- a/pypy/module/rctime/interp_time.py +++ b/pypy/module/rctime/interp_time.py @@ -198,13 +198,6 @@ c_strftime = external('strftime', [rffi.CCHARP, rffi.SIZE_T, rffi.CCHARP, TM_P], rffi.SIZE_T) -def _init_accept2dyear(space): - if os.environ.get("PYTHONY2K"): - accept2dyear = 0 - else: - accept2dyear = 1 - _set_module_object(space, "accept2dyear", space.wrap(accept2dyear)) - def _init_timezone(space): timezone = daylight = altzone = 0 tzname = ["", ""] @@ -435,21 +428,6 @@ glob_buf.c_tm_zone = lltype.nullptr(rffi.CCHARP.TO) rffi.setintfield(glob_buf, 'c_tm_gmtoff', 0) - if y < 1000: - w_accept2dyear = _get_module_object(space, "accept2dyear") - accept2dyear = space.is_true(w_accept2dyear) - - if accept2dyear: - if 69 <= y <= 99: - y += 1900 - elif 0 
<= y <= 68: - y += 2000 - else: - raise OperationError(space.w_ValueError, - space.wrap("year out of range")) - space.warn(space.wrap("Century info guessed for a 2-digit year."), - space.w_DeprecationWarning) - # tm_wday does not need checking of its upper-bound since taking "% # 7" in _gettmarg() automatically restricts the range. if rffi.getintfield(glob_buf, 'c_tm_wday') < -1: diff --git a/pypy/module/rctime/test/test_rctime.py b/pypy/module/rctime/test/test_rctime.py --- a/pypy/module/rctime/test/test_rctime.py +++ b/pypy/module/rctime/test/test_rctime.py @@ -5,7 +5,6 @@ def test_attributes(self): import time as rctime - assert isinstance(rctime.accept2dyear, int) assert isinstance(rctime.altzone, int) assert isinstance(rctime.daylight, int) assert isinstance(rctime.timezone, int) @@ -101,22 +100,16 @@ res = rctime.mktime(rctime.localtime()) assert isinstance(res, float) + # year cannot be -1 ltime = rctime.localtime() - rctime.accept2dyear == 0 ltime = list(ltime) ltime[0] = -1 - raises(ValueError, rctime.mktime, tuple(ltime)) - rctime.accept2dyear == 1 + raises(OverflowError, rctime.mktime, tuple(ltime)) - ltime = list(ltime) - ltime[0] = 67 - ltime = tuple(ltime) - if os.name != "nt" and sys.maxsize < 1<<32: # time_t may be 64bit - raises(OverflowError, rctime.mktime, ltime) - + # year cannot be 100 ltime = list(ltime) ltime[0] = 100 - raises(ValueError, rctime.mktime, tuple(ltime)) + raises(OverflowError, rctime.mktime, tuple(ltime)) t = rctime.time() assert int(rctime.mktime(rctime.localtime(t))) == int(t) @@ -169,28 +162,6 @@ assert asc[-len(str(bigyear)):] == str(bigyear) raises(OverflowError, rctime.asctime, (bigyear + 1,) + (0,)*8) - def test_accept2dyear_access(self): - import time as rctime - - accept2dyear = rctime.accept2dyear - del rctime.accept2dyear - try: - # with year >= 1900 this shouldn't need to access accept2dyear - assert rctime.asctime((2000,) + (0,) * 8).split()[-1] == '2000' - finally: - rctime.accept2dyear = accept2dyear - - def 
test_accept2dyear_bad(self): - import time as rctime - class X: - def __bool__(self): - raise RuntimeError('boo') - orig, rctime.accept2dyear = rctime.accept2dyear, X() - try: - raises(RuntimeError, rctime.asctime, (200,) + (0,) * 8) - finally: - rctime.accept2dyear = orig - def test_struct_time(self): import time as rctime raises(TypeError, rctime.struct_time) @@ -281,7 +252,7 @@ raises(TypeError, rctime.strftime, ()) raises(TypeError, rctime.strftime, (1,)) raises(TypeError, rctime.strftime, range(8)) - exp = '2000 01 01 00 00 00 1 001' + exp = '0 01 01 00 00 00 1 001' assert rctime.strftime("%Y %m %d %H %M %S %w %j", (0,)*9) == exp # Guard against invalid/non-supported format string @@ -296,6 +267,23 @@ else: assert rctime.strftime('%f') == '%f' + def test_strftime_y2k(self): + '''Port of cpython's datetimetester.test_strftime_y2k.''' + import time as rctime + + ltime = list(rctime.gmtime()) + for y in (1, 49, 70, 99, 100, 999, 1000, 1970): + ltime[0] = y + + def fmt(template): + return rctime.strftime(template, tuple(ltime)) + + if fmt('%Y') != '%04d' % y: + # Year 42 returns '42', not padded + assert fmt("%Y") == '%d' % y + # '0042' is obtained anyway + assert fmt("%4Y") == '%04d' % y + def test_strftime_ext(self): import time as rctime @@ -314,9 +302,6 @@ # of the time tuple. 
# check year - if rctime.accept2dyear: - raises(ValueError, rctime.strftime, '', (-1, 1, 1, 0, 0, 0, 0, 1, -1)) - raises(ValueError, rctime.strftime, '', (100, 1, 1, 0, 0, 0, 0, 1, -1)) rctime.strftime('', (1899, 1, 1, 0, 0, 0, 0, 1, -1)) rctime.strftime('', (0, 1, 1, 0, 0, 0, 0, 1, -1)) From noreply at buildbot.pypy.org Wed Aug 20 09:32:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:32:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #1849: fix for str.split(), str.rsplit() Message-ID: <20140820073211.38EA41C1482@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72918:b9445a658af8 Date: 2014-08-20 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b9445a658af8/ Log: Issue #1849: fix for str.split(), str.rsplit() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -731,6 +731,22 @@ return space.wrap(self._val(space).join(l)) return self._StringMethods_descr_join(space, w_list) + _StringMethods_descr_split = descr_split + @unwrap_spec(maxsplit=int) + def descr_split(self, space, w_sep=None, maxsplit=-1): + if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_split(space, w_sep, maxsplit) + return self._StringMethods_descr_split(space, w_sep, maxsplit) + + _StringMethods_descr_rsplit = descr_rsplit + @unwrap_spec(maxsplit=int) + def descr_rsplit(self, space, w_sep=None, maxsplit=-1): + if w_sep is not None and space.isinstance_w(w_sep, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_rsplit(space, w_sep, maxsplit) + return self._StringMethods_descr_rsplit(space, w_sep, maxsplit) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff 
--git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -175,6 +175,24 @@ assert u' a b c '.rsplit(None, 0) == [u' a b c'] assert u''.rsplit('aaa') == [u''] + def test_split_rsplit_str_unicode(self): + x = 'abc'.split(u'b') + assert x == [u'a', u'c'] + assert map(type, x) == [unicode, unicode] + x = 'abc'.rsplit(u'b') + assert x == [u'a', u'c'] + assert map(type, x) == [unicode, unicode] + x = 'abc'.split(u'\u4321') + assert x == [u'abc'] + assert map(type, x) == [unicode] + x = 'abc'.rsplit(u'\u4321') + assert x == [u'abc'] + assert map(type, x) == [unicode] + raises(UnicodeDecodeError, '\x80'.split, u'a') + raises(UnicodeDecodeError, '\x80'.split, u'') + raises(UnicodeDecodeError, '\x80'.rsplit, u'a') + raises(UnicodeDecodeError, '\x80'.rsplit, u'') + def test_center(self): s=u"a b" assert s.center(0) == u"a b" From noreply at buildbot.pypy.org Wed Aug 20 09:54:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:54:00 +0200 (CEST) Subject: [pypy-commit] pypy default: str.strip(), str.lstrip(), str.rstrip() Message-ID: <20140820075400.53C421C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72919:0ff8cabf7240 Date: 2014-08-20 09:36 +0200 http://bitbucket.org/pypy/pypy/changeset/0ff8cabf7240/ Log: str.strip(), str.lstrip(), str.rstrip() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -747,6 +747,27 @@ return self_as_uni.descr_rsplit(space, w_sep, maxsplit) return self._StringMethods_descr_rsplit(space, w_sep, maxsplit) + _StringMethods_descr_strip = descr_strip + def descr_strip(self, space, w_chars=None): + if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return 
self_as_uni.descr_strip(space, w_chars) + return self._StringMethods_descr_strip(space, w_chars) + + _StringMethods_descr_lstrip = descr_lstrip + def descr_lstrip(self, space, w_chars=None): + if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_lstrip(space, w_chars) + return self._StringMethods_descr_lstrip(space, w_chars) + + _StringMethods_descr_rstrip = descr_rstrip + def descr_rstrip(self, space, w_chars=None): + if w_chars is not None and space.isinstance_w(w_chars, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_rstrip(space, w_chars) + return self._StringMethods_descr_rstrip(space, w_chars) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -333,6 +333,17 @@ assert u'xyzzyhelloxyzzy'.lstrip('xyz') == u'helloxyzzy' assert u'xyzzyhelloxyzzy'.rstrip(u'xyz') == u'xyzzyhello' + def test_strip_str_unicode(self): + x = "--abc--".strip(u"-") + assert (x, type(x)) == (u"abc", unicode) + x = "--abc--".lstrip(u"-") + assert (x, type(x)) == (u"abc--", unicode) + x = "--abc--".rstrip(u"-") + assert (x, type(x)) == (u"--abc", unicode) + raises(UnicodeDecodeError, "\x80".strip, u"") + raises(UnicodeDecodeError, "\x80".lstrip, u"") + raises(UnicodeDecodeError, "\x80".rstrip, u"") + def test_long_from_unicode(self): assert long(u'12345678901234567890') == 12345678901234567890 assert int(u'12345678901234567890') == 12345678901234567890 From noreply at buildbot.pypy.org Wed Aug 20 09:54:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:54:01 +0200 (CEST) Subject: 
[pypy-commit] pypy default: str.count() Message-ID: <20140820075401.86A031C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72920:4533e4236ff1 Date: 2014-08-20 09:39 +0200 http://bitbucket.org/pypy/pypy/changeset/4533e4236ff1/ Log: str.count() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -768,6 +768,13 @@ return self_as_uni.descr_rstrip(space, w_chars) return self._StringMethods_descr_rstrip(space, w_chars) + _StringMethods_descr_count = descr_count + def descr_count(self, space, w_sub, w_start=None, w_end=None): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_count(space, w_sub, w_start, w_end) + return self._StringMethods_descr_count(space, w_sub, w_start, w_end) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -680,13 +680,23 @@ assert u"".count(u"") ==1 assert u"Python".count(u"") ==7 assert u"ab aaba".count(u"ab") ==2 - assert 'aaa'.count('a') == 3 - assert 'aaa'.count('b') == 0 - assert 'aaa'.count('a', -1) == 1 - assert 'aaa'.count('a', -10) == 3 - assert 'aaa'.count('a', 0, -1) == 2 - assert 'aaa'.count('a', 0, -10) == 0 - assert 'ababa'.count('aba') == 1 + assert u'aaa'.count(u'a') == 3 + assert u'aaa'.count(u'b') == 0 + assert u'aaa'.count(u'a', -1) == 1 + assert u'aaa'.count(u'a', -10) == 3 + assert u'aaa'.count(u'a', 0, -1) == 2 + assert u'aaa'.count(u'a', 0, -10) == 0 + assert u'ababa'.count(u'aba') == 1 + + def test_count_str_unicode(self): + assert 'aaa'.count(u'a') == 3 + assert 'aaa'.count(u'b') == 0 + assert 
'aaa'.count(u'a', -1) == 1 + assert 'aaa'.count(u'a', -10) == 3 + assert 'aaa'.count(u'a', 0, -1) == 2 + assert 'aaa'.count(u'a', 0, -10) == 0 + assert 'ababa'.count(u'aba') == 1 + raises(UnicodeDecodeError, '\x80'.count, u'') def test_swapcase(self): assert u'\xe4\xc4\xdf'.swapcase() == u'\xc4\xe4\xdf' From noreply at buildbot.pypy.org Wed Aug 20 09:54:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:54:02 +0200 (CEST) Subject: [pypy-commit] pypy default: str.find(), str.rfind(), str.index(), str.rindex() Message-ID: <20140820075402.CC7E01C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72921:27305d2b0ff9 Date: 2014-08-20 09:43 +0200 http://bitbucket.org/pypy/pypy/changeset/27305d2b0ff9/ Log: str.find(), str.rfind(), str.index(), str.rindex() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -775,6 +775,34 @@ return self_as_uni.descr_count(space, w_sub, w_start, w_end) return self._StringMethods_descr_count(space, w_sub, w_start, w_end) + _StringMethods_descr_find = descr_find + def descr_find(self, space, w_sub, w_start=None, w_end=None): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_find(space, w_sub, w_start, w_end) + return self._StringMethods_descr_find(space, w_sub, w_start, w_end) + + _StringMethods_descr_rfind = descr_rfind + def descr_rfind(self, space, w_sub, w_start=None, w_end=None): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_rfind(space, w_sub, w_start, w_end) + return self._StringMethods_descr_rfind(space, w_sub, w_start, w_end) + + _StringMethods_descr_index = descr_index + def descr_index(self, space, w_sub, w_start=None, w_end=None): + if space.isinstance_w(w_sub, space.w_unicode): + 
self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_index(space, w_sub, w_start, w_end) + return self._StringMethods_descr_index(space, w_sub, w_start, w_end) + + _StringMethods_descr_rindex = descr_rindex + def descr_rindex(self, space, w_sub, w_start=None, w_end=None): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_rindex(space, w_sub, w_start, w_end) + return self._StringMethods_descr_rindex(space, w_sub, w_start, w_end) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -675,6 +675,16 @@ def test_rfind_corner_case(self): assert u'abc'.rfind('', 4) == -1 + def test_find_index_str_unicode(self): + assert 'abcdefghiabc'.find(u'bc') == 1 + assert 'abcdefghiabc'.rfind(u'abc') == 9 + raises(UnicodeDecodeError, '\x80'.find, u'') + raises(UnicodeDecodeError, '\x80'.rfind, u'') + assert 'abcdefghiabc'.index(u'bc') == 1 + assert 'abcdefghiabc'.rindex(u'abc') == 9 + raises(UnicodeDecodeError, '\x80'.index, u'') + raises(UnicodeDecodeError, '\x80'.rindex, u'') + def test_count(self): assert u"".count(u"x") ==0 assert u"".count(u"") ==1 From noreply at buildbot.pypy.org Wed Aug 20 09:54:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:54:04 +0200 (CEST) Subject: [pypy-commit] pypy default: str.replace() Message-ID: <20140820075404.13D7F1C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72922:864226266f8c Date: 2014-08-20 09:46 +0200 http://bitbucket.org/pypy/pypy/changeset/864226266f8c/ Log: str.replace() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py 
--- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -707,19 +707,7 @@ new_is_unicode = space.isinstance_w(w_new, space.w_unicode) if old_is_unicode or new_is_unicode: self_as_uni = unicode_from_encoded_object(space, self, None, None) - if not old_is_unicode: - w_old = unicode_from_encoded_object(space, w_old, None, None) - if not new_is_unicode: - w_new = unicode_from_encoded_object(space, w_new, None, None) - input = self_as_uni._val(space) - sub = self_as_uni._op_val(space, w_old) - by = self_as_uni._op_val(space, w_new) - try: - res = replace(input, sub, by, count) - except OverflowError: - raise oefmt(space.w_OverflowError, - "replace string is too long") - return self_as_uni._new(res) + return self_as_uni.descr_replace(space, w_old, w_new, count) return self._StringMethods_descr_replace(space, w_old, w_new, count) _StringMethods_descr_join = descr_join diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -956,10 +956,12 @@ assert not u'a'.isnumeric() assert u'\u2460'.isnumeric() # CIRCLED DIGIT ONE - def test_replace_autoconvert(self): + def test_replace_str_unicode(self): res = 'one!two!three!'.replace(u'!', u'@', 1) assert res == u'one at two!three!' 
assert type(res) == unicode + raises(UnicodeDecodeError, '\x80'.replace, 'a', u'b') + raises(UnicodeDecodeError, '\x80'.replace, u'a', 'b') def test_join_subclass(self): class UnicodeSubclass(unicode): From noreply at buildbot.pypy.org Wed Aug 20 09:54:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 09:54:05 +0200 (CEST) Subject: [pypy-commit] pypy default: str.partition(), str.rpartition() Message-ID: <20140820075405.5A9751C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72923:f7a311fdf0ac Date: 2014-08-20 09:50 +0200 http://bitbucket.org/pypy/pypy/changeset/f7a311fdf0ac/ Log: str.partition(), str.rpartition() diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -791,6 +791,20 @@ return self_as_uni.descr_rindex(space, w_sub, w_start, w_end) return self._StringMethods_descr_rindex(space, w_sub, w_start, w_end) + _StringMethods_descr_partition = descr_partition + def descr_partition(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_partition(space, w_sub) + return self._StringMethods_descr_partition(space, w_sub) + + _StringMethods_descr_rpartition = descr_rpartition + def descr_rpartition(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_unicode): + self_as_uni = unicode_from_encoded_object(space, self, None, None) + return self_as_uni.descr_rpartition(space, w_sub) + return self._StringMethods_descr_rpartition(space, w_sub) + def _join_return_one(self, space, w_obj): return (space.is_w(space.type(w_obj), space.w_str) or space.is_w(space.type(w_obj), space.w_unicode)) diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -628,6 
+628,13 @@ raises(ValueError, S.rpartition, u'') raises(TypeError, S.rpartition, None) + def test_partition_str_unicode(self): + x = 'abbbd'.rpartition(u'bb') + assert x == (u'ab', u'bb', u'd') + assert map(type, x) == [unicode, unicode, unicode] + raises(UnicodeDecodeError, '\x80'.partition, u'') + raises(UnicodeDecodeError, '\x80'.rpartition, u'') + def test_mul(self): zero = 0 assert type(u'' * zero) == type(zero * u'') == unicode From noreply at buildbot.pypy.org Wed Aug 20 12:35:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 12:35:37 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Don't insert the dummy malloc between the final GUARD_NOT_FORCED_2 Message-ID: <20140820103537.664F61C1489@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72924:dacc3c52da1f Date: 2014-08-20 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/dacc3c52da1f/ Log: Don't insert the dummy malloc between the final GUARD_NOT_FORCED_2 and the following FINISH! diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -27,6 +27,12 @@ self._do_stm_call('stm_hint_commit_soon', [], None, op.stm_location) return + # ---------- jump, finish, guard_not_forced_2 ---------- + if (opnum == rop.JUMP or opnum == rop.FINISH + or opnum == rop.GUARD_NOT_FORCED_2): + self.add_dummy_allocation() + self.newops.append(op) + return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): self.newops.append(op) @@ -84,11 +90,6 @@ ): self.newops.append(op) return - # ---------- jump, finish ---------- - if opnum == rop.JUMP or opnum == rop.FINISH: - self.add_dummy_allocation() - self.newops.append(op) - return # ---------- fall-back ---------- # Check that none of the ops handled here can collect. 
# This is not done by the fallback here diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1322,3 +1322,18 @@ $DUMMYALLOC jump(i1) """) + + def test_dummy_alloc_is_before_guard_not_forced_2(self): + self.check_rewrite(""" + [] + escape() + guard_not_forced_2() [] + finish() + """, """ + [] + $INEV + escape() + $DUMMYALLOC + guard_not_forced_2() [] + finish() + """) From noreply at buildbot.pypy.org Wed Aug 20 13:02:51 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 20 Aug 2014 13:02:51 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: port the write barrier placement code from c4 to place read barriers in c7 more Message-ID: <20140820110251.46D7D1C14FF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72925:5f2375647996 Date: 2014-08-20 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/5f2375647996/ Log: port the write barrier placement code from c4 to place read barriers in c7 more intelligently diff --git a/rpython/translator/stm/breakfinder.py b/rpython/translator/stm/breakfinder.py --- a/rpython/translator/stm/breakfinder.py +++ b/rpython/translator/stm/breakfinder.py @@ -9,6 +9,7 @@ #'jit_assembler_call', 'stm_enter_callback_call', 'stm_leave_callback_call', + 'stm_transaction_break', ]) for tb in TRANSACTION_BREAK: diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -1,33 +1,54 @@ from rpython.flowspace.model import SpaceOperation, Constant, Variable -from rpython.translator.unsimplify import varoftype +from rpython.translator.unsimplify import varoftype, insert_empty_block, insert_empty_startblock from rpython.rtyper.lltypesystem import lltype from rpython.translator.stm.support import 
is_immutable +from rpython.translator.simplify import join_blocks - +MALLOCS = set([ + 'malloc', 'malloc_varsize', + 'malloc_nonmovable', 'malloc_nonmovable_varsize', + ]) READ_OPS = set(['getfield', 'getarrayitem', 'getinteriorfield', 'raw_load']) + + +def needs_barrier(frm, to): + return to > frm + def is_gc_ptr(T): return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' +class Renaming(object): + def __init__(self, newvar, category): + self.newvar = newvar # a Variable or a Constant + self.TYPE = newvar.concretetype + self.category = category -def insert_stm_read_barrier(transformer, graph): - # We need to put enough 'stm_read' in the graph so that any - # execution of a READ_OP on some GC object is guaranteed to also - # execute either 'stm_read' or 'stm_write' on the same GC object - # during the same transaction. - # - # XXX this can be optimized a lot, but for now we go with the - # simplest possible solution... - # - gcremovetypeptr = transformer.translator.config.translation.gcremovetypeptr - for block in graph.iterblocks(): - if not block.operations: - continue - newops = [] + +class BlockTransformer(object): + + def __init__(self, stmtransformer, block): + self.stmtransformer = stmtransformer + self.block = block + self.patch = None + self.inputargs_category = None + self.inputargs_category_per_link = {} + + def init_start_block(self): + # all input args have category "any" + from_outside = ['A'] * len(self.block.inputargs) + self.inputargs_category_per_link[None] = from_outside + self.update_inputargs_category() + + + def analyze_inside_block(self, graph): + gcremovetypeptr = self.stmtransformer.translator.config.translation.gcremovetypeptr + + wants_a_barrier = {} stm_ignored = False - for op in block.operations: + for op in self.block.operations: is_getter = (op.opname in READ_OPS and op.result.concretetype is not lltype.Void and is_gc_ptr(op.args[0].concretetype)) @@ -38,25 +59,239 @@ # typeptr is always immutable pass elif ((op.opname in 
('getarraysize', 'getinteriorarraysize') and - is_gc_ptr(op.args[0].concretetype)) or + is_gc_ptr(op.args[0].concretetype)) or (is_getter and is_immutable(op))): # immutable getters + pass + elif is_getter: + if not stm_ignored: + wants_a_barrier[op] = 'R' + elif op.opname == 'weakref_deref': # 'weakref_deref': kind of immutable, but the GC has to see # which transactions read from a dying weakref, so we # need the barrier nonetheless... - pass - elif is_getter: - if not stm_ignored: - v_none = varoftype(lltype.Void) - newops.append(SpaceOperation('stm_read', - [op.args[0]], v_none)) - transformer.read_barrier_counts += 1 + wants_a_barrier[op] = 'R' elif op.opname == 'stm_ignored_start': - assert stm_ignored == False + assert not stm_ignored, "nested 'with stm_ignored'" stm_ignored = True elif op.opname == 'stm_ignored_stop': - assert stm_ignored == True + assert stm_ignored, "stm_ignored_stop without start?" stm_ignored = False - newops.append(op) - assert stm_ignored == False - block.operations = newops + + if stm_ignored and op in wants_a_barrier: + assert wants_a_barrier[op] == 'R' + if is_getter and is_gc_ptr(op.result.concretetype): + raise Exception( + "%r: 'with stm_ignored:' contains unsupported " + "operation %r reading a GC pointer" % (graph, op)) + # + if stm_ignored: + raise Exception("%r: 'with stm_ignored:' code body too complex" + % (graph,)) + self.wants_a_barrier = wants_a_barrier + + + def flow_through_block(self): + + def renfetch(v): + try: + return renamings[v] + except KeyError: + ren = Renaming(v, 'A') + renamings[v] = ren + return ren + + def get_category_or_null(v): + # 'v' is an original variable here, or a constant + if isinstance(v, Constant) and not v.value: # a NULL constant + return 'Z' + if v in renamings: + return renamings[v].category + if isinstance(v, Constant): + return 'R' + else: + return 'A' + + def renamings_get(v): + try: + ren = renamings[v] + except KeyError: + return v # unmodified + v2 = ren.newvar + if v2.concretetype 
== v.concretetype: + return v2 + v3 = varoftype(v.concretetype) + newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) + if lltype.castable(ren.TYPE, v3.concretetype) > 0: + ren.TYPE = v3.concretetype + return v3 + + # note: 'renamings' maps old vars to new vars, but cast_pointers + # are done lazily. It means that the two vars may not have + # exactly the same type. + renamings = {} # {original-var: Renaming(newvar, category)} + newoperations = [] + stmtransformer = self.stmtransformer + + # make the initial trivial renamings needed to have some precise + # categories for the input args + for v, cat in zip(self.block.inputargs, self.inputargs_category): + if is_gc_ptr(v.concretetype): + assert cat is not None + renamings[v] = Renaming(v, cat) + + for op in self.block.operations: + # + if (op.opname in ('cast_pointer', 'same_as') and + is_gc_ptr(op.result.concretetype)): + renamings[op.result] = renfetch(op.args[0]) + continue + # + to = self.wants_a_barrier.get(op) + if to is not None: + ren = renfetch(op.args[0]) + frm = ren.category + if needs_barrier(frm, to): + stmtransformer.read_barrier_counts += 1 + v_none = varoftype(lltype.Void) + newoperations.append( + SpaceOperation('stm_read', [ren.newvar], v_none)) + ren.category = to + # + # XXX: from c4: we can probably just append the original op + newop = SpaceOperation(op.opname, + [renamings_get(v) for v in op.args], + op.result) + newoperations.append(newop) + # + if (stmtransformer.break_analyzer.analyze(op) + or op.opname == 'debug_stm_flush_barrier'): + # this operation can perform a transaction break: + # all pointers are lowered to 'A' + for ren in renamings.values(): + ren.category = 'A' + # + if op.opname in MALLOCS: + assert op.result not in renamings + renamings[op.result] = Renaming(op.result, 'R') + # + if op.opname in ('setfield', 'setarrayitem', 'setinteriorfield', + 'raw_store'): + # compare with logic in stmframework.py + # ops that need a write barrier also make the var 'R' + if 
(op.args[-1].concretetype is not lltype.Void + and is_gc_ptr(op.args[0].concretetype)): + renfetch(op.args[0]).category = 'R' + + if isinstance(self.block.exitswitch, Variable): + switchv = renamings_get(self.block.exitswitch) + else: + switchv = None + blockoperations = newoperations + linkoperations = [] + for link in self.block.exits: + output_categories = [] + for v in link.args: + if is_gc_ptr(v.concretetype): + cat = get_category_or_null(v) + else: + cat = None + output_categories.append(cat) + newoperations = [] + newargs = [renamings_get(v) for v in link.args] + linkoperations.append((newargs, newoperations, output_categories)) + # + # Record how we'd like to patch the block, but don't do any + # patching yet + self.patch = (blockoperations, switchv, linkoperations) + + + def update_targets(self, block_transformers): + (_, _, linkoperations) = self.patch + assert len(linkoperations) == len(self.block.exits) + targetbts = [] + for link, (_, _, output_categories) in zip(self.block.exits, + linkoperations): + targetblock = link.target + if targetblock not in block_transformers: + continue # ignore the exit block + targetbt = block_transformers[targetblock] + targetbt.inputargs_category_per_link[link] = output_categories + if targetbt.update_inputargs_category(): + targetbts.append(targetbt) + return set(targetbts) + + def update_inputargs_category(self): + values = self.inputargs_category_per_link.values() + newcats = [] + for i, v in enumerate(self.block.inputargs): + if is_gc_ptr(v.concretetype): + cats = [output_categories[i] for output_categories in values] + assert None not in cats + newcats.append(min(cats)) + else: + newcats.append(None) + if newcats != self.inputargs_category: + self.inputargs_category = newcats + return True + else: + return False + + + def patch_now(self): + if self.patch is None: + return + newoperations, switchv, linkoperations = self.patch + self.block.operations = newoperations + if switchv is not None: + self.block.exitswitch = 
switchv + assert len(linkoperations) == len(self.block.exits) + for link, (newargs, newoperations, _) in zip(self.block.exits, + linkoperations): + link.args[:] = newargs + if newoperations: + # must put them in a fresh block along the link + annotator = self.stmtransformer.translator.annotator + newblock = insert_empty_block(annotator, link, + newoperations) + + +def insert_stm_read_barrier(stmtransformer, graph): + """This function uses the following characters for 'categories': + + * 'A': any general pointer + * 'R': the read (or write) barrier was applied + * 'Z': the null constant + + The letters are chosen so that a barrier is needed to change a + pointer from category x to category y if and only if y > x. + """ + # We need to put enough 'stm_read' in the graph so that any + # execution of a READ_OP on some GC object is guaranteed to also + # execute either 'stm_read' or 'stm_write' on the same GC object + # during the same transaction. + + join_blocks(graph) + annotator = stmtransformer.translator.annotator + insert_empty_startblock(annotator, graph) + + block_transformers = {} + + for block in graph.iterblocks(): + if block.operations == (): + continue + bt = BlockTransformer(stmtransformer, block) + bt.analyze_inside_block(graph) + block_transformers[block] = bt + + bt = block_transformers[graph.startblock] + bt.init_start_block() + pending = set([bt]) + + while pending: + bt = pending.pop() + bt.flow_through_block() + pending |= bt.update_targets(block_transformers) + + for bt in block_transformers.values(): + bt.patch_now() diff --git a/rpython/translator/stm/test/test_readbarrier.py b/rpython/translator/stm/test/test_readbarrier.py --- a/rpython/translator/stm/test/test_readbarrier.py +++ b/rpython/translator/stm/test/test_readbarrier.py @@ -1,6 +1,8 @@ from rpython.rlib.objectmodel import stm_ignored from rpython.translator.stm.test.transform_support import BaseTestTransform -from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.rstm import 
register_invoke_around_extcall +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.lloperation import llop class TestReadBarrier(BaseTestTransform): @@ -26,6 +28,19 @@ assert res == 42 assert self.read_barriers == [x1] + def test_simple_read_after_write(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 42 + + def f1(n): + x1.foo = 7 # write barrier will be done + return x1.foo + + res = self.interpret(f1, [4]) + assert res == 7 + assert self.read_barriers == [] # implicitly by the write-barrier + def test_stm_ignored_read(self): X = lltype.GcStruct('X', ('foo', lltype.Signed)) x1 = lltype.malloc(X, immortal=True) @@ -48,3 +63,198 @@ res = self.interpret(f1, [2]) assert res == 42 assert self.read_barriers == [x1] + + def test_array_size(self): + array_gc = lltype.GcArray(('z', lltype.Signed)) + array_nongc = lltype.Array(('z', lltype.Signed)) + Q = lltype.GcStruct('Q', + ('gc', lltype.Ptr(array_gc)), + ('raw', lltype.Ptr(array_nongc))) + q = lltype.malloc(Q, immortal=True) + q.gc = lltype.malloc(array_gc, n=3, flavor='gc', immortal=True) + q.raw = lltype.malloc(array_nongc, n=5, flavor='raw', immortal=True) + def f1(n): + if n == 1: + return len(q.gc) + else: + return len(q.raw) + res = self.interpret(f1, [1]) + assert self.read_barriers == [q] + res = self.interpret(f1, [0]) + assert self.read_barriers == [q] + + + def test_multiple_reads(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed), + ('bar', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True) + x1.foo = 6 + x1.bar = 7 + x2 = lltype.malloc(X, immortal=True) + x2.foo = 81 + x2.bar = -1 + + def f1(n): + if n > 1: + return x2.foo * x2.bar + else: + return x1.foo * x1.bar + + res = self.interpret(f1, [4]) + assert res == -81 + assert self.read_barriers == [x2] + + + def test_dont_repeat_read_barrier_after_malloc(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + x1 = lltype.malloc(X, immortal=True, 
zero=True) + def f1(n): + t1 = x1.foo + lltype.malloc(X) + t1 += x1.foo + return t1 + + self.interpret(f1, [4]) + assert self.read_barriers == [x1] + + def test_call_external_release_gil(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_release_gil() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x, x] + + def test_call_external_any_gcobj(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_any_gcobj() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x] + + def test_call_external_safest(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(p): + register_invoke_around_extcall() + x1 = p.foo + external_safest() + x2 = p.foo + return x1 * x2 + + x = lltype.malloc(X, immortal=True); x.foo = 6 + res = self.interpret(f1, [x]) + assert res == 36 + assert self.read_barriers == [x] + + def test_simple_loop(self): + X = lltype.GcStruct('X', ('foo', lltype.Signed)) + def f1(x, i): + while i > 0: + i -= x.foo + return i + x = lltype.malloc(X, immortal=True); x.foo = 1 + res = self.interpret(f1, [x, 5]) + assert res == 0 + # for now we get this. 
Later, we could probably optimize it + assert self.read_barriers == [x] * 5 + + + def test_read_immutable(self): + class Foo: + _immutable_ = True + + def f1(n): + x = Foo() + x.foo = 4 + llop.debug_stm_flush_barrier(lltype.Void) + if n > 1: + n = x.foo + llop.debug_stm_flush_barrier(lltype.Void) + return x.foo + n + + res = self.interpret(f1, [4]) + assert res == 8 + assert len(self.read_barriers) == 0 + + def test_read_immutable_prebuilt(self): + class Foo: + _immutable_ = True + x1 = Foo() + x1.foo = 42 + x2 = Foo() + x2.foo = 81 + + def f1(n): + if n > 1: + return x2.foo + else: + return x1.foo + + res = self.interpret(f1, [4]) + assert res == 81 + assert self.read_barriers == [] + + def test_immut_barrier_before_weakref_deref(self): + import weakref + class Foo: + pass + + def f1(): + x = Foo() + w = weakref.ref(x) + llop.debug_stm_flush_barrier(lltype.Void) + return w() + + self.interpret(f1, []) + assert len(self.read_barriers) == 1 + + + def test_transaction_breaking_ops(self): + class X: + a = 1 + x = X() + + def f1(f): + x.a = f + t = x.a # no read barrier + llop.stm_commit_if_not_atomic(lltype.Void) + t += x.a + llop.stm_start_if_not_atomic(lltype.Void) + t += x.a + llop.stm_transaction_break(lltype.Void) + t += x.a + llop.stm_enter_callback_call(lltype.Void) + t += x.a + llop.stm_leave_callback_call(lltype.Void) + t += x.a + return t + + self.interpret(f1, [1]) + assert len(self.read_barriers) == 5 + + +external_release_gil = rffi.llexternal('external_release_gil', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + releasegil=True) # GIL is released +external_any_gcobj = rffi.llexternal('external_any_gcobj', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=True, + releasegil=False) # GIL is not released +external_safest = rffi.llexternal('external_safest', [], lltype.Void, + _callable=lambda: None, + random_effects_on_gcobjs=False, + releasegil=False) # GIL is not released diff --git 
a/rpython/translator/stm/test/transform_support.py b/rpython/translator/stm/test/transform_support.py --- a/rpython/translator/stm/test/transform_support.py +++ b/rpython/translator/stm/test/transform_support.py @@ -68,10 +68,7 @@ stm_ignored = False def eval(self): - self.gcptrs_actually_read = [] result = LLFrame.eval(self) - for x in self.gcptrs_actually_read: - assert x in self.llinterpreter.tester.read_barriers return result def all_stm_ptrs(self): @@ -83,9 +80,6 @@ def op_stm_read(self, obj): self.llinterpreter.tester.read_barriers.append(obj) - def op_stm_write(self, obj): - self.op_stm_read(obj) # implicitly counts as a read barrier too - def op_stm_ignored_start(self): assert self.stm_ignored == False self.stm_ignored = True @@ -95,61 +89,52 @@ self.stm_ignored = False def op_getfield(self, obj, field): - if obj._TYPE.TO._gckind == 'gc': - if obj._TYPE.TO._immutable_field(field): - if not self.stm_ignored: - self.gcptrs_actually_read.append(obj) return LLFrame.op_getfield(self, obj, field) def op_setfield(self, obj, fieldname, fieldvalue): - if obj._TYPE.TO._gckind == 'gc': - T = lltype.typeOf(fieldvalue) - if isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc': - self.check_category(obj, 'W') - else: - self.check_category(obj, 'V') - # convert R -> Q all other pointers to the same object we can find - for p in self.all_stm_ptrs(): - if p._category == 'R' and p._T == obj._T and p == obj: - _stmptr._category.__set__(p, 'Q') return LLFrame.op_setfield(self, obj, fieldname, fieldvalue) def op_cast_pointer(self, RESTYPE, obj): if obj._TYPE.TO._gckind == 'gc': - cat = self.check_category(obj, None) p = opimpl.op_cast_pointer(RESTYPE, obj) - return _stmptr(p, cat) + return p return lltype.cast_pointer(RESTYPE, obj) op_cast_pointer.need_result_type = True def op_cast_opaque_ptr(self, RESTYPE, obj): if obj._TYPE.TO._gckind == 'gc': - cat = self.check_category(obj, None) p = lltype.cast_opaque_ptr(RESTYPE, obj) - return _stmptr(p, cat) + return p return 
LLFrame.op_cast_opaque_ptr(self, RESTYPE, obj) op_cast_opaque_ptr.need_result_type = True def op_malloc(self, obj, flags): assert flags['flavor'] == 'gc' - # convert all existing pointers W -> V - for p in self.all_stm_ptrs(): - if p._category == 'W': - _stmptr._category.__set__(p, 'V') p = LLFrame.op_malloc(self, obj, flags) - ptr2 = _stmptr(p, 'W') - self.llinterpreter.tester.writemode.add(ptr2._obj) + ptr2 = p return ptr2 def transaction_break(self): - # convert -> I all other pointers to the same object we can find - for p in self.all_stm_ptrs(): - if p._category > 'I': - _stmptr._category.__set__(p, 'I') + pass def op_stm_commit_transaction(self): self.transaction_break() + def op_stm_transaction_break(self): + self.transaction_break() + + def op_stm_commit_if_not_atomic(self): + self.transaction_break() + + def op_stm_start_if_not_atomic(self): + self.transaction_break() + + def op_stm_enter_callback_call(self): + self.transaction_break() + + def op_stm_leave_callback_call(self): + self.transaction_break() + def op_stm_begin_inevitable_transaction(self): self.transaction_break() diff --git a/rpython/translator/stm/transform.py b/rpython/translator/stm/transform.py --- a/rpython/translator/stm/transform.py +++ b/rpython/translator/stm/transform.py @@ -1,5 +1,6 @@ from rpython.translator.stm.inevitable import insert_turn_inevitable from rpython.translator.stm.readbarrier import insert_stm_read_barrier +from rpython.translator.stm.breakfinder import TransactionBreakAnalyzer from rpython.translator.c.support import log @@ -25,8 +26,12 @@ def transform_read_barrier(self): self.read_barrier_counts = 0 + self.break_analyzer = TransactionBreakAnalyzer(self.translator) + for graph in self.translator.graphs: insert_stm_read_barrier(self, graph) + + del self.break_analyzer log.info("%d read barriers inserted" % (self.read_barrier_counts,)) def transform_turn_inevitable(self): From noreply at buildbot.pypy.org Wed Aug 20 13:02:52 2014 From: noreply at buildbot.pypy.org 
(Raemi) Date: Wed, 20 Aug 2014 13:02:52 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: simplify by removing the renaming Message-ID: <20140820110252.738AB1C14FF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72926:3b0d4669070d Date: 2014-08-20 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/3b0d4669070d/ Log: simplify by removing the renaming diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -19,12 +19,6 @@ def is_gc_ptr(T): return isinstance(T, lltype.Ptr) and T.TO._gckind == 'gc' -class Renaming(object): - def __init__(self, newvar, category): - self.newvar = newvar # a Variable or a Constant - self.TYPE = newvar.concretetype - self.category = category - class BlockTransformer(object): @@ -93,43 +87,22 @@ def flow_through_block(self): - def renfetch(v): - try: - return renamings[v] - except KeyError: - ren = Renaming(v, 'A') - renamings[v] = ren - return ren + def catfetch(v): + return cat_map.setdefault(v, 'A') def get_category_or_null(v): # 'v' is an original variable here, or a constant if isinstance(v, Constant) and not v.value: # a NULL constant return 'Z' - if v in renamings: - return renamings[v].category + if v in cat_map: + return cat_map[v] if isinstance(v, Constant): return 'R' else: return 'A' - def renamings_get(v): - try: - ren = renamings[v] - except KeyError: - return v # unmodified - v2 = ren.newvar - if v2.concretetype == v.concretetype: - return v2 - v3 = varoftype(v.concretetype) - newoperations.append(SpaceOperation('cast_pointer', [v2], v3)) - if lltype.castable(ren.TYPE, v3.concretetype) > 0: - ren.TYPE = v3.concretetype - return v3 - # note: 'renamings' maps old vars to new vars, but cast_pointers - # are done lazily. It means that the two vars may not have - # exactly the same type. 
- renamings = {} # {original-var: Renaming(newvar, category)} + cat_map = {} # var: category newoperations = [] stmtransformer = self.stmtransformer @@ -138,42 +111,38 @@ for v, cat in zip(self.block.inputargs, self.inputargs_category): if is_gc_ptr(v.concretetype): assert cat is not None - renamings[v] = Renaming(v, cat) + cat_map[v] = cat for op in self.block.operations: # if (op.opname in ('cast_pointer', 'same_as') and is_gc_ptr(op.result.concretetype)): - renamings[op.result] = renfetch(op.args[0]) - continue + cat_map[op.result] = catfetch(op.args[0]) + assert not self.wants_a_barrier.get(op) # to = self.wants_a_barrier.get(op) if to is not None: - ren = renfetch(op.args[0]) - frm = ren.category + var = op.args[0] + frm = catfetch(op.args[0]) if needs_barrier(frm, to): stmtransformer.read_barrier_counts += 1 v_none = varoftype(lltype.Void) newoperations.append( - SpaceOperation('stm_read', [ren.newvar], v_none)) - ren.category = to + SpaceOperation('stm_read', [var], v_none)) + cat_map[var] = to # - # XXX: from c4: we can probably just append the original op - newop = SpaceOperation(op.opname, - [renamings_get(v) for v in op.args], - op.result) - newoperations.append(newop) + newoperations.append(op) # if (stmtransformer.break_analyzer.analyze(op) or op.opname == 'debug_stm_flush_barrier'): # this operation can perform a transaction break: # all pointers are lowered to 'A' - for ren in renamings.values(): - ren.category = 'A' + for v in cat_map.keys(): + cat_map[v] = 'A' # if op.opname in MALLOCS: - assert op.result not in renamings - renamings[op.result] = Renaming(op.result, 'R') + assert op.result not in cat_map + cat_map[op.result] = 'R' # if op.opname in ('setfield', 'setarrayitem', 'setinteriorfield', 'raw_store'): @@ -181,10 +150,10 @@ # ops that need a write barrier also make the var 'R' if (op.args[-1].concretetype is not lltype.Void and is_gc_ptr(op.args[0].concretetype)): - renfetch(op.args[0]).category = 'R' + cat_map[op.args[0]] = 'R' if 
isinstance(self.block.exitswitch, Variable): - switchv = renamings_get(self.block.exitswitch) + switchv = self.block.exitswitch else: switchv = None blockoperations = newoperations @@ -198,7 +167,7 @@ cat = None output_categories.append(cat) newoperations = [] - newargs = [renamings_get(v) for v in link.args] + newargs = link.args linkoperations.append((newargs, newoperations, output_categories)) # # Record how we'd like to patch the block, but don't do any From noreply at buildbot.pypy.org Wed Aug 20 13:02:53 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 20 Aug 2014 13:02:53 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: another small simplification Message-ID: <20140820110253.942DC1C14FF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72927:28c253683fc2 Date: 2014-08-20 11:25 +0200 http://bitbucket.org/pypy/pypy/changeset/28c253683fc2/ Log: another small simplification diff --git a/rpython/translator/stm/readbarrier.py b/rpython/translator/stm/readbarrier.py --- a/rpython/translator/stm/readbarrier.py +++ b/rpython/translator/stm/readbarrier.py @@ -167,8 +167,7 @@ cat = None output_categories.append(cat) newoperations = [] - newargs = link.args - linkoperations.append((newargs, newoperations, output_categories)) + linkoperations.append((newoperations, output_categories)) # # Record how we'd like to patch the block, but don't do any # patching yet @@ -179,7 +178,7 @@ (_, _, linkoperations) = self.patch assert len(linkoperations) == len(self.block.exits) targetbts = [] - for link, (_, _, output_categories) in zip(self.block.exits, + for link, (_, output_categories) in zip(self.block.exits, linkoperations): targetblock = link.target if targetblock not in block_transformers: @@ -215,9 +214,8 @@ if switchv is not None: self.block.exitswitch = switchv assert len(linkoperations) == len(self.block.exits) - for link, (newargs, newoperations, _) in zip(self.block.exits, + for link, (newoperations, _) in zip(self.block.exits, 
linkoperations): - link.args[:] = newargs if newoperations: # must put them in a fresh block along the link annotator = self.stmtransformer.translator.annotator From noreply at buildbot.pypy.org Wed Aug 20 13:02:54 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Wed, 20 Aug 2014 13:02:54 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Merge Message-ID: <20140820110254.C37C01C14FF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r72928:24b0a79addbe Date: 2014-08-20 13:02 +0200 http://bitbucket.org/pypy/pypy/changeset/24b0a79addbe/ Log: Merge diff --git a/rpython/jit/backend/llsupport/stmrewrite.py b/rpython/jit/backend/llsupport/stmrewrite.py --- a/rpython/jit/backend/llsupport/stmrewrite.py +++ b/rpython/jit/backend/llsupport/stmrewrite.py @@ -27,6 +27,12 @@ self._do_stm_call('stm_hint_commit_soon', [], None, op.stm_location) return + # ---------- jump, finish, guard_not_forced_2 ---------- + if (opnum == rop.JUMP or opnum == rop.FINISH + or opnum == rop.GUARD_NOT_FORCED_2): + self.add_dummy_allocation() + self.newops.append(op) + return # ---------- pure operations, guards ---------- if op.is_always_pure() or op.is_guard() or op.is_ovf(): self.newops.append(op) @@ -84,11 +90,6 @@ ): self.newops.append(op) return - # ---------- jump, finish ---------- - if opnum == rop.JUMP or opnum == rop.FINISH: - self.add_dummy_allocation() - self.newops.append(op) - return # ---------- fall-back ---------- # Check that none of the ops handled here can collect. 
# This is not done by the fallback here diff --git a/rpython/jit/backend/llsupport/test/test_stmrewrite.py b/rpython/jit/backend/llsupport/test/test_stmrewrite.py --- a/rpython/jit/backend/llsupport/test/test_stmrewrite.py +++ b/rpython/jit/backend/llsupport/test/test_stmrewrite.py @@ -1322,3 +1322,18 @@ $DUMMYALLOC jump(i1) """) + + def test_dummy_alloc_is_before_guard_not_forced_2(self): + self.check_rewrite(""" + [] + escape() + guard_not_forced_2() [] + finish() + """, """ + [] + $INEV + escape() + $DUMMYALLOC + guard_not_forced_2() [] + finish() + """) From noreply at buildbot.pypy.org Wed Aug 20 13:31:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 13:31:00 +0200 (CEST) Subject: [pypy-commit] pypy default: Rename Block.get_graph() to make it clear that it's a slow method, used Message-ID: <20140820113100.722F01C148A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72929:b059fb4e325c Date: 2014-08-20 13:30 +0200 http://bitbucket.org/pypy/pypy/changeset/b059fb4e325c/ Log: Rename Block.get_graph() to make it clear that it's a slow method, used only (so far) by the pygame viewer diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -252,7 +252,7 @@ from rpython.translator.tool.graphpage import try_show try_show(self) - def get_graph(self): + def _slowly_get_graph(self): import gc pending = [self] # pending blocks seen = {self: True, None: True} diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -409,7 +409,7 @@ elif isinstance(obj, Link): try_show(obj.prevblock) elif isinstance(obj, Block): - graph = obj.get_graph() + graph = obj._slowly_get_graph() if isinstance(graph, FunctionGraph): graph.show() return From noreply at buildbot.pypy.org Wed Aug 20 15:09:03 2014 From: noreply at buildbot.pypy.org (fijal) Date: 
Wed, 20 Aug 2014 15:09:03 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: fix the case of llstr(char) Message-ID: <20140820130903.D4F661C1482@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: use-file-star-for-file Changeset: r72930:311385061a9b Date: 2014-08-20 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/311385061a9b/ Log: fix the case of llstr(char) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -422,11 +422,13 @@ def specialize_call(self, hop): hop.exception_cannot_occur() - assert hop.args_r[0].lowleveltype == hop.r_result.lowleveltype v_ll_str, = hop.inputargs(*hop.args_r) - return hop.genop('same_as', [v_ll_str], - resulttype = hop.r_result.lowleveltype) - + if hop.args_r[0].lowleveltype == hop.r_result.lowleveltype: + return hop.genop('same_as', [v_ll_str], + resulttype = hop.r_result.lowleveltype) + else: + return hop.gendirectcall(hop.args_r[0].ll.ll_chr2str, v_ll_str) + return hlstr, llstr hlstr, llstr = make_string_entries(str) diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -34,6 +34,14 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + def test_llstr_const_char(self): + def f(arg): + s = llstr(hlstr(arg)[0]) + return len(s.chars) + + res = self.interpret(f, [self.string_to_ll("abc")]) + assert res == 1 + def test_hlunicode(self): s = mallocunicode(3) s.chars[0] = u"a" From noreply at buildbot.pypy.org Wed Aug 20 15:09:05 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Aug 2014 15:09:05 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: (arigo) simplify Message-ID: <20140820130905.07FBE1C1482@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: use-file-star-for-file Changeset: r72931:1c38dd6f99af Date: 2014-08-20 
15:07 +0200 http://bitbucket.org/pypy/pypy/changeset/1c38dd6f99af/ Log: (arigo) simplify diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -421,13 +421,15 @@ return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import (string_repr, + unicode_repr) hop.exception_cannot_occur() - v_ll_str, = hop.inputargs(*hop.args_r) - if hop.args_r[0].lowleveltype == hop.r_result.lowleveltype: - return hop.genop('same_as', [v_ll_str], - resulttype = hop.r_result.lowleveltype) + if strtype is str: + v_ll_str = hop.inputarg(string_repr, 0) else: - return hop.gendirectcall(hop.args_r[0].ll.ll_chr2str, v_ll_str) + v_ll_str = hop.inputarg(unicode_repr, 0) + return hop.genop('same_as', [v_ll_str], + resulttype = hop.r_result.lowleveltype) return hlstr, llstr From noreply at buildbot.pypy.org Wed Aug 20 16:52:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 16:52:42 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: Add the complete EP talks Message-ID: <20140820145242.A51E91D3722@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r528:f25b6226ae59 Date: 2014-08-20 16:52 +0200 http://bitbucket.org/pypy/pypy.org/changeset/f25b6226ae59/ Log: Add the complete EP talks diff --git a/talk/ep2014-status.html b/talk/ep2014-status.html --- a/talk/ep2014-status.html +++ b/talk/ep2014-status.html @@ -368,6 +368,9 @@ EuroPython (except last year; hence the "no no, PyPy is not dead" part of the title of this talk).

          +
          +See the complete talk (in pdf) +
          diff --git a/talk/ep2014-status.pdf b/talk/ep2014-status.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aa6078c72592f3c1704408ce9759135c24a70c28 GIT binary patch [cut] diff --git a/talk/ep2014-stm.html b/talk/ep2014-stm.html --- a/talk/ep2014-stm.html +++ b/talk/ep2014-stm.html @@ -374,6 +374,9 @@ languages like Python. I will also mention CPython: how hard (or not) it would be to change the CPython source code to use the same approach.

          +
          +See the complete talk (in html) +
          diff --git a/talk/ep2014-stm/fig4.svg b/talk/ep2014-stm/fig4.svg new file mode 100644 --- /dev/null +++ b/talk/ep2014-stm/fig4.svg @@ -0,0 +1,4 @@ + + + + diff --git a/talk/ep2014-stm/stylesheet.css b/talk/ep2014-stm/stylesheet.css new file mode 100644 --- /dev/null +++ b/talk/ep2014-stm/stylesheet.css @@ -0,0 +1,333 @@ +/* +:Author: David Goodger (goodger at python.org) +:Id: $Id: html4css1.css 7614 2013-02-21 15:55:51Z milde $ +:Copyright: This stylesheet has been placed in the public domain. + +Default cascading style sheet for the HTML output of Docutils. + +See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to +customize this style sheet. +*/ + +/* used to remove borders from tables and images */ +.borderless, table.borderless td, table.borderless th { + border: 0 } + +table.borderless td, table.borderless th { + /* Override padding for "table.docutils td" with "! important". + The right padding separates the table cells. */ + padding: 0 0.5em 0 0 ! important } + +.first { + /* Override more specific margin styles with "! important". */ + margin-top: 0 ! important } + +.last, .with-subtitle { + margin-bottom: 0 ! important } + +.hidden { + display: none } + +a.toc-backref { + text-decoration: none ; + color: black } + +blockquote.epigraph { + margin: 2em 5em ; } + +dl.docutils dd { + margin-bottom: 0.5em } + +object[type="image/svg+xml"], object[type="application/x-shockwave-flash"] { + overflow: hidden; +} + +/* Uncomment (and remove this text!) 
to get bold-faced definition list terms +dl.docutils dt { + font-weight: bold } +*/ + +div.abstract { + margin: 2em 5em } + +div.abstract p.topic-title { + font-weight: bold ; + text-align: center } + +div.admonition, div.attention, div.caution, div.danger, div.error, +div.hint, div.important, div.note, div.tip, div.warning { + margin: 2em ; + border: medium outset ; + padding: 1em } + +div.admonition p.admonition-title, div.hint p.admonition-title, +div.important p.admonition-title, div.note p.admonition-title, +div.tip p.admonition-title { + font-weight: bold ; + font-family: sans-serif } + +div.attention p.admonition-title, div.caution p.admonition-title, +div.danger p.admonition-title, div.error p.admonition-title, +div.warning p.admonition-title, .code .error { + color: red ; + font-weight: bold ; + font-family: sans-serif } + +/* Uncomment (and remove this text!) to get reduced vertical space in + compound paragraphs. +div.compound .compound-first, div.compound .compound-middle { + margin-bottom: 0.5em } + +div.compound .compound-last, div.compound .compound-middle { + margin-top: 0.5em } +*/ + +div.dedication { + margin: 2em 5em ; + text-align: center ; + font-style: italic } + +div.dedication p.topic-title { + font-weight: bold ; + font-style: normal } + +div.figure { + margin-left: 2em ; + margin-right: 2em } + +div.footer, div.header { + clear: both; + font-size: smaller } + +div#header { + height: 88px ! 
important } + +div.line-block { + display: block ; + margin-top: 1em ; + margin-bottom: 1em } + +div.line-block div.line-block { + margin-top: 0 ; + margin-bottom: 0 ; + margin-left: 1.5em } + +div.sidebar { + margin: 0 0 0.5em 1em ; + border: medium outset ; + padding: 1em ; + background-color: #ffffee ; + width: 40% ; + float: right ; + clear: right } + +div.sidebar p.rubric { + font-family: sans-serif ; + font-size: medium } + +div.system-messages { + margin: 5em } + +div.system-messages h1 { + color: red } + +div.system-message { + border: medium outset ; + padding: 1em } + +div.system-message p.system-message-title { + color: red ; + font-weight: bold } + +div.topic { + margin: 2em } + +h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, +h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { + margin-top: 0.4em } + +h1.title { + text-align: center } + +h2.subtitle { + text-align: center } + +hr.docutils { + width: 75% } + +img.align-left, .figure.align-left, object.align-left { + clear: left ; + float: left ; + margin-right: 1em } + +img.align-right, .figure.align-right, object.align-right { + clear: right ; + float: right ; + margin-left: 1em } + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left } + +.align-center { + clear: both ; + text-align: center } + +.align-right { + text-align: right } + +/* reset inner alignment in figures */ +div.align-right { + text-align: inherit } + +/* div.align-center * { */ +/* text-align: left } */ + +ol.simple, ul.simple { + margin-bottom: 1em } + +ol.arabic { + list-style: decimal } + +ol.loweralpha { + list-style: lower-alpha } + +ol.upperalpha { + list-style: upper-alpha } + +ol.lowerroman { + list-style: lower-roman } + +ol.upperroman { + list-style: upper-roman } + +p.attribution { + text-align: right ; + margin-left: 50% } + +p.caption { + font-style: italic } + +p.credits { + 
font-style: italic ; + font-size: smaller } + +p.label { + white-space: nowrap } + +p.rubric { + font-weight: bold ; + font-size: larger ; + color: maroon ; + text-align: center } + +p.sidebar-title { + font-family: sans-serif ; + font-weight: bold ; + font-size: larger } + +p.sidebar-subtitle { + font-family: sans-serif ; + font-weight: bold } + +p.topic-title { + font-weight: bold } + +pre.address { + margin-bottom: 0 ; + margin-top: 0 ; + font: inherit } + +pre.literal-block, pre.doctest-block, pre.math, pre.code { + margin-left: 2em ; + margin-right: 2em } + +pre.code .ln { color: grey; } /* line numbers */ +pre.code, code { background-color: #eeeeee } +pre.code .comment, code .comment { color: #5C6576 } +pre.code .keyword, code .keyword { color: #3B0D06; font-weight: bold } +pre.code .literal.string, code .literal.string { color: #0C5404 } +pre.code .name.builtin, code .name.builtin { color: #352B84 } +pre.code .deleted, code .deleted { background-color: #DEB0A1} +pre.code .inserted, code .inserted { background-color: #A3D289} + +span.classifier { + font-family: sans-serif ; + font-style: oblique } + +span.classifier-delimiter { + font-family: sans-serif ; + font-weight: bold } + +span.interpreted { + font-family: sans-serif } + +span.option { + white-space: nowrap } + +span.pre { + white-space: pre } + +span.problematic { + color: red } + +span.section-subtitle { + /* font-size relative to parent (h1..h6 element) */ + font-size: 80% } + +table.citation { + border-left: solid 1px gray; + margin-left: 1px } + +table.docinfo { + margin: 2em 4em } + +table.docutils { + margin-top: 0.5em ; + margin-bottom: 0.5em } + +table.footnote { + border-left: solid 1px black; + margin-left: 1px } + +table.docutils td, table.docutils th, +table.docinfo td, table.docinfo th { + padding-left: 0.5em ; + padding-right: 0.5em ; + vertical-align: top } + +table.docutils th.field-name, table.docinfo th.docinfo-name { + font-weight: bold ; + text-align: left ; + white-space: nowrap ; 
+ padding-left: 0 } + +/* "booktabs" style (no vertical lines) */ +table.docutils.booktabs { + border: 0px; + border-top: 2px solid; + border-bottom: 2px solid; + border-collapse: collapse; +} +table.docutils.booktabs * { + border: 0px; +} +table.docutils.booktabs th { + border-bottom: thin solid; + text-align: left; +} + +h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, +h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { + font-size: 100% } + +ul.auto-toc { + list-style-type: none } + +.slide li { + padding-top: 0.2em } diff --git a/talk/ep2014-stm/talk.html b/talk/ep2014-stm/talk.html new file mode 100644 --- /dev/null +++ b/talk/ep2014-stm/talk.html @@ -0,0 +1,704 @@ + + + + + + + +Using All These Cores: Transactional Memory in PyPy + + + + + + + + + + + + + + +
          +
          +
          + + +
          +
          +
          +

          Using All These Cores: Transactional Memory in PyPy

          + +

          Armin Rigo - EuroPython 2014

          +
          +
          +
          +

          Part 1 - Intro and Current Status

          +
          +
          +

          Introduction

          +
            +
          • PyPy-STM: Software Transactional Memory
          • +
          • On-going research project:
              +
            • by Remi Meier and myself
            • +
            • helped by crowdfunding, thanks to all donors
            • +
            +
          • +
          • Started as a EuroPython 2011 lightning talk
          • +
          +
          +
          +

          Why is there a GIL?

          +
            +
          • GIL = Global Interpreter Lock
          • +
          • initially: CPython was single threaded
          • +
          • for concurrency (but not parallelism):
              +
            • provide concurrently running threads
            • +
            +
          • +
          • easiest way to retrofit into interpreter:
              +
            • acquire GIL around the execution of bytecode instructions
            • +
            • easy for refcounting, too
            • +
            +
          • +
          +
          +
          +

          Consequences (+)

          +
            +
          • atomic & isolated instructions:
              +
            • things like list.append() are atomic
            • +
            • tons of websites mention this
            • +
            • latent races if Python becomes really parallel
            • +
            +
          • +
          • sequential consistency:
              +
            • fewer surprises; "all variables volatile"
            • +
            +
          • +
          +
          +
          +

          Consequences (-)

          +
            +
          • obviously, no parallelism
          • +
          • GIL not available to application:
              +
            • all difficulties of concurrency still there
            • +
            • need application-level locking
            • +
            +
          • +
          +
          +
          +

          Removing the GIL

          +
            +
            1. +
            2. Fine-grained locking
            3. +
            +
          • +
            1. +
            2. Shared-nothing
            3. +
            +
          • +
            1. +
            2. Transactional memory
            3. +
            +
          • +
          +
          +
          +

          Fine-grained locking

          +
            +
          • replace GIL with locks on objs / data structures
          • +
          • accessing different objects can run in parallel
          • +
          • harder to implement:
              +
            • many locks -> deadlock risks
            • +
            • refcounting issue
            • +
            +
          • +
          • overhead of lock/unlock on objs:
              +
            • Jython depends on JVM for good lock removal
            • +
            +
          • +
          • still need application-level locking
          • +
          +
          +
          +

          Shared-nothing

          +
            +
          • each independent part of the program gets its own interpreter
          • +
          • simple implementation
          • +
          • gives workaround instead of direct replacement
          • +
          • not compatible with existing threaded applications, a priori
          • +
          • explicit communication:
              +
            • good: clean model, no locks
            • +
            • bad: limitations, overhead
            • +
            +
          • +
          +
          +
          +

          Transactional Memory

          +
            +
          • like GIL, but instead of blocking, each thread runs optimistically
          • +
          • "easy" to implement:
              +
            • GIL acquire -> transaction start
            • +
            • GIL release -> transaction commit
            • +
            +
          • +
          • overhead: cross-checking conflicting memory reads and writes, +and if necessary, cancel and restart transactions
          • +
          • HTM, STM, or some hybrids exist:
              +
            • but mostly still research-only
            • +
            +
          • +
          +
          +
          +

          PyPy-STM

          +
            +
          • implementation of a specially-tailored STM ("hard" part):
              +
            • a reusable C library
            • +
            • called STMGC-C7
            • +
            +
          • +
          • used in PyPy to replace the GIL ("easy" part)
          • +
          • could also be used in CPython
              +
            • but refcounting needs replacing
            • +
            +
          • +
          +
          +
          +

          How does it work?

          + +fig4.svg +
          +
          +

          Demo

          +
            +
          • counting primes
          • +
          +
          +
          +

          Long Transactions

          +
            +
          • threads and application-level locks still needed...
          • +
          • but can be very coarse:
              +
            • two transactions can optimistically run in parallel
            • +
            • even if they both acquire and release the same lock
            • +
            +
          • +
          +
          +
          +

          Long Transactions

          + +fig4.svg +
          +
          +

          Demo

          +
            +
          • Bottle web server
          • +
          +
          +
          +

          PyPy-STM Programming Model

          +
            +
          • threads-and-locks, fully compatible with the GIL
          • +
          • this is not "everybody should use careful explicit threading +with all the locking issues"
          • +
          • instead, PyPy-STM pushes forward:
              +
            • make or use a thread pool library
            • +
            • coarse locking, inside that library only
            • +
            +
          • +
          +
          +
          +

          PyPy-STM Programming Model

          +
            +
          • e.g.:
              +
            • multiprocessing-like thread pool
            • +
            • Twisted/Tornado/Bottle extension
            • +
            • Stackless/greenlet/gevent extension
            • +
            +
          • +
          +
          +
          +

          PyPy-STM status

          +
            +
          • current status:
              +
            • basics work
            • +
            • best case 25-40% overhead (much better than originally planned)
            • +
            • app locks not done yet ("with atomic" workaround)
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            • tons of things to improve
            • +
            +
          • +
          +
          +
          +

          Summary: Benefits

          +
            +
          • Potential to enable parallelism:
              +
            • in any CPU-bound multithreaded program
            • +
            • or as a replacement of multiprocessing
            • +
            • but also in existing applications not written for that
            • +
            • as long as they do multiple things that are "often independent"
            • +
            +
          • +
          • Keep locks coarse-grained
          • +
          +
          +
          +

          Summary: Issues

          +
            +
          • Keep locks coarse-grained:
              +
            • but in case of systematic conflicts, performance is bad again
            • +
            • need to track and fix them
            • +
            • need tool to support this (debugger/profiler)
            • +
            +
          • +
          • Performance hit: 25-40% slower than a plain PyPy-JIT (may be ok)
          • +
          +
          +
          +

          Summary: PyPy-STM

          + +
          +
          +

          Part 2 - Under The Hood

          +

          STMGC-C7

          +
          +
          +

          Overview

          +
            +
          • Say we want to run N = 2 threads
          • +
          • We reserve twice the memory
          • +
          • Thread 1 reads/writes "memory segment" 1
          • +
          • Thread 2 reads/writes "memory segment" 2
          • +
          • Upon commit, we (try to) copy the changes to the other segment
          • +
          +
          +
          +

          Trick #1

          +
            +
          • Objects contain pointers to each other
          • +
          • These pointers are relative instead of absolute:
              +
            • accessed as if they were "thread-local data"
            • +
            • the x86 has a zero-cost way to do that (%fs, %gs)
            • +
            • supported in clang (not gcc so far)
            • +
            +
          • +
          +
          +
          +

          Trick #2

          +
            +
          • With Trick #1, most objects are exactly identical in all N segments:
              +
            • so we share the memory
            • +
            • mmap() MAP_SHARED
            • +
            • actual memory usage is multiplied by much less than N
            • +
            +
          • +
          • Newly allocated objects are directly in shared pages:
              +
            • we don't actually need to copy all new objects at commit, +but only the few old objects modified
            • +
            +
          • +
          +
          +
          +

          Barriers

          +
            +
          • Need to record all reads and writes done by a transaction
          • +
          • Extremely cheap way to do that:
              +
            • Read: set a flag in thread-local memory (one byte)
            • +
            • Write into a newly allocated object: nothing to do
            • +
            • Write into an old object: add the object to a list
            • +
            +
          • +
          • Commit: check if each object from that list conflicts with +a read flag set in some other thread
          • +
          +
          +
          +

          ...

          +
          +
          +

          Thank You

          + +
          +
          + + diff --git a/talk/ep2014-stm/ui b/talk/ep2014-stm/ui new file mode 120000 --- /dev/null +++ b/talk/ep2014-stm/ui @@ -0,0 +1,1 @@ +../stanford-ee380-2011/ui \ No newline at end of file From noreply at buildbot.pypy.org Wed Aug 20 17:13:28 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 20 Aug 2014 17:13:28 +0200 (CEST) Subject: [pypy-commit] pypy default: use lists instead of dicts for opboolreflex, opboolinvers and opname Message-ID: <20140820151328.3BB301C11B8@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r72932:658f358e5f30 Date: 2014-06-30 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/658f358e5f30/ Log: use lists instead of dicts for opboolreflex, opboolinvers and opname diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -47,21 +47,15 @@ def find_rewritable_bool(self, op, args): - try: - oldopnum = opboolinvers[op.getopnum()] - except KeyError: - pass - else: + oldopnum = opboolinvers[op.getopnum()] + if oldopnum != -1: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], None)) if self.try_boolinvers(op, targs): return True - try: - oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - except KeyError: - pass - else: + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL + if oldopnum != -1: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], None)) oldop = self.get_pure_result(targs) @@ -69,15 +63,14 @@ self.make_equal_to(op.result, self.getvalue(oldop.result)) return True - try: - oldopnum = opboolinvers[opboolreflex[op.getopnum()]] - except KeyError: - pass - else: - targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], - None)) - if self.try_boolinvers(op, targs): - return True + reflex = opboolreflex[op.getopnum()] + if 
reflex != -1: + oldopnum = opboolinvers[reflex] + if oldopnum != -1: + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) + if self.try_boolinvers(op, targs): + return True return False diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -185,7 +185,7 @@ op = opname[opnum] except KeyError: continue - if 'FLOAT' in op: + if 'float' in op: continue args = [] for _ in range(oparity[opnum]): diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -19,7 +19,7 @@ opname = name[len(name_prefix):] if opname.isupper(): assert hasattr(resoperation.rop, opname) - for value, name in resoperation.opname.items(): + for value, name in enumerate(resoperation.opname): if op_prefix and not name.startswith(op_prefix): continue if hasattr(Class, name_prefix + name): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -126,10 +126,7 @@ ['descr=%r' % descr])) def getopname(self): - try: - return opname[self.getopnum()].lower() - except KeyError: - return '<%d>' % self.getopnum() + return opname[self.getopnum()] def is_guard(self): return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST @@ -549,7 +546,7 @@ pass opclasses = [] # mapping numbers to the concrete ResOp class -opname = {} # mapping numbers to the original names, for debugging +opname = [] # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -571,15 +568,16 @@ 
arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): - opname[i] = name cls = create_class_for_op(name, i, arity, withdescr) cls._cls_has_bool_result = boolresult else: + name = '<%d>' % i cls = None + opname.append(name.lower()) opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) - assert len(opclasses) == len(oparity) == len(opwithdescr) == len(_oplist) + assert len(opclasses) == len(oparity) == len(opwithdescr) == len(_oplist) == len(opname) def get_base_class(mixin, base): try: @@ -619,7 +617,10 @@ setup(__name__ == '__main__') # print out the table when run directly del _oplist -opboolinvers = { +def opdict_to_list(d, default=-1): + return [d.get(i, default) for i in range(len(opname))] + +opboolinvers = opdict_to_list({ rop.INT_EQ: rop.INT_NE, rop.INT_NE: rop.INT_EQ, rop.INT_LT: rop.INT_GE, @@ -641,9 +642,9 @@ rop.PTR_EQ: rop.PTR_NE, rop.PTR_NE: rop.PTR_EQ, -} +}) -opboolreflex = { +opboolreflex = opdict_to_list({ rop.INT_EQ: rop.INT_EQ, rop.INT_NE: rop.INT_NE, rop.INT_LT: rop.INT_GT, @@ -665,7 +666,7 @@ rop.PTR_EQ: rop.PTR_EQ, rop.PTR_NE: rop.PTR_NE, -} +}) def get_deep_immutable_oplist(operations): diff --git a/rpython/jit/metainterp/test/test_executor.py b/rpython/jit/metainterp/test/test_executor.py --- a/rpython/jit/metainterp/test/test_executor.py +++ b/rpython/jit/metainterp/test/test_executor.py @@ -326,13 +326,13 @@ def make_args_for_op(op, a, b): n=opname[op] - if n[0:3] == 'INT' or n[0:4] == 'UINT': + if n[0:3] == 'int' or n[0:4] == 'uint': arg1 = ConstInt(a) arg2 = ConstInt(b) - elif n[0:5] == 'FLOAT': + elif n[0:5] == 'float': arg1 = constfloat(float(a)) arg2 = constfloat(float(b)) - elif n[0:3] == 'PTR': + elif n[0:3] == 'ptr': arg1 = ConstPtr(rffi.cast(llmemory.GCREF, a)) arg2 = ConstPtr(rffi.cast(llmemory.GCREF, b)) else: @@ -343,7 +343,9 @@ def test_opboolinvers(): cpu = FakeCPU() - for op1, op2 in opboolinvers.items(): + for op1, op2 in enumerate(opboolinvers): + if 
op2 == -1: + continue for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) @@ -353,7 +355,9 @@ def test_opboolreflex(): cpu = FakeCPU() - for op1, op2 in opboolreflex.items(): + for op1, op2 in enumerate(opboolreflex): + if op2 == -1: + continue for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) From noreply at buildbot.pypy.org Wed Aug 20 17:13:29 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 20 Aug 2014 17:13:29 +0200 (CEST) Subject: [pypy-commit] pypy default: a somewhat hacky Pdb+ command "findv" to find variables of certain names Message-ID: <20140820151329.795B71C11B8@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r72933:7373380c07d3 Date: 2014-08-20 17:10 +0200 http://bitbucket.org/pypy/pypy/changeset/7373380c07d3/ Log: a somewhat hacky Pdb+ command "findv" to find variables of certain names (default is "graph") diff --git a/rpython/translator/tool/pdbplus.py b/rpython/translator/tool/pdbplus.py --- a/rpython/translator/tool/pdbplus.py +++ b/rpython/translator/tool/pdbplus.py @@ -196,6 +196,30 @@ return self._show(page) + def do_findv(self, varname): + """ findv [varname] +find a stack frame that has a certain variable (the default is "graph") +""" + if not varname: + varname = "graph" + printfr = self.print_stack_entry + self.print_stack_entry = lambda *args: None + try: + num = 0 + while self.curindex: + frame = self.curframe + if varname in frame.f_locals: + printfr(self.stack[self.curindex]) + print "%s = %s" % (varname, frame.f_locals[varname]) + return + num += 1 + self.do_up(None) + print "no %s found" % (varname, ) + for i in range(num): + self.do_down(None) + finally: + del self.print_stack_entry + def _attrs(self, arg, pr): arg, expr = self._parse_modif(arg, 'match') if expr == '_': From noreply at buildbot.pypy.org Wed Aug 20 17:13:30 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 20 Aug 2014 17:13:30 +0200 (CEST) Subject: [pypy-commit] pypy 
default: merge Message-ID: <20140820151330.B28AB1C11B8@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r72934:c9b9d0b2ab6c Date: 2014-08-20 17:12 +0200 http://bitbucket.org/pypy/pypy/changeset/c9b9d0b2ab6c/ Log: merge diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -47,21 +47,15 @@ def find_rewritable_bool(self, op, args): - try: - oldopnum = opboolinvers[op.getopnum()] - except KeyError: - pass - else: + oldopnum = opboolinvers[op.getopnum()] + if oldopnum != -1: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], None)) if self.try_boolinvers(op, targs): return True - try: - oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - except KeyError: - pass - else: + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL + if oldopnum != -1: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], None)) oldop = self.get_pure_result(targs) @@ -69,15 +63,14 @@ self.make_equal_to(op.result, self.getvalue(oldop.result)) return True - try: - oldopnum = opboolinvers[opboolreflex[op.getopnum()]] - except KeyError: - pass - else: - targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], - None)) - if self.try_boolinvers(op, targs): - return True + reflex = opboolreflex[op.getopnum()] + if reflex != -1: + oldopnum = opboolinvers[reflex] + if oldopnum != -1: + targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], + None)) + if self.try_boolinvers(op, targs): + return True return False diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -185,7 
+185,7 @@ op = opname[opnum] except KeyError: continue - if 'FLOAT' in op: + if 'float' in op: continue args = [] for _ in range(oparity[opnum]): diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -19,7 +19,7 @@ opname = name[len(name_prefix):] if opname.isupper(): assert hasattr(resoperation.rop, opname) - for value, name in resoperation.opname.items(): + for value, name in enumerate(resoperation.opname): if op_prefix and not name.startswith(op_prefix): continue if hasattr(Class, name_prefix + name): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -126,10 +126,7 @@ ['descr=%r' % descr])) def getopname(self): - try: - return opname[self.getopnum()].lower() - except KeyError: - return '<%d>' % self.getopnum() + return opname[self.getopnum()] def is_guard(self): return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST @@ -549,7 +546,7 @@ pass opclasses = [] # mapping numbers to the concrete ResOp class -opname = {} # mapping numbers to the original names, for debugging +opname = [] # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -571,15 +568,16 @@ arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): - opname[i] = name cls = create_class_for_op(name, i, arity, withdescr) cls._cls_has_bool_result = boolresult else: + name = '<%d>' % i cls = None + opname.append(name.lower()) opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) - assert len(opclasses) == len(oparity) == len(opwithdescr) == len(_oplist) + assert len(opclasses) == len(oparity) == len(opwithdescr) == 
len(_oplist) == len(opname) def get_base_class(mixin, base): try: @@ -619,7 +617,10 @@ setup(__name__ == '__main__') # print out the table when run directly del _oplist -opboolinvers = { +def opdict_to_list(d, default=-1): + return [d.get(i, default) for i in range(len(opname))] + +opboolinvers = opdict_to_list({ rop.INT_EQ: rop.INT_NE, rop.INT_NE: rop.INT_EQ, rop.INT_LT: rop.INT_GE, @@ -641,9 +642,9 @@ rop.PTR_EQ: rop.PTR_NE, rop.PTR_NE: rop.PTR_EQ, -} +}) -opboolreflex = { +opboolreflex = opdict_to_list({ rop.INT_EQ: rop.INT_EQ, rop.INT_NE: rop.INT_NE, rop.INT_LT: rop.INT_GT, @@ -665,7 +666,7 @@ rop.PTR_EQ: rop.PTR_EQ, rop.PTR_NE: rop.PTR_NE, -} +}) def get_deep_immutable_oplist(operations): diff --git a/rpython/jit/metainterp/test/test_executor.py b/rpython/jit/metainterp/test/test_executor.py --- a/rpython/jit/metainterp/test/test_executor.py +++ b/rpython/jit/metainterp/test/test_executor.py @@ -326,13 +326,13 @@ def make_args_for_op(op, a, b): n=opname[op] - if n[0:3] == 'INT' or n[0:4] == 'UINT': + if n[0:3] == 'int' or n[0:4] == 'uint': arg1 = ConstInt(a) arg2 = ConstInt(b) - elif n[0:5] == 'FLOAT': + elif n[0:5] == 'float': arg1 = constfloat(float(a)) arg2 = constfloat(float(b)) - elif n[0:3] == 'PTR': + elif n[0:3] == 'ptr': arg1 = ConstPtr(rffi.cast(llmemory.GCREF, a)) arg2 = ConstPtr(rffi.cast(llmemory.GCREF, b)) else: @@ -343,7 +343,9 @@ def test_opboolinvers(): cpu = FakeCPU() - for op1, op2 in opboolinvers.items(): + for op1, op2 in enumerate(opboolinvers): + if op2 == -1: + continue for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) @@ -353,7 +355,9 @@ def test_opboolreflex(): cpu = FakeCPU() - for op1, op2 in opboolreflex.items(): + for op1, op2 in enumerate(opboolreflex): + if op2 == -1: + continue for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) From noreply at buildbot.pypy.org Wed Aug 20 17:28:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 17:28:04 +0200 (CEST) 
Subject: [pypy-commit] stmgc default: Fix in debug code (likely) Message-ID: <20140820152804.296111C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1321:b067967930aa Date: 2014-08-20 17:12 +0200 http://bitbucket.org/pypy/stmgc/changeset/b067967930aa/ Log: Fix in debug code (likely) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -196,7 +196,11 @@ || write_locks[first_card_index] == 255); /* see gcpage.c */ while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; - assert(write_locks[card_lock_idx] == CARD_CLEAR); + if (write_locks[card_lock_idx] != CARD_CLEAR) { + /* could occur if the object is immediately re-locked by + another thread */ + assert(write_locks[first_card_index] != 0); + } card_index++; } From noreply at buildbot.pypy.org Wed Aug 20 17:29:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 17:29:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/b067967930aa Message-ID: <20140820152916.5F48E1C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72935:157141a53611 Date: 2014-08-20 17:28 +0200 http://bitbucket.org/pypy/pypy/changeset/157141a53611/ Log: import stmgc/b067967930aa diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -bea13491352f +b067967930aa diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -197,7 +197,11 @@ || write_locks[first_card_index] == 255); /* see gcpage.c */ while (card_index <= last_card_index) { uintptr_t card_lock_idx = first_card_index + card_index; - assert(write_locks[card_lock_idx] == CARD_CLEAR); + if (write_locks[card_lock_idx] != 
CARD_CLEAR) { + /* could occur if the object is immediately re-locked by + another thread */ + assert(write_locks[first_card_index] != 0); + } card_index++; } From noreply at buildbot.pypy.org Wed Aug 20 17:55:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 20 Aug 2014 17:55:48 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Remove "DLS" from the final paper, in order to present it as a technical report. Message-ID: <20140820155548.EEE5A1C11B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5386:974b856f74b4 Date: 2014-08-20 17:55 +0200 http://bitbucket.org/pypy/extradoc/changeset/974b856f74b4/ Log: Remove "DLS" from the final paper, in order to present it as a technical report. diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index 1b6090da505763b644a566a54df6a1b33bfeed25..82f177e29577ffb3c99b4b6829fd6e829fe20d59 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -104,7 +104,7 @@ \setlength{\pdfpageheight}{\paperheight} \setlength{\pdfpagewidth}{\paperwidth} -\conferenceinfo{DLS 2014}{to be supplied} +\conferenceinfo{Technical report}{to be supplied} \copyrightyear{2014} %\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} \doi{nnnnnnn.nnnnnnn} @@ -123,7 +123,6 @@ %% \preprintfooter{short description of paper} % 'preprint' option specified. 
\title{A Transactional Memory System for Parallel Python} -%\subtitle{DLS'14} % \comment{ % A Platform for Parallelism in Dynamic languages From noreply at buildbot.pypy.org Wed Aug 20 17:56:49 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 20 Aug 2014 17:56:49 +0200 (CEST) Subject: [pypy-commit] pypy default: back out 658f358e5f30 Message-ID: <20140820155649.069861C148A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r72936:936da5f1c6fc Date: 2014-08-20 17:56 +0200 http://bitbucket.org/pypy/pypy/changeset/936da5f1c6fc/ Log: back out 658f358e5f30 (breaks translation) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -47,15 +47,21 @@ def find_rewritable_bool(self, op, args): - oldopnum = opboolinvers[op.getopnum()] - if oldopnum != -1: + try: + oldopnum = opboolinvers[op.getopnum()] + except KeyError: + pass + else: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[0], args[1]], None)) if self.try_boolinvers(op, targs): return True - oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL - if oldopnum != -1: + try: + oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL + except KeyError: + pass + else: targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], None)) oldop = self.get_pure_result(targs) @@ -63,14 +69,15 @@ self.make_equal_to(op.result, self.getvalue(oldop.result)) return True - reflex = opboolreflex[op.getopnum()] - if reflex != -1: - oldopnum = opboolinvers[reflex] - if oldopnum != -1: - targs = self.optimizer.make_args_key(ResOperation(oldopnum, [args[1], args[0]], - None)) - if self.try_boolinvers(op, targs): - return True + try: + oldopnum = opboolinvers[opboolreflex[op.getopnum()]] + except KeyError: + pass + else: + targs = self.optimizer.make_args_key(ResOperation(oldopnum, 
[args[1], args[0]], + None)) + if self.try_boolinvers(op, targs): + return True return False diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -185,7 +185,7 @@ op = opname[opnum] except KeyError: continue - if 'float' in op: + if 'FLOAT' in op: continue args = [] for _ in range(oparity[opnum]): diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -19,7 +19,7 @@ opname = name[len(name_prefix):] if opname.isupper(): assert hasattr(resoperation.rop, opname) - for value, name in enumerate(resoperation.opname): + for value, name in resoperation.opname.items(): if op_prefix and not name.startswith(op_prefix): continue if hasattr(Class, name_prefix + name): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -126,7 +126,10 @@ ['descr=%r' % descr])) def getopname(self): - return opname[self.getopnum()] + try: + return opname[self.getopnum()].lower() + except KeyError: + return '<%d>' % self.getopnum() def is_guard(self): return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST @@ -546,7 +549,7 @@ pass opclasses = [] # mapping numbers to the concrete ResOp class -opname = [] # mapping numbers to the original names, for debugging +opname = {} # mapping numbers to the original names, for debugging oparity = [] # mapping numbers to the arity of the operation or -1 opwithdescr = [] # mapping numbers to a flag "takes a descr" @@ -568,16 +571,15 @@ arity, withdescr, boolresult = -1, True, False # default setattr(rop, name, i) if not name.startswith('_'): + opname[i] = name cls = 
create_class_for_op(name, i, arity, withdescr) cls._cls_has_bool_result = boolresult else: - name = '<%d>' % i cls = None - opname.append(name.lower()) opclasses.append(cls) oparity.append(arity) opwithdescr.append(withdescr) - assert len(opclasses) == len(oparity) == len(opwithdescr) == len(_oplist) == len(opname) + assert len(opclasses) == len(oparity) == len(opwithdescr) == len(_oplist) def get_base_class(mixin, base): try: @@ -617,10 +619,7 @@ setup(__name__ == '__main__') # print out the table when run directly del _oplist -def opdict_to_list(d, default=-1): - return [d.get(i, default) for i in range(len(opname))] - -opboolinvers = opdict_to_list({ +opboolinvers = { rop.INT_EQ: rop.INT_NE, rop.INT_NE: rop.INT_EQ, rop.INT_LT: rop.INT_GE, @@ -642,9 +641,9 @@ rop.PTR_EQ: rop.PTR_NE, rop.PTR_NE: rop.PTR_EQ, -}) +} -opboolreflex = opdict_to_list({ +opboolreflex = { rop.INT_EQ: rop.INT_EQ, rop.INT_NE: rop.INT_NE, rop.INT_LT: rop.INT_GT, @@ -666,7 +665,7 @@ rop.PTR_EQ: rop.PTR_EQ, rop.PTR_NE: rop.PTR_NE, -}) +} def get_deep_immutable_oplist(operations): diff --git a/rpython/jit/metainterp/test/test_executor.py b/rpython/jit/metainterp/test/test_executor.py --- a/rpython/jit/metainterp/test/test_executor.py +++ b/rpython/jit/metainterp/test/test_executor.py @@ -326,13 +326,13 @@ def make_args_for_op(op, a, b): n=opname[op] - if n[0:3] == 'int' or n[0:4] == 'uint': + if n[0:3] == 'INT' or n[0:4] == 'UINT': arg1 = ConstInt(a) arg2 = ConstInt(b) - elif n[0:5] == 'float': + elif n[0:5] == 'FLOAT': arg1 = constfloat(float(a)) arg2 = constfloat(float(b)) - elif n[0:3] == 'ptr': + elif n[0:3] == 'PTR': arg1 = ConstPtr(rffi.cast(llmemory.GCREF, a)) arg2 = ConstPtr(rffi.cast(llmemory.GCREF, b)) else: @@ -343,9 +343,7 @@ def test_opboolinvers(): cpu = FakeCPU() - for op1, op2 in enumerate(opboolinvers): - if op2 == -1: - continue + for op1, op2 in opboolinvers.items(): for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) @@ -355,9 +353,7 @@ def 
test_opboolreflex(): cpu = FakeCPU() - for op1, op2 in enumerate(opboolreflex): - if op2 == -1: - continue + for op1, op2 in opboolreflex.items(): for a in (1,2,3): for b in (1,2,3): arg1, arg2 = make_args_for_op(op1, a, b) From noreply at buildbot.pypy.org Wed Aug 20 18:45:31 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Wed, 20 Aug 2014 18:45:31 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: implement os.truncate Message-ID: <20140820164531.06F3C1C1486@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72937:1a0bf771caeb Date: 2014-08-20 18:44 +0200 http://bitbucket.org/pypy/pypy/changeset/1a0bf771caeb/ Log: implement os.truncate diff --git a/pypy/module/posix/__init__.py b/pypy/module/posix/__init__.py --- a/pypy/module/posix/__init__.py +++ b/pypy/module/posix/__init__.py @@ -86,6 +86,7 @@ interpleveldefs['fchmod'] = 'interp_posix.fchmod' if hasattr(os, 'ftruncate'): interpleveldefs['ftruncate'] = 'interp_posix.ftruncate' + interpleveldefs['truncate'] = 'interp_posix.truncate' if hasattr(os, 'fsync'): interpleveldefs['fsync'] = 'interp_posix.fsync' if hasattr(os, 'fdatasync'): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -159,7 +159,7 @@ @unwrap_spec(fd=c_int, length=r_longlong) def ftruncate(space, fd, length): - """Truncate a file to a specified length.""" + """Truncate a file (by file descriptor) to a specified length.""" try: os.ftruncate(fd, length) except IOError, e: @@ -173,6 +173,25 @@ except OSError, e: raise wrap_oserror(space, e) +def truncate(space, w_path, w_length): + """Truncate a file to a specified length.""" + allocated_fd = False + fd = -1 + try: + if space.isinstance_w(w_path, space.w_int): + w_fd = w_path + else: + w_fd = open(space, w_path, os.O_RDWR | os.O_CREAT) + allocated_fd = True + + fd = space.c_filedescriptor_w(w_fd) + length = space.int_w(w_length) + return 
ftruncate(space, fd, length) + + finally: + if allocated_fd and fd != -1: + close(space, fd) + def fsync(space, w_fd): """Force write of file with filedescriptor to disk.""" fd = space.c_filedescriptor_w(w_fd) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -976,6 +976,33 @@ data = f.read() assert data == "who cares?" + if hasattr(os, 'ftruncate'): + def test_truncate(self): + posix = self.posix + dest = self.path + + def mkfile(dest, size=4): + with open(dest, 'wb') as f: + f.write(b'd' * size) + + # Check invalid inputs + mkfile(dest) + raises(OSError, posix.truncate, dest, -1) + raises(OSError, posix.truncate, 1, 1) + raises(TypeError, posix.truncate, dest, None) + raises(TypeError, posix.truncate, None, None) + + # Truncate via file descriptor + mkfile(dest) + with open(dest, 'wb') as f: + posix.truncate(f.fileno(), 1) + assert 1 == posix.stat(dest).st_size + + # Truncate via filename + mkfile(dest) + posix.truncate(dest, 1) + assert 1 == posix.stat(dest).st_size + try: os.getlogin() except (AttributeError, OSError): From noreply at buildbot.pypy.org Wed Aug 20 19:31:57 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Aug 2014 19:31:57 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Add a draft of UCT talk Message-ID: <20140820173157.5D67A1C14FF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5387:be99fc4a12c0 Date: 2014-08-20 19:21 +0200 http://bitbucket.org/pypy/extradoc/changeset/be99fc4a12c0/ Log: Add a draft of UCT talk diff --git a/talk/uct2014/example/demo1.py b/talk/uct2014/example/demo1.py new file mode 100644 --- /dev/null +++ b/talk/uct2014/example/demo1.py @@ -0,0 +1,23 @@ + +def adder(x, y): + return x + y + +def entry_point(argv): + if len(argv) != 3: + print "Wrong number of args" + return 1 + try: + arg0 = int(argv[1]) + arg1 = int(argv[2]) + except 
ValueError: + print "Requires ints" + return 1 + print "Added", arg0 + arg1 + return 0 + +def target(*args): + return entry_point + +if __name__ == '__main__': + import sys + entry_point(sys.argv) diff --git a/talk/uct2014/example/demo2.py b/talk/uct2014/example/demo2.py new file mode 100644 --- /dev/null +++ b/talk/uct2014/example/demo2.py @@ -0,0 +1,26 @@ + +def adder(x, y): + return x + y + +def entry_point(argv): + if len(argv) != 3: + print "Wrong number of args" + return 1 + try: + arg0 = int(argv[1]) + arg1 = int(argv[2]) + except ValueError: + print "Requires ints" + return 1 + x = float(arg1) + for i in xrange(arg0): + x = adder(x, 1.2) + print "Result", x + return 0 + +def target(*args): + return entry_point + +if __name__ == '__main__': + import sys + entry_point(sys.argv) diff --git a/talk/uct2014/example/interp.py b/talk/uct2014/example/interp.py new file mode 100644 --- /dev/null +++ b/talk/uct2014/example/interp.py @@ -0,0 +1,48 @@ + +import time + +class InterpreterError(Exception): + def __init__(self, msg): + self.msg = msg + +def interp(bytecode, start_value): + i = 0 + lgt = len(bytecode) + accumulator = start_value + while i < lgt: + c = bytecode[i] + if c == 'd': + accumulator -= 1 + i += 1 + elif c == 'l': + arg = ord(bytecode[i + 1]) + i += 2 + if accumulator > 0: + i -= arg + 2 + else: + raise InterpreterError("Unknown char %s" % (c,)) + return accumulator + +def entry_point(argv): + if len(argv) != 3: + print "Wrong number of args, requires bytecode and start_value" + return 1 + bytecode = argv[1] + try: + acc = int(argv[2]) + except ValueError: + print "Expected int, got %s" % (argv[2],) + return 2 + try: + t0 = time.time() + res = interp(bytecode, acc) + tk = time.time() + dt = tk - t0 + except InterpreterError, e: + print e.msg + return 3 + print "Got %d, time %f" % (res, dt) + return 0 + +def target(*args): + return entry_point diff --git a/talk/uct2014/example/interp_jit.py b/talk/uct2014/example/interp_jit.py new file mode 100644 --- 
/dev/null +++ b/talk/uct2014/example/interp_jit.py @@ -0,0 +1,61 @@ + +import time +from rpython.rlib import jit + +driver = jit.JitDriver(greens = ['i', 'lgt', 'start_value', 'bytecode'], + reds = ['accumulator']) + +class InterpreterError(Exception): + def __init__(self, msg): + self.msg = msg + +def interp(bytecode, start_value): + i = 0 + lgt = len(bytecode) + accumulator = start_value + while i < lgt: + driver.jit_merge_point(bytecode=bytecode, i=i, lgt=lgt, + accumulator=accumulator, start_value=start_value) + c = bytecode[i] + if c == 'd': + accumulator -= 1 + i += 1 + elif c == 'l': + arg = ord(bytecode[i + 1]) + i += 2 + if accumulator > 0: + i -= arg + 2 + driver.can_enter_jit(bytecode=bytecode, i=i, lgt=lgt, + accumulator=accumulator, + start_value=start_value) + else: + raise InterpreterError("Unknown char %s (%d)" % (c, ord(c))) + return accumulator + +def entry_point(argv): + if len(argv) != 3: + print "Wrong number of args, requires bytecode and start_value" + return 1 + bytecode = argv[1] + try: + acc = int(argv[2]) + except ValueError: + print "Expected int, got %s" % (argv[2],) + return 2 + try: + t0 = time.time() + res = interp(bytecode, acc) + tk = time.time() + dt = tk - t0 + except InterpreterError, e: + print e.msg + return 3 + print "Got %d, time %f" % (res, dt) + return 0 + +def target(*args): + return entry_point + +if __name__ == '__main__': + import sys + entry_point(sys.argv) diff --git a/talk/uct2014/example/test_interp.py b/talk/uct2014/example/test_interp.py new file mode 100644 --- /dev/null +++ b/talk/uct2014/example/test_interp.py @@ -0,0 +1,6 @@ + +from interp import interp + +def test_interp(): + assert interp('ddl\x02', 13) == -1 + assert interp('ddl\x01', 13) == 0 diff --git a/talk/uct2014/talk.rst b/talk/uct2014/talk.rst new file mode 100644 --- /dev/null +++ b/talk/uct2014/talk.rst @@ -0,0 +1,149 @@ +The PyPy project +================ + +What is this talk about? 
+------------------------ + +* short introduction to the PyPy project + +* short introduction to RPython + +* just in time compilation and other innovations + +* how virtual machines should be written + +* commercial vs open source (???) + +Who am I? +--------- + +* Maciej Fijałkowski + +* PyPy core developer + +* own company - baroquesoftware.com + +What's PyPy? +------------ + +* a python interpreter + +* implements the full language (no restrictions!) + +* runs faster + +What makes PyPy different? +-------------------------- + +* not written in C/C++ + +* has just in time compiler + +* runs fast + +* core interpreter does not know about the JIT (mostly) + +What's RPython? +--------------- + +* implementation language for PyPy (and other projects, topaz, hippyvm, ...) + +* a subset of Python that can be statically compiled + +* extensive static analysis and transformation (GC, JIT, ...) + +RPython example +--------------- + +* demo + +* like python, but can compile to C + +* quite a bit quicker + +RPython interpreter example +--------------------------- + +* RPython is an ugly language + +* mostly for writing interpreters + +* demo + +Classic compilation +------------------- + +* you take a language X, parse it, compile it to assembler + +* works well for simple "static enough" languages + +Virtual Machine +--------------- + +* you take language X, compile it to imaginary computer + +* you implement that imaginary computer + +JIT - introduction +------------------ + +* you have a virtual machine from the previous slide + +* you compile bits and pieces of the code straight into assembler + +Tracing JIT - introduction +-------------------------- + +* instead of compiling e.g. function at a time you **trace** + what the interpreter does + +* follow steps one by one and generate assembler + +* very natural inlining, hot paths etc. 
+ +Metatracing +----------- + +* trace the **interpreter** instead of the program + +* for complicated languages, like Python, is essential + +* hints necessary to close the semantics gap + +* avoids duplication of code + +RPython interpreter example - JIT +--------------------------------- + +* demo + +* jit adding means adding a few hints (pypy has about 100) + +Recap on virtual machines +------------------------- + +* don't write virtual machines by hand + +* don't write JITs in hand + +* use tools (PyPy/truffle) + +Q&A +--- + +* pypy.org + +* baroquesoftware.com + +* fijal at baroquesoftware.com + +* Any questions? + +Extra slide - what do I do for a living? +---------------------------------------- + +* selling pypy commercial support + +* various grants + +* implementing extra features From noreply at buildbot.pypy.org Wed Aug 20 19:31:58 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Aug 2014 19:31:58 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: merge Message-ID: <20140820173158.9C4AF1C14FF@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5388:f72b1d32ce88 Date: 2014-08-20 19:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/f72b1d32ce88/ Log: merge diff --git a/talk/dls2014/paper/paper.pdf b/talk/dls2014/paper/paper.pdf index 1b6090da505763b644a566a54df6a1b33bfeed25..82f177e29577ffb3c99b4b6829fd6e829fe20d59 GIT binary patch [cut] diff --git a/talk/dls2014/paper/paper.tex b/talk/dls2014/paper/paper.tex --- a/talk/dls2014/paper/paper.tex +++ b/talk/dls2014/paper/paper.tex @@ -104,7 +104,7 @@ \setlength{\pdfpageheight}{\paperheight} \setlength{\pdfpagewidth}{\paperwidth} -\conferenceinfo{DLS 2014}{to be supplied} +\conferenceinfo{Technical report}{to be supplied} \copyrightyear{2014} %\copyrightdata{978-1-nnnn-nnnn-n/yy/mm} \doi{nnnnnnn.nnnnnnn} @@ -123,7 +123,6 @@ %% \preprintfooter{short description of paper} % 'preprint' option specified. 
\title{A Transactional Memory System for Parallel Python} -%\subtitle{DLS'14} % \comment{ % A Platform for Parallelism in Dynamic languages From noreply at buildbot.pypy.org Wed Aug 20 20:04:06 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 20 Aug 2014 20:04:06 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: fix and a pdf Message-ID: <20140820180406.BEA251D2837@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5389:28f75d6555d8 Date: 2014-08-20 20:03 +0200 http://bitbucket.org/pypy/extradoc/changeset/28f75d6555d8/ Log: fix and a pdf diff --git a/talk/uct2014/talk.pdf b/talk/uct2014/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2bce1f4d33135886db6d9bcd9a0256a4002b2442 GIT binary patch [cut] diff --git a/talk/uct2014/talk.rst b/talk/uct2014/talk.rst --- a/talk/uct2014/talk.rst +++ b/talk/uct2014/talk.rst @@ -11,13 +11,11 @@ * just in time compilation and other innovations * how virtual machines should be written - -* commercial vs open source (???) - + Who am I? 
--------- -* Maciej Fijałkowski +* Maciej Fijalkowski * PyPy core developer From noreply at buildbot.pypy.org Wed Aug 20 22:09:23 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Wed, 20 Aug 2014 22:09:23 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add missing lzma.LZMAError Message-ID: <20140820200923.E9B381C1482@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: py3.3 Changeset: r72938:5f4688671d8e Date: 2014-08-20 22:08 +0200 http://bitbucket.org/pypy/pypy/changeset/5f4688671d8e/ Log: add missing lzma.LZMAError diff --git a/pypy/module/_lzma/__init__.py b/pypy/module/_lzma/__init__.py --- a/pypy/module/_lzma/__init__.py +++ b/pypy/module/_lzma/__init__.py @@ -8,6 +8,7 @@ interpleveldefs = { 'LZMACompressor': 'interp_lzma.W_LZMACompressor', 'LZMADecompressor': 'interp_lzma.W_LZMADecompressor', + 'LZMAError': 'interp_lzma.W_LZMAError', '_encode_filter_properties': 'interp_lzma.encode_filter_properties', '_decode_filter_properties': 'interp_lzma.decode_filter_properties', 'FORMAT_AUTO': 'space.wrap(interp_lzma.FORMAT_AUTO)', diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py --- a/pypy/module/_lzma/interp_lzma.py +++ b/pypy/module/_lzma/interp_lzma.py @@ -3,6 +3,7 @@ TypeDef, interp_attrproperty_bytes, interp_attrproperty) from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.module.exceptions.interp_exceptions import _new_exception, W_Exception from pypy.module.thread.os_lock import Lock from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import LONGLONG_MASK, r_ulonglong @@ -346,6 +347,9 @@ ) +W_LZMAError = _new_exception('LZMAError', W_Exception, 'Call to liblzma failed.') + + def encode_filter_properties(space, w_filter): """Return a bytes object encoding the options (properties) of the filter specified by *filter* (a dict). 
diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py --- a/pypy/module/_lzma/test/test_lzma.py +++ b/pypy/module/_lzma/test/test_lzma.py @@ -15,3 +15,16 @@ b't\x9e\xdfI]\xff\xf4\x9d\x80\x00') decompressed = lzma.decompress(compressed) assert decompressed == b'Insert Data Here' + + def test_exceptions(self): + import _lzma + import lzma + + assert hasattr(_lzma, 'LZMAError') + assert hasattr(lzma, 'LZMAError') + + assert _lzma.LZMAError is lzma.LZMAError + assert _lzma.LZMAError.__doc__ == 'Call to liblzma failed.' + + exc = raises(_lzma.LZMAError, 'raise _lzma.LZMAError') + exc = raises(_lzma.LZMAError, 'raise _lzma.LZMAError("bad thing")') From noreply at buildbot.pypy.org Wed Aug 20 23:27:56 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 20 Aug 2014 23:27:56 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: add and start to test _parse_signature Message-ID: <20140820212756.1C3311C11B8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72939:ddd4e75d91b8 Date: 2014-08-20 22:59 +0300 http://bitbucket.org/pypy/pypy/changeset/ddd4e75d91b8/ Log: add and start to test _parse_signature diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -40,3 +40,110 @@ if index < 0: index += size return index + +def _next_non_white_space(s, offset): + ret = offset + while ret < len(s) and (s[ret] == ' ' or s[ret] == '\t'): + ret += 1 + if ret >= len(s): + break + return ret + +def _is_alpha_underscore(ch): + return (ch >= 'A' and ch <= 'Z') or (ch >= 'a' and ch <= 'z') or ch == '_' + +def _is_alnum_underscore(ch): + return _is_alpha_underscore(ch) or (ch >= '0' and ch <='9') + +def _parse_signature(space, ufunc, signature): + ''' + rewritten from _parse_signature in numpy/core/src/umath/ufunc_object.c + it takes a signature like '(),()->()' or '(i)->(i)' or '(i,j),(j,k)->(i,k)' + and sets up the ufunc to 
handle the actual call appropriately + + cpython numpy chops the dim names i,j,k out of the signature using pointers with + no copying, while faster than this code it seems like a marginally useful optimization. + We copy them out into var_names + ''' + i = _next_non_white_space(signature, 0) + cur_arg = 0 + cur_core_dim = 0 # index into ufunc.cor_dim_ixs + nd = 0 # number of dims of the current argument + var_names = {} + while i < len(signature): + # loop over input/output arguments + if cur_arg == ufunc.nin: + if signature[i:i+2] != '->': + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect '->'", i, signature) + i = _next_non_white_space(signature, i+2) + # parse core dimensions of one argument, + # e.g. "()", "(i)", or "(i,j)" + if signature[i] != '(': + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect '('", i, signature) + i = _next_non_white_space(signature, i+1) + end_of_arg = signature.find(')', i) + if end_of_arg < 0: + raise oefmt(space.w_ValueError, '%s %d in "%s"', + "could not find ')' after", i, signature) + if end_of_arg == i: + # no named arg, skip the next loop + next_comma = -1 + i += 1 + else: + next_comma = signature.find(',', i, end_of_arg) + if next_comma < 0: + next_comma = end_of_arg + while next_comma > 0 and next_comma <= end_of_arg: + # loop over core dimensions + name_end = next_comma - 1 + while signature[name_end] == ' ' or signature[name_end] == '\t': + name_end -= 1 + var_name = signature[i:name_end + 1] + if not all([_is_alpha_underscore(s) for s in var_name]): + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect dimension name", i, signature) + if var_name not in var_names: + var_names[var_name] = ufunc.core_num_dim_ix + ufunc.core_num_dim_ix += 1 + ufunc.core_dim_ixs[cur_core_dim] = var_names[var_name] + cur_core_dim += 1 + nd += 1 + i = next_comma + i = _next_non_white_space(signature, i + 1) + if signature[i] != ',' and signature[i] != ')' and signature[i] != '-': + raise 
oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect ',' or ')' or '-'", i, signature) + if signature[i] == ',': + i = _next_non_white_space(signature, i + 1); + if signature[i] == ')': + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "',' must not be followed by ')'", i, signature) + if end_of_arg <= i: + next_comma = -1 + i = end_of_arg + 1 + else: + next_comma = signature.find(',', i, end_of_arg) + if next_comma < 0: + next_comma = end_of_arg + ufunc.core_num_dims[cur_arg] = nd + ufunc.core_offsets[cur_arg] = cur_core_dim - nd + cur_arg += 1 + nd = 0 + if i < len(signature): + i = _next_non_white_space(signature, i) + if cur_arg != ufunc.nin and cur_arg != ufunc.nargs: + # The list of input arguments (or output arguments) was + # only read partially + if signature[i] != ',': + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "expect ','", i, signature) + i = _next_non_white_space(signature, i + 1); + if cur_arg != ufunc.nargs: + raise oefmt(space.w_ValueError, '%s at %d in "%s"', + "incomplete signature: not all arguments found", i, signature) + ufunc.core_dim_ixs = ufunc.core_dim_ixs[:cur_core_dim] + if cur_core_dim == 0: + ufunc.core_enabled = 0 + return 0 # for historical reasons, any failures will raise diff --git a/pypy/module/micronumpy/test/test_support.py b/pypy/module/micronumpy/test/test_support.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_support.py @@ -0,0 +1,30 @@ +from pypy.module.micronumpy import support +from pypy.module.micronumpy.ufuncs import W_UfuncGeneric +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class TestParseSignatureDirect(BaseNumpyAppTest): + def test_signature_basic(self): + space = self.space + funcs = [None] + name = 'dummy ufunc' + identity = None + dtypes = [int, int, int] + + nin = 2 + nout = 1 + signature = '(), () -> ( ) ' + ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) + # make sure no attributes are added + attribs 
= set(ufunc.__dict__.keys()) + support._parse_signature(space, ufunc, ufunc.signature) + new_attribs = set(ufunc.__dict__.keys()) + assert attribs == new_attribs + assert sum(ufunc.core_num_dims) == 0 + assert ufunc.core_enabled == 0 + + nin = 2 + nout = 1 + signature = '(i),(i)->()' + ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) + support._parse_signature(space, ufunc, ufunc.signature) + assert ufunc.core_enabled == 1 diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -10,6 +10,7 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.ctors import numpify from pypy.module.micronumpy.strides import shape_agreement +from pypy.module.micronumpy.support import _parse_signature def done_if_true(dtype, val): @@ -490,8 +491,10 @@ If dtypes == 'match', only one argument is provided and the output dtypes will match the input dtype (not cpython numpy compatible) + + This is the parallel to PyUFuncOjbect, see include/numpy/ufuncobject.h ''' - _immutable_fields_ = ["funcs", "dtypes", "data"] + _immutable_fields_ = ["funcs", "dtypes", "data", "match_dtypes"] def __init__(self, space, funcs, name, identity, nin, nout, dtypes, signature, match_dtypes=False): # XXX make sure funcs, signature, dtypes, nin, nout are consistent @@ -515,6 +518,12 @@ "generic ufunc with %d functions, %d arguments, but %d dtypes", len(funcs), self.nargs, len(dtypes)) self.signature = signature + #These will be filled in by _parse_signature + self.core_enabled = True # False for scalar ufunc, True for generalized ufunc + self.core_num_dim_ix = 0 # number of distinct dimention names in signature + self.core_num_dims = [0] * self.nargs # number of core dimensions of each nargs + self.core_offsets = [0] * self.nargs + self.core_dim_ixs = [0] * len(signature) def reduce(self, space, w_obj, w_axis, keepdims=False, 
out=None, dtype=None, cumulative=False): @@ -1027,6 +1036,7 @@ w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature, match_dtypes=match_dtypes) + _parse_signature(space, w_ret, w_ret.signature) if doc: w_ret.w_doc = space.wrap(doc) return w_ret From noreply at buildbot.pypy.org Wed Aug 20 23:27:57 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 20 Aug 2014 23:27:57 +0200 (CEST) Subject: [pypy-commit] pypy default: test, fix for #1850 Message-ID: <20140820212757.814881C11B8@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72940:8ad8dd8f0ae3 Date: 2014-08-21 00:27 +0300 http://bitbucket.org/pypy/pypy/changeset/8ad8dd8f0ae3/ Log: test, fix for #1850 diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -78,9 +78,9 @@ # arrays with correct dtype if isinstance(w_object, W_NDimArray) and \ (space.is_none(w_dtype) or w_object.get_dtype() is dtype): - if copy: - return w_object.descr_copy(space) - else: + if copy and (subok or type(w_object) is W_NDimArray): + return w_object.descr_copy(space, w_order) + elif not copy and (subok or type(w_object) is W_NDimArray): return w_object # not an array or incorrect dtype diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -7,7 +7,7 @@ def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_NoNew = cls.space.appexec([], '''(): - from numpypy import ndarray + from numpy import ndarray class NoNew(ndarray): def __new__(cls, subtype): raise ValueError('should not call __new__') @@ -16,7 +16,7 @@ self.called_finalize = True return NoNew ''') cls.w_SubType = cls.space.appexec([], '''(): - from numpypy import ndarray, array + from numpy import ndarray, array class SubType(ndarray): def __new__(obj, input_array): obj = 
array(input_array, copy=False).view(obj) @@ -27,7 +27,7 @@ return SubType ''') def test_subtype_base(self): - from numpypy import ndarray, dtype + from numpy import ndarray, dtype class C(ndarray): def __new__(subtype, shape, dtype): self = ndarray.__new__(subtype, shape, dtype) @@ -65,7 +65,7 @@ assert b.base is a def test_subtype_view(self): - from numpypy import ndarray, array + from numpy import ndarray, array class matrix(ndarray): def __new__(subtype, data, dtype=None, copy=True): if isinstance(data, matrix): @@ -89,7 +89,7 @@ def test_finalize(self): #taken from http://docs.scipy.org/doc/numpy/user/basics.subclassing.html#simple-example-adding-an-extra-attribute-to-ndarray - import numpypy as np + import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order='C', info=None): @@ -121,7 +121,7 @@ assert cast_arr.info is None def test_sub_where(self): - from numpypy import where, ones, zeros, array + from numpy import where, ones, zeros, array a = array([1, 2, 3, 0, -3]) v = a.view(self.NoNew) b = where(array(v) > 0, ones(5), zeros(5)) @@ -130,14 +130,14 @@ assert not isinstance(b, self.NoNew) def test_sub_repeat(self): - from numpypy import array + from numpy import array a = self.SubType(array([[1, 2], [3, 4]])) b = a.repeat(3) assert (b == [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]).all() assert isinstance(b, self.SubType) def test_sub_flatiter(self): - from numpypy import array + from numpy import array a = array(range(9)).reshape(3, 3).view(self.NoNew) c = array(range(9)).reshape(3, 3) assert isinstance(a.flat[:] + a.flat[:], self.NoNew) @@ -146,7 +146,7 @@ assert not isinstance(c.flat[:] + c.flat[:], self.NoNew) def test_sub_getitem_filter(self): - from numpypy import array + from numpy import array a = array(range(5)) b = self.SubType(a) c = b[array([False, True, False, True, False])] @@ -158,7 +158,7 @@ assert c.called_finalize def test_sub_getitem_array_int(self): - from numpypy import 
array + from numpy import array a = array(range(5)) b = self.SubType(a) assert b.called_new @@ -169,7 +169,7 @@ assert c.called_finalize def test_sub_round(self): - from numpypy import array + from numpy import array a = array(range(10), dtype=float).view(self.NoNew) # numpy compatibility b = a.round(decimals=0) @@ -181,7 +181,7 @@ def test_sub_dot(self): # the returned type is that of the first argument - from numpypy import array + from numpy import array a = array(range(12)).reshape(3,4) b = self.SubType(a) c = array(range(12)).reshape(4,3).view(self.SubType) @@ -197,7 +197,7 @@ def test_sub_reduce(self): # i.e. sum, max # test for out as well - from numpypy import array + from numpy import array a = array(range(12)).reshape(3,4) b = self.SubType(a) c = b.sum(axis=0) @@ -216,7 +216,7 @@ def test_sub_call2(self): # c + a vs. a + c, what about array priority? - from numpypy import array + from numpy import array a = array(range(12)).view(self.NoNew) b = self.SubType(range(12)) c = b + a @@ -228,26 +228,26 @@ assert isinstance(e, self.NoNew) def test_sub_call1(self): - from numpypy import array, sqrt + from numpy import array, sqrt a = array(range(12)).view(self.NoNew) b = sqrt(a) assert b.called_finalize == True def test_sub_astype(self): - from numpypy import array + from numpy import array a = array(range(12)).view(self.NoNew) b = a.astype(float) assert b.called_finalize == True def test_sub_reshape(self): - from numpypy import array + from numpy import array a = array(range(12)).view(self.NoNew) b = a.reshape(3, 4) assert b.called_finalize == True def test___array__(self): import sys - from numpypy import ndarray, array, dtype + from numpy import ndarray, array, dtype class D(ndarray): def __new__(subtype, shape, dtype): self = ndarray.__new__(subtype, shape, dtype) @@ -262,16 +262,14 @@ return retVal a = C([2, 2], int) - b = array(a) + b = array(a, subok=True) assert b.shape == (2, 2) - if '__pypy__' in sys.builtin_module_names: - assert b.id == 'subtype' - 
assert isinstance(b, D) + assert isinstance(b, D) c = array(a, float) assert c.dtype is dtype(float) def test__getitem_modifies_shape(self): - import numpypy as N + import numpy as N # numpy's matrix class caused an infinite loop class matrix(N.ndarray): getcnt = 0 @@ -383,3 +381,9 @@ b = loads(s) assert (a == b).all() assert isinstance(b, D) + + def test_subok(self): + from numpy import array, ndarray + a = self.SubType(array([[1, 2], [3, 4]])) + b = array(a, subok=False) + assert type(b) is ndarray From noreply at buildbot.pypy.org Thu Aug 21 01:13:48 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 21 Aug 2014 01:13:48 +0200 (CEST) Subject: [pypy-commit] pypy vendor/stdlib: update the Python 2.7 stdlib to 2.7.8 Message-ID: <20140820231348.3163C1C148A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: vendor/stdlib Changeset: r72941:a9f89ac29c25 Date: 2014-08-20 15:17 -0700 http://bitbucket.org/pypy/pypy/changeset/a9f89ac29c25/ Log: update the Python 2.7 stdlib to 2.7.8 diff too long, truncating to 2000 out of 32297 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). 
""" - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: #bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: 
w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). 
diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. 
- for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. 
""" diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' 
in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise 
ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -314,6 +314,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -326,6 +327,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -1,6 +1,8 @@ import unittest from ctypes import * +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \ @@ -87,8 +89,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -101,20 +103,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -128,7 +126,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_as_parameter.py 
b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -127,20 +128,18 @@ result = self.fail_fields(("a", c_char, 1)) self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_char')) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result, (TypeError, 'bit fields not allowed for type c_wchar')) - class Dummy(Structure): _fields_ = [] result = self.fail_fields(("a", Dummy, 1)) self.assertEqual(result, (TypeError, 'bit fields not allowed for type Dummy')) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, -1)) @@ -207,7 +206,7 @@ class 
X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -240,7 +239,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -250,7 +249,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 32) + self.assertEqual(sizeof(b), 
32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -14,7 +14,8 @@ # For Structures and Unions, these types are created on demand. 
class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -23,11 +24,11 @@ def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -50,11 +51,11 @@ def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -78,11 +79,11 @@ def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + self.assertIs(c_longlong.__ctype_le__.__ctype_be__, 
c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -106,11 +107,11 @@ def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -124,11 +125,11 @@ def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -140,14 +141,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - 
self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) def test_struct_fields_1(self): if sys.byteorder == "little": diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test class Callbacks(unittest.TestCase): @@ -94,9 +95,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") def test_pyobject(self): o = () @@ -148,13 +150,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -184,7 +185,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -196,23 +197,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes import BOOL, HWND, 
LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, c_void_p), 
c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test @@ -193,7 +194,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -203,9 +204,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): -## return "" % self.value - 
-##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -23,7 +23,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest try: @@ -63,22 +64,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -155,11 +150,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -296,6 +288,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -306,7 +299,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -348,16 +341,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = 
[("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): @@ -376,23 +369,24 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) - if sys.platform == "win32": - def test_struct_return_8H_stdcall(self): - class S8I(Structure): - _fields_ = [("a", c_int), - ("b", c_int), - ("c", c_int), - ("d", c_int), - ("e", c_int), - ("f", c_int), - ("g", c_int), - ("h", c_int)] - windll.s_ret_8i_func.restype = S8I - windll.s_ret_8i_func.argtypes = [S8I] - inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) - s8i = windll.s_ret_8i_func(inp) - self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), - (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_8H_stdcall(self): + class S8I(Structure): + _fields_ = [("a", c_int), + ("b", c_int), + ("c", c_int), + ("d", c_int), + ("e", c_int), + ("f", c_int), + ("g", c_int), + ("h", c_int)] + windll.s_ret_8i_func.restype = S8I + windll.s_ret_8i_func.argtypes = [S8I] + inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) + s8i = windll.s_ret_8i_func(inp) + self.assertEqual( + (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), + (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) def test_sf1651235(self): # see http://www.python.org/sf/1651235 diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_integers.py +++ /dev/null @@ -1,5 +0,0 @@ -# superseded by test_numbers.py -import unittest - -if __name__ == '__main__': - unittest.main() diff 
--git a/lib-python/2.7/ctypes/test/test_keeprefs.py b/lib-python/2.7/ctypes/test/test_keeprefs.py --- a/lib-python/2.7/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7/ctypes/test/test_keeprefs.py @@ -94,7 +94,8 @@ self.assertEqual(x._objects, {'1': i}) class DeletePointerTestCase(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): class X(Structure): _fields_ = [("p", POINTER(c_char_p))] x = X() diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py --- a/lib-python/2.7/ctypes/test/test_loading.py +++ b/lib-python/2.7/ctypes/test/test_loading.py @@ -21,18 +21,21 @@ unknowndll = "xxrandomnamexx" - if libc_name is not None: - def test_load(self): - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) - self.assertRaises(OSError, CDLL, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + def test_load(self): + CDLL(libc_name) + CDLL(os.path.basename(libc_name)) + self.assertRaises(OSError, CDLL, self.unknowndll) - if libc_name is not None and os.path.basename(libc_name) == "libc.so.6": - def test_load_version(self): - cdll.LoadLibrary("libc.so.6") - # linux uses version, libc 9 should not exist - self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") - self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + @unittest.skipUnless(libc_name is not None and + os.path.basename(libc_name) == "libc.so.6", + 'wrong libc path for test') + def test_load_version(self): + cdll.LoadLibrary("libc.so.6") + # linux uses version, libc 9 should not exist + self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") + self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) def test_find(self): for name in ("c", "m"): @@ -41,66 +44,71 @@ cdll.LoadLibrary(lib) CDLL(lib) - if os.name in ("nt", "ce"): - def test_load_library(self): - self.assertFalse(libc_name is None) - if 
is_resource_enabled("printing"): - print find_library("kernel32") - print find_library("user32") + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_library(self): + self.assertIsNotNone(libc_name) + if is_resource_enabled("printing"): + print find_library("kernel32") + print find_library("user32") - if os.name == "nt": - windll.kernel32.GetModuleHandleW - windll["kernel32"].GetModuleHandleW - windll.LoadLibrary("kernel32").GetModuleHandleW - WinDLL("kernel32").GetModuleHandleW - elif os.name == "ce": - windll.coredll.GetModuleHandleW - windll["coredll"].GetModuleHandleW - windll.LoadLibrary("coredll").GetModuleHandleW - WinDLL("coredll").GetModuleHandleW + if os.name == "nt": + windll.kernel32.GetModuleHandleW + windll["kernel32"].GetModuleHandleW + windll.LoadLibrary("kernel32").GetModuleHandleW + WinDLL("kernel32").GetModuleHandleW + elif os.name == "ce": + windll.coredll.GetModuleHandleW + windll["coredll"].GetModuleHandleW + windll.LoadLibrary("coredll").GetModuleHandleW + WinDLL("coredll").GetModuleHandleW - def test_load_ordinal_functions(self): - import _ctypes_test - dll = WinDLL(_ctypes_test.__file__) - # We load the same function both via ordinal and name - func_ord = dll[2] - func_name = dll.GetString - # addressof gets the address where the function pointer is stored - a_ord = addressof(func_ord) - a_name = addressof(func_name) - f_ord_addr = c_void_p.from_address(a_ord).value - f_name_addr = c_void_p.from_address(a_name).value - self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_ordinal_functions(self): + import _ctypes_test + dll = WinDLL(_ctypes_test.__file__) + # We load the same function both via ordinal and name + func_ord = dll[2] + func_name = dll.GetString + # addressof gets the address where the function pointer is stored + a_ord = addressof(func_ord) + a_name = addressof(func_name) + 
f_ord_addr = c_void_p.from_address(a_ord).value + f_name_addr = c_void_p.from_address(a_name).value + self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) - self.assertRaises(AttributeError, dll.__getitem__, 1234) + self.assertRaises(AttributeError, dll.__getitem__, 1234) - if os.name == "nt": - def test_1703286_A(self): - from _ctypes import LoadLibrary, FreeLibrary - # On winXP 64-bit, advapi32 loads at an address that does - # NOT fit into a 32-bit integer. FreeLibrary must be able - # to accept this address. + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_A(self): + from _ctypes import LoadLibrary, FreeLibrary + # On winXP 64-bit, advapi32 loads at an address that does + # NOT fit into a 32-bit integer. FreeLibrary must be able + # to accept this address. - # These are tests for http://www.python.org/sf/1703286 - handle = LoadLibrary("advapi32") - FreeLibrary(handle) + # These are tests for http://www.python.org/sf/1703286 + handle = LoadLibrary("advapi32") + FreeLibrary(handle) - def test_1703286_B(self): - # Since on winXP 64-bit advapi32 loads like described - # above, the (arbitrarily selected) CloseEventLog function - # also has a high address. 'call_function' should accept - # addresses so large. - from _ctypes import call_function - advapi32 = windll.advapi32 - # Calling CloseEventLog with a NULL argument should fail, - # but the call should not segfault or so. 
- self.assertEqual(0, advapi32.CloseEventLog(None)) - windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p - windll.kernel32.GetProcAddress.restype = c_void_p - proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog") - self.assertTrue(proc) - # This is the real test: call the function via 'call_function' - self.assertEqual(0, call_function(proc, (None,))) + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_B(self): + # Since on winXP 64-bit advapi32 loads like described + # above, the (arbitrarily selected) CloseEventLog function + # also has a high address. 'call_function' should accept + # addresses so large. + from _ctypes import call_function + advapi32 = windll.advapi32 + # Calling CloseEventLog with a NULL argument should fail, + # but the call should not segfault or so. + self.assertEqual(0, advapi32.CloseEventLog(None)) + windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p + windll.kernel32.GetProcAddress.restype = c_void_p + proc = windll.kernel32.GetProcAddress(advapi32._handle, + "CloseEventLog") + self.assertTrue(proc) + # This is the real test: call the function via 'call_function' + self.assertEqual(0, call_function(proc, (None,))) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -45,18 +45,21 @@ raise ValueError("%s not found" % (name,)) class MachOTest(unittest.TestCase): - if sys.platform == "darwin": - def test_find(self): + @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test') + def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + self.assertEqual(find_lib('pthread'), + '/usr/lib/libSystem.B.dylib') - result = find_lib('z') - self.assertTrue(result.startswith('/usr/lib/libz.1')) - self.assertTrue(result.endswith('.dylib')) + 
result = find_lib('z') + # Issue #21093: dyld default search path includes $HOME/lib and + # /usr/local/lib before /usr/lib, which caused test failures if + # a local copy of libz exists in one of them. Now ignore the head + # of the path. + self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertEqual(find_lib('IOKit'), + '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_memfunctions.py b/lib-python/2.7/ctypes/test/test_memfunctions.py --- a/lib-python/2.7/ctypes/test/test_memfunctions.py +++ b/lib-python/2.7/ctypes/test/test_memfunctions.py @@ -1,17 +1,19 @@ import sys import unittest from ctypes import * +from ctypes.test import need_symbol class MemFunctionsTest(unittest.TestCase): -## def test_overflow(self): -## # string_at and wstring_at must use the Python calling -## # convention (which acquires the GIL and checks the Python -## # error flag). Provoke an error and catch it; see also issue -## # #3554: -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: wstring_at(u"foo", sys.maxint - 1)) -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: string_at("foo", sys.maxint - 1)) + @unittest.skip('test disabled') + def test_overflow(self): + # string_at and wstring_at must use the Python calling + # convention (which acquires the GIL and checks the Python + # error flag). 
Provoke an error and catch it; see also issue + # #3554: + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: wstring_at(u"foo", sys.maxint - 1)) + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: string_at("foo", sys.maxint - 1)) def test_memmove(self): # large buffers apparently increase the chance that the memory @@ -59,21 +61,17 @@ self.assertEqual(string_at("foo bar", 8), "foo bar\0") self.assertEqual(string_at("foo bar", 3), "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_wstring_at(self): - p = create_unicode_buffer("Hello, World") - a = create_unicode_buffer(1000000) - result = memmove(a, p, len(p) * sizeof(c_wchar)) - self.assertEqual(a.value, "Hello, World") + @need_symbol('create_unicode_buffer') + def test_wstring_at(self): + p = create_unicode_buffer("Hello, World") + a = create_unicode_buffer(1000000) + result = memmove(a, p, len(p) * sizeof(c_wchar)) + self.assertEqual(a.value, "Hello, World") - self.assertEqual(wstring_at(a), "Hello, World") - self.assertEqual(wstring_at(a, 5), "Hello") - self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") - self.assertEqual(wstring_at(a, 0), "") + self.assertEqual(wstring_at(a), "Hello, World") + self.assertEqual(wstring_at(a, 5), "Hello") + self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") + self.assertEqual(wstring_at(a, 0), "") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -82,12 +82,13 @@ self.assertRaises(TypeError, t, "") self.assertRaises(TypeError, t, None) -## def test_valid_ranges(self): -## # invalid values of the correct type -## # raise ValueError (not OverflowError) -## for t, (l, h) in zip(unsigned_types, unsigned_ranges): -## self.assertRaises(ValueError, t, l-1) -## self.assertRaises(ValueError, t, h+1) + 
@unittest.skip('test disabled') + def test_valid_ranges(self): + # invalid values of the correct type + # raise ValueError (not OverflowError) + for t, (l, h) in zip(unsigned_types, unsigned_ranges): + self.assertRaises(ValueError, t, l-1) + self.assertRaises(ValueError, t, h+1) def test_from_param(self): # the from_param class method attribute always @@ -181,10 +182,10 @@ a = array(t._type_, [3.14]) v = t.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) a[0] = 2.3456e17 self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) def test_char_from_address(self): from ctypes import c_char @@ -193,22 +194,23 @@ a = array('c', 'x') v = c_char.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is c_char) + self.assertIs(type(v), c_char) a[0] = '?' self.assertEqual(v.value, a[0]) # array does not support c_bool / 't' - # def test_bool_from_address(self): - # from ctypes import c_bool - # from array import array - # a = array(c_bool._type_, [True]) - # v = t.from_address(a.buffer_info()[0]) - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) - # a[0] = False - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) + @unittest.skip('test disabled') + def test_bool_from_address(self): + from ctypes import c_bool + from array import array + a = array(c_bool._type_, [True]) + v = t.from_address(a.buffer_info()[0]) + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) + a[0] = False + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
@@ -226,8 +228,9 @@ if (hasattr(t, "__ctype_le__")): self.assertRaises(OverflowError, t.__ctype_le__, big_int) -## def test_perf(self): -## check_perf() + @unittest.skip('test disabled') + def test_perf(self): + check_perf() from ctypes import _SimpleCData class c_int_S(_SimpleCData): diff --git a/lib-python/2.7/ctypes/test/test_objects.py b/lib-python/2.7/ctypes/test/test_objects.py --- a/lib-python/2.7/ctypes/test/test_objects.py +++ b/lib-python/2.7/ctypes/test/test_objects.py @@ -59,12 +59,9 @@ import ctypes.test.test_objects class TestCase(unittest.TestCase): - if sys.hexversion > 0x02040000: - # Python 2.3 has no ELLIPSIS flag, so we don't test with this - # version: - def test(self): - doctest.testmod(ctypes.test.test_objects) + def test(self): + failures, tests = doctest.testmod(ctypes.test.test_objects) + self.assertFalse(failures, 'doctests failed, see output above') if __name__ == '__main__': - if sys.hexversion > 0x02040000: - doctest.testmod(ctypes.test.test_objects) + doctest.testmod(ctypes.test.test_objects) diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py --- a/lib-python/2.7/ctypes/test/test_parameters.py +++ b/lib-python/2.7/ctypes/test/test_parameters.py @@ -1,4 +1,5 @@ import unittest, sys +from ctypes.test import need_symbol class SimpleTypesTestCase(unittest.TestCase): @@ -36,10 +37,9 @@ self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") - try: - from ctypes import c_wchar_p - except ImportError: - return + @need_symbol('c_wchar_p') + def test_subclasses_c_wchar_p(self): + from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): @@ -55,7 +55,7 @@ # c_char_p.from_param on a Python String packs the string # into a cparam object s = "123" - self.assertTrue(c_char_p.from_param(s)._obj is s) + self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii 
self.assertEqual(c_char_p.from_param(u"123")._obj, "123") @@ -66,15 +66,11 @@ # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p("123") - self.assertTrue(c_char_p.from_param(a) is a) + self.assertIs(c_char_p.from_param(a), a) + @need_symbol('c_wchar_p') def test_cw_strings(self): - from ctypes import byref - try: - from ctypes import c_wchar_p - except ImportError: -## print "(No c_wchar_p)" - return + from ctypes import byref, c_wchar_p s = u"123" if sys.platform == "win32": self.assertTrue(c_wchar_p.from_param(s)._obj is s) @@ -144,9 +140,6 @@ self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) -## def test_performance(self): -## check_perf() - def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py --- a/lib-python/2.7/ctypes/test/test_pep3118.py +++ b/lib-python/2.7/ctypes/test/test_pep3118.py @@ -92,6 +92,10 @@ class aUnion(Union): _fields_ = [("a", c_int)] +class StructWithArrays(Structure): + _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)] + + class Incomplete(Structure): pass @@ -141,10 +145,10 @@ ## arrays and pointers - (c_double * 4, "(4) Author: Philip Jenvey Branch: vendor/stdlib Changeset: r72942:dfe89b902432 Date: 2014-08-20 15:17 -0700 http://bitbucket.org/pypy/pypy/changeset/dfe89b902432/ Log: bump the id 2.7.8 diff --git a/lib-python/stdlib-version.txt b/lib-python/stdlib-version.txt --- a/lib-python/stdlib-version.txt +++ b/lib-python/stdlib-version.txt @@ -4,6 +4,6 @@ the outputs for hg id of each are: 2.7:: - 3a1db0d2747e (2.7) v2.7.6 + ee879c0ffa11 (2.7) v2.7.8 3:: cef745775b65 (3.2) v3.2.5 From noreply at buildbot.pypy.org Thu Aug 21 01:13:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 21 Aug 2014 01:13:52 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: branch 
for stdlib 2.7.8 Message-ID: <20140820231352.77F561C148A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72943:06e03dc23c97 Date: 2014-08-20 16:11 -0700 http://bitbucket.org/pypy/pypy/changeset/06e03dc23c97/ Log: branch for stdlib 2.7.8 diff too long, truncating to 2000 out of 33855 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # 
tag name + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: #bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + 
self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. 
try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? 
HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. """ diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' 
for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' 
and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise 
ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -319,6 +319,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -331,6 +332,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -2,6 +2,8 @@ from ctypes import * from test.test_support import impl_detail +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" # c_longdouble commented out for PyPy, look at the commend in test_longdouble @@ -98,8 +100,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -112,20 +114,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -139,7 +137,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git 
a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -131,15 +132,6 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result[0], TypeError) - self.assertIn('bit fields not allowed for type', result[1]) - class Dummy(Structure): _fields_ = [] @@ -147,6 +139,12 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, 
-1)) @@ -213,7 +211,7 @@ class X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -246,7 +244,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -256,7 +254,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 
32) + self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -15,7 +15,8 @@ # For Structures and Unions, 
these types are created on demand. class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -25,11 +26,11 @@ @xfail def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -53,11 +54,11 @@ @xfail def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -82,11 +83,11 @@ @xfail def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + 
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -111,11 +112,11 @@ @xfail def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -130,11 +131,11 @@ @xfail def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -146,14 +147,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + 
self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) @xfail def test_struct_fields_1(self): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol from ctypes.test import xfail import _ctypes_test @@ -95,9 +96,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") @xfail def test_pyobject(self): @@ -150,13 +152,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -186,7 +187,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -198,23 +199,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes 
import BOOL, HWND, LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, 
c_void_p), c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test from test.test_support import impl_detail @@ -196,7 +197,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -206,9 +207,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): 
-## return "" % self.value - -##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -25,7 +25,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest from ctypes.test import xfail from test.test_support import impl_detail @@ -65,22 +66,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -158,11 +153,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -299,6 +291,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -309,7 +302,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -351,16 +344,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def 
test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = [("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): @@ -379,23 +372,24 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) - if sys.platform == "win32": - def test_struct_return_8H_stdcall(self): - class S8I(Structure): - _fields_ = [("a", c_int), - ("b", c_int), - ("c", c_int), - ("d", c_int), - ("e", c_int), - ("f", c_int), - ("g", c_int), - ("h", c_int)] - windll.s_ret_8i_func.restype = S8I - windll.s_ret_8i_func.argtypes = [S8I] - inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) - s8i = windll.s_ret_8i_func(inp) - self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), - (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_8H_stdcall(self): + class S8I(Structure): + _fields_ = [("a", c_int), + ("b", c_int), + ("c", c_int), + ("d", c_int), + ("e", c_int), + ("f", c_int), + ("g", c_int), + ("h", c_int)] + windll.s_ret_8i_func.restype = S8I + windll.s_ret_8i_func.argtypes = [S8I] + inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) + s8i = windll.s_ret_8i_func(inp) + self.assertEqual( + (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), + (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) @xfail def test_sf1651235(self): diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_integers.py +++ /dev/null @@ -1,5 +0,0 @@ -# superseded by test_numbers.py -import unittest - -if 
__name__ == '__main__': - unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_keeprefs.py b/lib-python/2.7/ctypes/test/test_keeprefs.py --- a/lib-python/2.7/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7/ctypes/test/test_keeprefs.py @@ -94,7 +94,8 @@ self.assertEqual(x._objects, {'1': i}) class DeletePointerTestCase(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): class X(Structure): _fields_ = [("p", POINTER(c_char_p))] x = X() diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py --- a/lib-python/2.7/ctypes/test/test_loading.py +++ b/lib-python/2.7/ctypes/test/test_loading.py @@ -21,18 +21,21 @@ unknowndll = "xxrandomnamexx" - if libc_name is not None: - def test_load(self): - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) - self.assertRaises(OSError, CDLL, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + def test_load(self): + CDLL(libc_name) + CDLL(os.path.basename(libc_name)) + self.assertRaises(OSError, CDLL, self.unknowndll) - if libc_name is not None and os.path.basename(libc_name) == "libc.so.6": - def test_load_version(self): - cdll.LoadLibrary("libc.so.6") - # linux uses version, libc 9 should not exist - self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") - self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + @unittest.skipUnless(libc_name is not None and + os.path.basename(libc_name) == "libc.so.6", + 'wrong libc path for test') + def test_load_version(self): + cdll.LoadLibrary("libc.so.6") + # linux uses version, libc 9 should not exist + self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") + self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) def test_find(self): for name in ("c", "m"): @@ -41,68 +44,73 @@ cdll.LoadLibrary(lib) CDLL(lib) - if os.name in ("nt", "ce"): - def test_load_library(self): - 
self.assertFalse(libc_name is None) - if is_resource_enabled("printing"): - print find_library("kernel32") - print find_library("user32") + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_library(self): + self.assertIsNotNone(libc_name) + if is_resource_enabled("printing"): + print find_library("kernel32") + print find_library("user32") - if os.name == "nt": - windll.kernel32.GetModuleHandleW - windll["kernel32"].GetModuleHandleW - windll.LoadLibrary("kernel32").GetModuleHandleW - WinDLL("kernel32").GetModuleHandleW - elif os.name == "ce": - windll.coredll.GetModuleHandleW - windll["coredll"].GetModuleHandleW - windll.LoadLibrary("coredll").GetModuleHandleW - WinDLL("coredll").GetModuleHandleW + if os.name == "nt": + windll.kernel32.GetModuleHandleW + windll["kernel32"].GetModuleHandleW + windll.LoadLibrary("kernel32").GetModuleHandleW + WinDLL("kernel32").GetModuleHandleW + elif os.name == "ce": + windll.coredll.GetModuleHandleW + windll["coredll"].GetModuleHandleW + windll.LoadLibrary("coredll").GetModuleHandleW + WinDLL("coredll").GetModuleHandleW - def test_load_ordinal_functions(self): - import _ctypes_test - dll = WinDLL(_ctypes_test.__file__) - # We load the same function both via ordinal and name - func_ord = dll[2] - func_name = dll.GetString - # addressof gets the address where the function pointer is stored - a_ord = addressof(func_ord) - a_name = addressof(func_name) - f_ord_addr = c_void_p.from_address(a_ord).value - f_name_addr = c_void_p.from_address(a_name).value - self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_ordinal_functions(self): + import _ctypes_test + dll = WinDLL(_ctypes_test.__file__) + # We load the same function both via ordinal and name + func_ord = dll[2] + func_name = dll.GetString + # addressof gets the address where the function pointer is stored + a_ord = addressof(func_ord) 
+ a_name = addressof(func_name) + f_ord_addr = c_void_p.from_address(a_ord).value + f_name_addr = c_void_p.from_address(a_name).value + self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) - self.assertRaises(AttributeError, dll.__getitem__, 1234) + self.assertRaises(AttributeError, dll.__getitem__, 1234) - if os.name == "nt": - @xfail - def test_1703286_A(self): - from _ctypes import LoadLibrary, FreeLibrary - # On winXP 64-bit, advapi32 loads at an address that does - # NOT fit into a 32-bit integer. FreeLibrary must be able - # to accept this address. + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_A(self): + from _ctypes import LoadLibrary, FreeLibrary + # On winXP 64-bit, advapi32 loads at an address that does + # NOT fit into a 32-bit integer. FreeLibrary must be able + # to accept this address. - # These are tests for http://www.python.org/sf/1703286 - handle = LoadLibrary("advapi32") - FreeLibrary(handle) + # These are tests for http://www.python.org/sf/1703286 + handle = LoadLibrary("advapi32") + FreeLibrary(handle) - @xfail - def test_1703286_B(self): - # Since on winXP 64-bit advapi32 loads like described - # above, the (arbitrarily selected) CloseEventLog function - # also has a high address. 'call_function' should accept - # addresses so large. - from _ctypes import call_function - advapi32 = windll.advapi32 - # Calling CloseEventLog with a NULL argument should fail, - # but the call should not segfault or so. 
- self.assertEqual(0, advapi32.CloseEventLog(None)) - windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p - windll.kernel32.GetProcAddress.restype = c_void_p - proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog") - self.assertTrue(proc) - # This is the real test: call the function via 'call_function' - self.assertEqual(0, call_function(proc, (None,))) + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_B(self): + # Since on winXP 64-bit advapi32 loads like described + # above, the (arbitrarily selected) CloseEventLog function + # also has a high address. 'call_function' should accept + # addresses so large. + from _ctypes import call_function + advapi32 = windll.advapi32 + # Calling CloseEventLog with a NULL argument should fail, + # but the call should not segfault or so. + self.assertEqual(0, advapi32.CloseEventLog(None)) + windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p + windll.kernel32.GetProcAddress.restype = c_void_p + proc = windll.kernel32.GetProcAddress(advapi32._handle, + "CloseEventLog") + self.assertTrue(proc) + # This is the real test: call the function via 'call_function' + self.assertEqual(0, call_function(proc, (None,))) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -45,17 +45,21 @@ raise ValueError("%s not found" % (name,)) class MachOTest(unittest.TestCase): - if sys.platform == "darwin": - def test_find(self): + @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test') + def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + self.assertEqual(find_lib('pthread'), + '/usr/lib/libSystem.B.dylib') - result = find_lib('z') - self.assertTrue(result.endswith('.dylib')) + result = find_lib('z') + # Issue #21093: dyld 
default search path includes $HOME/lib and + # /usr/local/lib before /usr/lib, which caused test failures if + # a local copy of libz exists in one of them. Now ignore the head + # of the path. + self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertEqual(find_lib('IOKit'), + '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_memfunctions.py b/lib-python/2.7/ctypes/test/test_memfunctions.py --- a/lib-python/2.7/ctypes/test/test_memfunctions.py +++ b/lib-python/2.7/ctypes/test/test_memfunctions.py @@ -1,17 +1,19 @@ import sys import unittest from ctypes import * +from ctypes.test import need_symbol class MemFunctionsTest(unittest.TestCase): -## def test_overflow(self): -## # string_at and wstring_at must use the Python calling -## # convention (which acquires the GIL and checks the Python -## # error flag). Provoke an error and catch it; see also issue -## # #3554: -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: wstring_at(u"foo", sys.maxint - 1)) -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: string_at("foo", sys.maxint - 1)) + @unittest.skip('test disabled') + def test_overflow(self): + # string_at and wstring_at must use the Python calling + # convention (which acquires the GIL and checks the Python + # error flag). 
Provoke an error and catch it; see also issue + # #3554: + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: wstring_at(u"foo", sys.maxint - 1)) + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: string_at("foo", sys.maxint - 1)) def test_memmove(self): # large buffers apparently increase the chance that the memory @@ -60,21 +62,17 @@ self.assertEqual(string_at("foo bar", 8), "foo bar\0") self.assertEqual(string_at("foo bar", 3), "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_wstring_at(self): - p = create_unicode_buffer("Hello, World") - a = create_unicode_buffer(1000000) - result = memmove(a, p, len(p) * sizeof(c_wchar)) - self.assertEqual(a.value, "Hello, World") + @need_symbol('create_unicode_buffer') + def test_wstring_at(self): + p = create_unicode_buffer("Hello, World") + a = create_unicode_buffer(1000000) + result = memmove(a, p, len(p) * sizeof(c_wchar)) + self.assertEqual(a.value, "Hello, World") - self.assertEqual(wstring_at(a), "Hello, World") - self.assertEqual(wstring_at(a, 5), "Hello") - self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") - self.assertEqual(wstring_at(a, 0), "") + self.assertEqual(wstring_at(a), "Hello, World") + self.assertEqual(wstring_at(a, 5), "Hello") + self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") + self.assertEqual(wstring_at(a, 0), "") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -83,12 +83,13 @@ self.assertRaises(TypeError, t, "") self.assertRaises(TypeError, t, None) -## def test_valid_ranges(self): -## # invalid values of the correct type -## # raise ValueError (not OverflowError) -## for t, (l, h) in zip(unsigned_types, unsigned_ranges): -## self.assertRaises(ValueError, t, l-1) -## self.assertRaises(ValueError, t, h+1) + 
@unittest.skip('test disabled') + def test_valid_ranges(self): + # invalid values of the correct type + # raise ValueError (not OverflowError) + for t, (l, h) in zip(unsigned_types, unsigned_ranges): + self.assertRaises(ValueError, t, l-1) + self.assertRaises(ValueError, t, h+1) @xfail def test_from_param(self): @@ -185,10 +186,10 @@ a = array(t._type_, [3.14]) v = t.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) a[0] = 2.3456e17 self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) def test_char_from_address(self): from ctypes import c_char @@ -197,22 +198,23 @@ a = array('c', 'x') v = c_char.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is c_char) + self.assertIs(type(v), c_char) a[0] = '?' self.assertEqual(v.value, a[0]) # array does not support c_bool / 't' - # def test_bool_from_address(self): - # from ctypes import c_bool - # from array import array - # a = array(c_bool._type_, [True]) - # v = t.from_address(a.buffer_info()[0]) - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) - # a[0] = False - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) + @unittest.skip('test disabled') + def test_bool_from_address(self): + from ctypes import c_bool + from array import array + a = array(c_bool._type_, [True]) + v = t.from_address(a.buffer_info()[0]) + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) + a[0] = False + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
@@ -230,8 +232,9 @@ if (hasattr(t, "__ctype_le__")): self.assertRaises(OverflowError, t.__ctype_le__, big_int) -## def test_perf(self): -## check_perf() + @unittest.skip('test disabled') + def test_perf(self): + check_perf() from ctypes import _SimpleCData class c_int_S(_SimpleCData): diff --git a/lib-python/2.7/ctypes/test/test_objects.py b/lib-python/2.7/ctypes/test/test_objects.py --- a/lib-python/2.7/ctypes/test/test_objects.py +++ b/lib-python/2.7/ctypes/test/test_objects.py @@ -59,12 +59,9 @@ import ctypes.test.test_objects class TestCase(unittest.TestCase): - if sys.hexversion > 0x02040000: - # Python 2.3 has no ELLIPSIS flag, so we don't test with this - # version: - def test(self): - doctest.testmod(ctypes.test.test_objects) + def test(self): + failures, tests = doctest.testmod(ctypes.test.test_objects) + self.assertFalse(failures, 'doctests failed, see output above') if __name__ == '__main__': - if sys.hexversion > 0x02040000: - doctest.testmod(ctypes.test.test_objects) + doctest.testmod(ctypes.test.test_objects) diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py --- a/lib-python/2.7/ctypes/test/test_parameters.py +++ b/lib-python/2.7/ctypes/test/test_parameters.py @@ -1,4 +1,5 @@ import unittest, sys +from ctypes.test import need_symbol from ctypes.test import xfail @@ -38,10 +39,9 @@ self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") - try: - from ctypes import c_wchar_p - except ImportError: - return + @need_symbol('c_wchar_p') + def test_subclasses_c_wchar_p(self): + from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): @@ -58,7 +58,7 @@ # c_char_p.from_param on a Python String packs the string # into a cparam object s = "123" - self.assertTrue(c_char_p.from_param(s)._obj is s) + self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii 
self.assertEqual(c_char_p.from_param(u"123")._obj, "123") @@ -69,15 +69,11 @@ # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p("123") - self.assertTrue(c_char_p.from_param(a) is a) + self.assertIs(c_char_p.from_param(a), a) + @need_symbol('c_wchar_p') def test_cw_strings(self): - from ctypes import byref - try: - from ctypes import c_wchar_p - except ImportError: -## print "(No c_wchar_p)" - return + from ctypes import byref, c_wchar_p s = u"123" if sys.platform == "win32": self.assertTrue(c_wchar_p.from_param(s)._obj is s) @@ -150,9 +146,6 @@ self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) -## def test_performance(self): -## check_perf() - def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py --- a/lib-python/2.7/ctypes/test/test_pep3118.py +++ b/lib-python/2.7/ctypes/test/test_pep3118.py @@ -95,6 +95,10 @@ class aUnion(Union): _fields_ = [("a", c_int)] +class StructWithArrays(Structure): + _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)] + + class Incomplete(Structure): pass @@ -144,10 +148,10 @@ ## arrays and pointers - (c_double * 4, "(4) Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72944:7d5bcaa33e27 Date: 2014-08-20 17:30 -0700 http://bitbucket.org/pypy/pypy/changeset/7d5bcaa33e27/ Log: Added two missing files to the lib-python conftest, also delete some deadcode diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -265,6 +265,7 @@ RegrTest('test_imageop.py'), RegrTest('test_imaplib.py'), RegrTest('test_imgfile.py'), + RegrTest('test_imghdr.py'), RegrTest('test_imp.py', core=True, usemodules='thread'), RegrTest('test_import.py', core=True), RegrTest('test_importhooks.py', core=True), @@ -397,6 +398,7 @@ 
RegrTest('test_socketserver.py', usemodules='thread'), RegrTest('test_softspace.py', core=True), RegrTest('test_sort.py', core=True), + RegrTest('test_spwd.py'), RegrTest('test_sqlite.py', usemodules="thread _rawffi zlib"), RegrTest('test_ssl.py', usemodules='_ssl _socket select'), RegrTest('test_startfile.py'), @@ -543,8 +545,6 @@ # invoking in a separate process: py.py TESTFILE # import os -import time -import getpass class ReallyRunFileExternal(py.test.collect.Item): class ExternalFailure(Exception): @@ -663,17 +663,11 @@ timedout = test_stderr.rfind("KeyboardInterrupt") != -1 if test_stderr.rfind(26*"=" + "skipped" + 26*"=") != -1: skipped = True - outcome = 'OK' if not exit_status: # match "FAIL" but not e.g. "FAILURE", which is in the output of a # test in test_zipimport_support.py if re.search(r'\bFAIL\b', test_stdout) or re.search('[^:]ERROR', test_stderr): - outcome = 'FAIL' exit_status = 2 - elif timedout: - outcome = "T/O" - else: - outcome = "ERR" return skipped, exit_status, test_stdout, test_stderr From noreply at buildbot.pypy.org Thu Aug 21 03:19:33 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 21 Aug 2014 03:19:33 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Initial implementation of operator._compare_digest Message-ID: <20140821011933.6CC911C1486@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72945:6c4fc35d1793 Date: 2014-08-20 18:19 -0700 http://bitbucket.org/pypy/pypy/changeset/6c4fc35d1793/ Log: Initial implementation of operator._compare_digest diff --git a/pypy/module/operator/__init__.py b/pypy/module/operator/__init__.py --- a/pypy/module/operator/__init__.py +++ b/pypy/module/operator/__init__.py @@ -38,7 +38,9 @@ 'ilshift', 'imod', 'imul', 'ior', 'ipow', 'irepeat', 'irshift', 'isub', 'itruediv', 'ixor', '_length_hint'] - interpleveldefs = {} + interpleveldefs = { + '_compare_digest': 'interp_operator.compare_digest', + } for name in interp_names: interpleveldefs[name] = 
'interp_operator.%s' % name diff --git a/pypy/module/operator/interp_operator.py b/pypy/module/operator/interp_operator.py --- a/pypy/module/operator/interp_operator.py +++ b/pypy/module/operator/interp_operator.py @@ -1,6 +1,9 @@ -from pypy.interpreter.error import OperationError +from rpython.rlib.objectmodel import specialize + +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec + def index(space, w_a): return space.index(w_a) @@ -70,7 +73,7 @@ def invert(space, w_obj,): 'invert(a) -- Same as ~a.' - return space.invert(w_obj) + return space.invert(w_obj) def isCallable(space, w_obj): 'isCallable(a) -- Same as callable(a).' @@ -96,7 +99,7 @@ def lshift(space, w_a, w_b): 'lshift(a, b) -- Same as a << b.' - return space.lshift(w_a, w_b) + return space.lshift(w_a, w_b) def lt(space, w_a, w_b): 'lt(a, b) -- Same as a> b.' - return space.rshift(w_a, w_b) + return space.rshift(w_a, w_b) # sequenceIncludes @@ -150,7 +153,7 @@ def sub(space, w_a, w_b): 'sub(a, b) -- Same as a - b.' - return space.sub(w_a, w_b) + return space.sub(w_a, w_b) def truediv(space, w_a, w_b): 'truediv(a, b) -- Same as a / b when __future__.division is in effect.' @@ -184,7 +187,7 @@ def ilshift(space, w_a, w_b): 'ilshift(a, b) -- Same as a <<= b.' - return space.inplace_lshift(w_a, w_b) + return space.inplace_lshift(w_a, w_b) def imod(space, w_a, w_b): 'imod(a, b) -- Same as a %= b.' @@ -204,11 +207,11 @@ def irshift(space, w_a, w_b): 'irshift(a, b) -- Same as a >>= b.' - return space.inplace_rshift(w_a, w_b) + return space.inplace_rshift(w_a, w_b) def isub(space, w_a, w_b): 'isub(a, b) -- Same as a -= b.' - return space.inplace_sub(w_a, w_b) + return space.inplace_sub(w_a, w_b) def itruediv(space, w_a, w_b): 'itruediv(a, b) -- Same as a /= b when __future__.division is in effect.' 
@@ -246,3 +249,33 @@ @unwrap_spec(default=int) def _length_hint(space, w_iterable, default): return space.wrap(space.length_hint(w_iterable, default)) + +def compare_digest(space, w_a, w_b): + if ( + space.isinstance_w(w_a, space.w_unicode) and + space.isinstance_w(w_b, space.w_unicode) + ): + return space.wrap(tscmp(space.unicode_w(w_a), space.unicode_w(w_b))) + if ( + space.isinstance_w(w_a, space.w_unicode) or + space.isinstance_w(w_b, space.w_unicode) + ): + raise oefmt( + space.w_TypeError, + "unsupported operand types(s) or combination of types: '%N' and '%N'", + w_a, + w_b, + ) + else: + return space.wrap(tscmp(space.bufferstr_w(w_a), space.bufferstr_w(w_b))) + + + at specialize.argtype(0, 1) +def tscmp(a, b): + len_a = len(a) + len_b = len(b) + length = min(len(a), len(b)) + res = len_a ^ len_b + for i in xrange(length): + res |= ord(a[i]) ^ ord(b[i]) + return res == 0 diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -15,7 +15,7 @@ assert a.get3("foobar") == "b" assert a.getx(*(a,)) == 5 assert a.get3(obj="foobar") == "b" - + def test_getter_multiple_gest(self): import operator @@ -197,3 +197,110 @@ assert operator.__index__(42) == 42 exc = raises(TypeError, operator.index, "abc") assert str(exc.value) == "'str' object cannot be interpreted as an index" + + def test_compare_digest(self): + import operator + + # Testing input type exception handling + a, b = 100, 200 + raises(TypeError, operator._compare_digest, a, b) + a, b = 100, b"foobar" + raises(TypeError, operator._compare_digest, a, b) + a, b = b"foobar", 200 + raises(TypeError, operator._compare_digest, a, b) + a, b = u"foobar", b"foobar" + raises(TypeError, operator._compare_digest, a, b) + a, b = b"foobar", u"foobar" + raises(TypeError, operator._compare_digest, a, b) + + # Testing bytes of different lengths + a, b = b"foobar", b"foo" + assert not 
operator._compare_digest(a, b) + a, b = b"\xde\xad\xbe\xef", b"\xde\xad" + assert not operator._compare_digest(a, b) + + # Testing bytes of same lengths, different values + a, b = b"foobar", b"foobaz" + assert not operator._compare_digest(a, b) + a, b = b"\xde\xad\xbe\xef", b"\xab\xad\x1d\xea" + assert not operator._compare_digest(a, b) + + # Testing bytes of same lengths, same values + a, b = b"foobar", b"foobar" + assert operator._compare_digest(a, b) + a, b = b"\xde\xad\xbe\xef", b"\xde\xad\xbe\xef" + assert operator._compare_digest(a, b) + + # Testing bytearrays of same lengths, same values + a, b = bytearray(b"foobar"), bytearray(b"foobar") + assert operator._compare_digest(a, b) + + # Testing bytearrays of diffeent lengths + a, b = bytearray(b"foobar"), bytearray(b"foo") + assert not operator._compare_digest(a, b) + + # Testing bytearrays of same lengths, different values + a, b = bytearray(b"foobar"), bytearray(b"foobaz") + assert not operator._compare_digest(a, b) + + # Testing byte and bytearray of same lengths, same values + a, b = bytearray(b"foobar"), b"foobar" + assert operator._compare_digest(a, b) + assert operator._compare_digest(b, a) + + # Testing byte bytearray of diffeent lengths + a, b = bytearray(b"foobar"), b"foo" + assert not operator._compare_digest(a, b) + assert not operator._compare_digest(b, a) + + # Testing byte and bytearray of same lengths, different values + a, b = bytearray(b"foobar"), b"foobaz" + assert not operator._compare_digest(a, b) + assert not operator._compare_digest(b, a) + + # Testing str of same lengths + a, b = "foobar", "foobar" + assert operator._compare_digest(a, b) + + # Testing str of diffeent lengths + a, b = "foo", "foobar" + assert not operator._compare_digest(a, b) + + # Testing bytes of same lengths, different values + a, b = "foobar", "foobaz" + assert not operator._compare_digest(a, b) + + # Testing error cases + a, b = u"foobar", b"foobar" + raises(TypeError, operator._compare_digest, a, b) + a, b = 
b"foobar", u"foobar" + raises(TypeError, operator._compare_digest, a, b) + a, b = b"foobar", 1 + raises(TypeError, operator._compare_digest, a, b) + a, b = 100, 200 + raises(TypeError, operator._compare_digest, a, b) + a, b = "fooä", "fooä" + assert operator._compare_digest(a, b) + + # subclasses are supported by ignore __eq__ + class mystr(str): + def __eq__(self, other): + return False + + a, b = mystr("foobar"), mystr("foobar") + assert operator._compare_digest(a, b) + a, b = mystr("foobar"), "foobar" + assert operator._compare_digest(a, b) + a, b = mystr("foobar"), mystr("foobaz") + assert not operator._compare_digest(a, b) + + class mybytes(bytes): + def __eq__(self, other): + return False + + a, b = mybytes(b"foobar"), mybytes(b"foobar") + assert operator._compare_digest(a, b) + a, b = mybytes(b"foobar"), b"foobar" + assert operator._compare_digest(a, b) + a, b = mybytes(b"foobar"), mybytes(b"foobaz") + assert not operator._compare_digest(a, b) From noreply at buildbot.pypy.org Thu Aug 21 06:01:22 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 21 Aug 2014 06:01:22 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: bump the version to 2.7.8 Message-ID: <20140821040122.B54A21C14FF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72946:9885932c8170 Date: 2014-08-20 21:00 -0700 http://bitbucket.org/pypy/pypy/changeset/9885932c8170/ Log: bump the version to 2.7.8 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -21,12 +21,12 @@ /* Version parsed out into numeric values */ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 -#define PY_MICRO_VERSION 6 +#define PY_MICRO_VERSION 8 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.6" +#define PY_VERSION "2.7.8" /* PyPy version as a string */ #define 
PYPY_VERSION "2.4.0-alpha0" diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -6,7 +6,7 @@ from pypy.interpreter import gateway #XXX # the release serial 42 is not in range(16) -CPYTHON_VERSION = (2, 7, 6, "final", 42) +CPYTHON_VERSION = (2, 7, 8, "final", 42) #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h From noreply at buildbot.pypy.org Thu Aug 21 10:53:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 21 Aug 2014 10:53:51 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: two typos + regen pdf Message-ID: <20140821085351.C323D1C1482@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5390:d7879ef523ce Date: 2014-08-21 10:53 +0200 http://bitbucket.org/pypy/extradoc/changeset/d7879ef523ce/ Log: two typos + regen pdf diff --git a/talk/uct2014/talk.pdf b/talk/uct2014/talk.pdf index 2bce1f4d33135886db6d9bcd9a0256a4002b2442..8a7fa878b0d0c94a8e5fa68af66284f198d8b0a1 GIT binary patch [cut] diff --git a/talk/uct2014/talk.rst b/talk/uct2014/talk.rst --- a/talk/uct2014/talk.rst +++ b/talk/uct2014/talk.rst @@ -44,7 +44,7 @@ What's RPython? --------------- -* implementation language for PyPy (and other projects, topaz, hippyvm, ...) +* implementation language for PyPy (and other projects: topaz, hippyvm, ...) * a subset of Python that can be statically compiled @@ -122,7 +122,7 @@ * don't write virtual machines by hand -* don't write JITs in hand + * don't write JITs by hand * use tools (PyPy/truffle) From noreply at buildbot.pypy.org Thu Aug 21 12:54:50 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:54:50 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Work in progress. 
Message-ID: <20140821105450.C36111D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1035:4fd727366707 Date: 2014-08-03 13:29 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/4fd727366707/ Log: Work in progress. diff --git a/rstrategies.py b/rstrategies.py new file mode 100644 --- /dev/null +++ b/rstrategies.py @@ -0,0 +1,313 @@ + +import weakref +from rpython.rlib import jit + +class StrategyFactory(object): + _immutable_fields_ = ["strategies[*]"] + + def __init__(self, strategy_root_class, transitions): + self.strategies = [] + self.root_class = strategy_root_class + + for strategy_class, generalized in transitions.items(): + self.strategies.append(strategy_class) + strategy_class._strategy_instance = self.instantiate_empty(strategy_class) + + # Patch root class: Add default handler for visitor + def copy_from_OTHER(self, other): + self.copy_from(other) + funcname = "copy_from_" + strategy_class.__name__ + copy_from_OTHER.func_name = funcname + setattr(self.root_class, funcname, copy_from_OTHER) + + # Patch strategy class: Add polymorphic visitor function + def initiate_copy_into(self, other): + getattr(other, funcname)(self) + strategy_class.initiate_copy_into = initiate_copy_into + + self.create_transition(strategy_class, generalized) + + # Instantiate new_strategy_type with size, replace old_strategy with it, + # and return the new instance + def instantiate_and_switch(self, old_strategy, size, new_strategy_type): + raise NotImplementedError("Abstract method") + + # Return a functional but empty instance of strategy_type + def instantiate_empty(self, strategy_type): + raise NotImplementedError("Abstract method") + + def switch_strategy(self, old_strategy, new_strategy_type): + new_instance = self.instantiate_and_switch(old_strategy, old_strategy.size(), new_strategy_type) + old_strategy.initiate_copy_into(new_instance) + return new_instance + + @jit.unroll_safe + def strategy_type_for(self, objects): + 
specialized_strategies = len(self.strategies) + can_handle = [True] * specialized_strategies + for obj in objects: + if specialized_strategies <= 1: + break + for i, strategy in enumerate(self.strategies): + if can_handle[i] and not strategy._strategy_instance.check_can_handle(obj): + can_handle[i] = False + specialized_strategies -= 1 + for i, strategy_type in enumerate(self.strategies): + if can_handle[i]: + return strategy_type + + def cannot_handle_value(self, old_strategy, index0, value): + strategy_type = old_strategy.generalized_strategy_for(value) + new_instance = self.switch_strategy(old_strategy, strategy_type) + new_instance.store(index0, value) + + def _freeze_(self): + # Instance will be frozen at compile time, making accesses constant. + return True + + def create_transition(self, strategy_class, generalized): + # Patch strategy class: Add generalized_strategy_for + def generalized_strategy_for(self, value): + for strategy in generalized: + if strategy._strategy_instance.check_can_handle(value): + return strategy + raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) + strategy_class.generalized_strategy_for = generalized_strategy_for + +class AbstractStrategy(object): + # == Required: + # strategy_factory(self) - Access to StorageFactory + # __init__(...) 
- Constructor should invoke the provided init_strategy(self, size) method + + def init_strategy(self, initial_size): + pass + + def store(self, index0, value): + raise NotImplementedError("Abstract method") + + def fetch(self, index0): + raise NotImplementedError("Abstract method") + + def size(self): + raise NotImplementedError("Abstract method") + + def check_can_handle(self, value): + raise NotImplementedError("Abstract method") + + def cannot_handle_value(self, index0, value): + self.strategy_factory().cannot_handle_value(self, index0, value) + + def initiate_copy_into(self, other): + other.copy_from(self) + + def copy_from(self, other): + assert self.size() == other.size() + for i in range(self.size()): + self.copy_field_from(i, other) + + def copy_field_from(self, n0, other): + self.store(n0, other.fetch(n0)) + +# ============== Special Strategies with no storage array ============== + +class EmptyStrategy(AbstractStrategy): + # == Required: + # See AbstractStrategy + + def fetch(self, index0): + raise IndexError + def store(self, index0, value): + self.cannot_handle_value(index0, value) + def size(self): + return 0 + def check_can_handle(self, value): + return False + +class SingleValueStrategy(AbstractStrategy): + _immutable_fields_ = ["_size", "val"] + # == Required: + # See AbstractStrategy + # check_index_*(...) 
- use mixin SafeIndexingMixin or UnsafeIndexingMixin + # value(self) - the single value contained in this strategy + + def init_strategy(self, initial_size): + self._size = initial_size + self.val = self.value() + def fetch(self, index0): + self.check_index_fetch(index0) + return self.val + def store(self, index0, value): + self.check_index_store(index0) + if self.val is value: + return + self.cannot_handle_value(index0, value) + def size(self): + return self._size + def check_can_handle(self, value): + return value is self.val + +# ============== Basic strategies with storage ============== + +class StrategyWithStorage(AbstractStrategy): + _immutable_fields_ = ["storage"] + # == Required: + # See AbstractStrategy + # check_index_*(...) - use mixin SafeIndexingMixin, UnsafeIndexingMixin or VariableSizeMixin + # default_value(self) - The value to be initially contained in this strategy + + def init_strategy(self, initial_size): + self.init_StrategyWithStorage(initial_size) + + def init_StrategyWithStorage(self, initial_size): + default = self._unwrap(self.default_value()) + self.storage = [default] * initial_size + + def store(self, index0, wrapped_value): + self.check_index_store(index0) + if self.check_can_handle(wrapped_value): + unwrapped = self._unwrap(wrapped_value) + self.storage[index0] = unwrapped + else: + self.cannot_handle_value(index0, wrapped_value) + + def fetch(self, index0): + self.check_index_fetch(index0) + unwrapped = self.storage[index0] + return self._wrap(unwrapped) + + def _wrap(self, value): + raise NotImplementedError("Abstract method") + + def _unwrap(self, value): + raise NotImplementedError("Abstract method") + + def size(self): + return len(self.storage) + +class GenericStrategy(StrategyWithStorage): + # == Required: + # See StrategyWithStorage + + def _wrap(self, value): + return value + def _unwrap(self, value): + return value + def check_can_handle(self, wrapped_value): + return True + +class WeakGenericStrategy(StrategyWithStorage): 
+ # == Required: + # See StrategyWithStorage + + def _wrap(self, value): + return value() or self.default_value() + def _unwrap(self, value): + assert value is not None + return weakref.ref(value) + def check_can_handle(self, wrapped_value): + return True + +# ============== Mixins for StrategyWithStorage ============== + +class SafeIndexingMixin(object): + def check_index_store(self, index0): + self.check_index(index0) + def check_index_fetch(self, index0): + self.check_index(index0) + def check_index(self, index0): + if index0 < 0 or index0 >= self.size(): + raise IndexError + +class UnsafeIndexingMixin(object): + def check_index_store(self, index0): + pass + def check_index_fetch(self, index0): + pass + +class VariableSizeMixin(object): + # This can be used with StrategyWithStorage + # to add functionality for resizing the storage. + # Can be combined with either *IndexingMixin or *AutoresizeMixin + + @jit.unroll_safe + def grow(self, by): + if by <= 0: + raise ValueError + for _ in range(by): + self.storage.append(self.default_value()) + + @jit.unroll_safe + def shrink(self, by): + if by <= 0: + raise ValueError + if by > self.size(): + raise ValueError + for _ in range(by): + self.storage.pop() + +class SafeAutoresizeMixin(object): + def check_index_fetch(self, index0): + if index0 < 0 or index0 > self.size(): + raise IndexError + def check_index_store(self, index0): + size = self.size() + if index0 < 0: + raise IndexError + if index0 >= size: + self.grow(index0 - size + 1) + +class UnsafeAutoresizeMixin(object): + def check_index_fetch(self, index0): + pass + def check_index_store(self, index0): + size = self.size() + if index0 >= size: + self.grow(index0 - size) + +# ============== Specialized Storage Strategies ============== + +class SpecializedStrategy(StrategyWithStorage): + # == Required: + # See StrategyWithStorage + # wrap(self, value) - Return a boxed object for the primitive value + # unwrap(self, value) - Return the unboxed primitive value of value 
+ + def _unwrap(self, value): + return self.unwrap(value) + def _wrap(self, value): + return self.wrap(value) + +class SingleTypeStrategy(SpecializedStrategy): + # == Required Functions: + # See SpecializedStrategy + # contained_type - The wrapped type that can be stored in this strategy + + def check_can_handle(self, value): + return isinstance(value, self.contained_type) + +class TaggingStrategy(SingleTypeStrategy): + """This strategy uses a special tag value to represent a single additional object.""" + # == Required: + # See SingleTypeStrategy + # wrapped_tagged_value(self) - The tagged object + # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object + + def init_strategy(self, initial_size): + self.tag = self.unwrapped_tagged_value() + self.w_tag = self.wrapped_tagged_value() + self.init_StrategyWithStorage(initial_size) + + def check_can_handle(self, value): + return value is self.w_tag or \ + (isinstance(value, self.contained_type) and \ + self.unwrap(value) != self.tag) + + def _unwrap(self, value): + if value is self.w_tag: + return self.tag + return self.unwrap(value) + + def _wrap(self, value): + if value == self.tag: + return self.w_tag + return self.wrap(value) diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -578,10 +578,8 @@ self.initialize_storage(space, size, weak) def initialize_storage(self, space, size, weak=False): - from spyvm.storage import empty_storage - storage = empty_storage(space, self, size, weak) - self.store_shadow(storage) - self.log_storage("Initialized") + storage = space.strategy_factory.empty_storage(self, size, weak) + self.store_shadow(storage, operation="Initialized") def fillin(self, space, g_self): W_AbstractObjectWithClassReference.fillin(self, space, g_self) @@ -589,12 +587,10 @@ for g_obj in g_self.pointers: g_obj.fillin(space) pointers = g_self.get_pointers() - # TODO -- Also handle weak objects loaded from images. 
- from spyvm.storage import find_storage_for_objects - storage = find_storage_for_objects(space, pointers, g_self.isweak())(space, self, len(pointers)) - self.store_shadow(storage) + storage_type = space.strategy_factory.strategy_type_for(pointers, g_self.isweak()) + storage = storage_type(space, self, len(pointers)) + self.store_shadow(storage, operation="Filledin", log_classname=False) self.store_all(space, pointers) - self.log_storage("Filledin", log_classname=False) def is_weak(self): from storage import WeakListStorageShadow @@ -620,12 +616,6 @@ self.store_shadow(new_shadow) old_shadow.copy_into(new_shadow) new_shadow.attach_shadow() - self.log_storage("Switched", old_shadow, w_element=w_element) - - def store_with_new_storage(self, new_storage, n0, w_val): - space = self.space() - self.switch_shadow(new_storage(space, self, self.size()), w_element=w_val) - self.store(space, n0, w_val) def space(self): return self.assert_shadow().space @@ -685,8 +675,10 @@ def instsize(self): return self.class_shadow(self.space()).instsize() - def store_shadow(self, shadow): + def store_shadow(self, shadow, operation="", w_element=None, log_classname=True): + old_shadow = self.shadow self.shadow = shadow + self.log_storage(operation, old_shadow, log_classname, w_element) def _get_shadow(self): return self.shadow @@ -696,8 +688,7 @@ old_shadow = self._get_shadow() shadow = old_shadow if not isinstance(old_shadow, TheClass): - shadow = TheClass(space, self, old_shadow.size()) - self.switch_shadow(shadow) + shadow = space.strategy_factory.switch_strategy(old_shadow, TheClass) return shadow def get_shadow(self, space): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, model_display, wrapper, version, display +from spyvm import constants, model, model_display, wrapper, version, display, storage from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError 
from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin @@ -66,6 +66,7 @@ w_nil.w_class = None self.add_bootstrap_object("w_nil", w_nil) + self.strategy_factory = storage.StrategyFactory(self) self.make_bootstrap_classes() self.make_bootstrap_objects() diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -1,9 +1,10 @@ -import sys, weakref +import weakref from spyvm import model, version, constants from spyvm.version import elidable_for_version from rpython.rlib import objectmodel, jit from rpython.rlib.objectmodel import import_from_mixin +import rstrategies as rstrat class AbstractShadow(object): """A shadow is an optional extra bit of information that @@ -60,211 +61,94 @@ # ========== Storage classes implementing storage strategies ========== class AbstractStorageShadow(AbstractShadow): - _attrs_ = [] repr_classname = "AbstractStorageShadow" + import_from_mixin(rstrat.SafeIndexingMixin) - def store(self, n0, w_val): - if self.can_contain(w_val): - return self.do_store(n0, w_val) - new_storage = self.generalized_strategy_for(w_val) - return self._w_self.store_with_new_storage(new_storage, n0, w_val) - def can_contain(self, w_val): - return self.static_can_contain(self.space, w_val) - @staticmethod - def static_can_contain(space, w_val): - raise NotImplementedError() - def do_store(self, n0, w_val): - raise NotImplementedError() - def generalized_strategy_for(self, w_val): - raise NotImplementedError() + def __init__(self, space, w_self, size): + AbstractShadow.__init__(self, space, w_self, size) + self.init_strategy(size) - def copy_from_AllNil(self, all_nil_storage): - pass # Already initialized - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - w_val = other_shadow.fetch(i) - if not w_val.is_nil(self.space): # nil fields already initialized - self.store(i, w_val) + def strategy_factory(self): + 
return self.space.strategy_factory + + def copy_from_AllNilStrategy(self, all_nil_storage): + pass # Fields already initialized to nil class AllNilStorageShadow(AbstractStorageShadow): repr_classname = "AllNilStorageShadow" - _attrs_ = ['_size'] - _immutable_fields_ = ['_size'] - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self._size = size - def fetch(self, n0): - if n0 >= self._size: - raise IndexError - return self.space.w_nil - def copy_into(self, other_shadow): - other_shadow.copy_from_AllNil(self) - def do_store(self, n0, w_value): - pass - def size(self): - return self._size - def generalized_strategy_for(self, w_val): - return find_storage_for_objects(self.space, [w_val]) - @staticmethod - def static_can_contain(space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) - -class AbstractValueOrNilStorageMixin(object): - # Class must provide: wrap, unwrap, nil_value, is_nil_value, wrapper_class - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] - - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.storage = [self.nil_value] * size - - def size(self): - return len(self.storage) - - def generalized_strategy_for(self, w_val): - return ListStorageShadow - - def fetch(self, n0): - val = self.storage[n0] - if self.is_nil_value(val): - return self.space.w_nil - else: - return self.wrap(self.space, val) - - def do_store(self, n0, w_val): - if w_val.is_nil(self.space): - self.storage[n0] = self.nil_value - else: - self.storage[n0] = self.unwrap(self.space, w_val) - -# This is to avoid code duplication - at objectmodel.specialize.arg(0) -def _value_or_nil_can_handle(cls, space, w_val): - return isinstance(w_val, model.W_Object) and w_val.is_nil(space) or \ - (isinstance(w_val, cls.wrapper_class) \ - and not cls.is_nil_value(cls.unwrap(space, w_val))) + import_from_mixin(rstrat.SingleValueStrategy) + def value(self): return 
self.space.w_nil class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): repr_classname = "SmallIntegerOrNilStorageShadow" - nil_value = constants.MAXINT - wrapper_class = model.W_SmallInteger - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(SmallIntegerOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == SmallIntegerOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_int(val) - @staticmethod - def unwrap(space, w_val): - return space.unwrap_int(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_SmallIntegerOrNil(self) + import_from_mixin(rstrat.TaggingStrategy) + contained_type = model.W_SmallInteger + def wrap(self, val): return self.space.wrap_int(val) + def unwrap(self, w_val): return self.space.unwrap_int(w_val) + def default_value(self): return self.space.w_nil + def wrapped_tagged_value(self): return self.space.w_nil + def unwrapped_tagged_value(self): return constants.MAXINT class FloatOrNilStorageShadow(AbstractStorageShadow): repr_classname = "FloatOrNilStorageShadow" - nil_value = sys.float_info.max - wrapper_class = model.W_Float - import_from_mixin(AbstractValueOrNilStorageMixin) - - @staticmethod - def static_can_contain(space, w_val): - return _value_or_nil_can_handle(FloatOrNilStorageShadow, space, w_val) - @staticmethod - def is_nil_value(val): - return val == FloatOrNilStorageShadow.nil_value - @staticmethod - def wrap(space, val): - return space.wrap_float(val) - @staticmethod - def unwrap(space, w_val): - return space.unwrap_float(w_val) - def copy_into(self, other_shadow): - other_shadow.copy_from_FloatOrNil(self) - -def empty_storage(space, w_self, size, weak=False): - if weak: - return WeakListStorageShadow(space, w_self, size) - if space.no_specialized_storage.is_set(): - return ListStorageShadow(space, w_self, size) - return AllNilStorageShadow(space, 
w_self, size) - - at jit.unroll_safe -def find_storage_for_objects(space, vars, weak=False): - if weak: - return WeakListStorageShadow - if space.no_specialized_storage.is_set(): - return ListStorageShadow - specialized_strategies = 3 - all_nil_can_handle = True - small_int_can_handle = True - float_can_handle = True - for w_obj in vars: - if all_nil_can_handle and not AllNilStorageShadow.static_can_contain(space, w_obj): - all_nil_can_handle = False - specialized_strategies = specialized_strategies - 1 - if small_int_can_handle and not SmallIntegerOrNilStorageShadow.static_can_contain(space, w_obj): - small_int_can_handle = False - specialized_strategies = specialized_strategies - 1 - if float_can_handle and not FloatOrNilStorageShadow.static_can_contain(space, w_obj): - float_can_handle = False - specialized_strategies = specialized_strategies - 1 - - if specialized_strategies <= 0: - return ListStorageShadow - - if all_nil_can_handle: - return AllNilStorageShadow - if small_int_can_handle: - return SmallIntegerOrNilStorageShadow - if float_can_handle: - return FloatOrNilStorageShadow - - # If this happens, please look for a bug in the code above. - assert False, "No strategy could be found for list..." 
- -class ListStorageMixin(object): - def __init__(self, space, w_self, size): - AbstractStorageShadow.__init__(self, space, w_self, size) - self.initialize_storage(size) - def size(self): - return len(self.storage) + import_from_mixin(rstrat.TaggingStrategy) + contained_type = model.W_Float + def wrap(self, val): return self.space.wrap_float(val) + def unwrap(self, w_val): return self.space.unwrap_float(w_val) + def default_value(self): return self.space.w_nil + def wrapped_tagged_value(self): return self.space.w_nil + def unwrapped_tagged_value(self): import sys; return sys.float_info.max class ListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] repr_classname = "ListStorageShadow" - import_from_mixin(ListStorageMixin) - - def initialize_storage(self, size): - self.storage = [self.space.w_nil] * size - def fetch(self, n0): - return self.storage[n0] - def store(self, n0, w_value): - self.storage[n0] = w_value + import_from_mixin(rstrat.GenericStrategy) + def default_value(self): return self.space.w_nil class WeakListStorageShadow(AbstractStorageShadow): - _attrs_ = ['storage'] - _immutable_fields_ = ['storage'] repr_classname = "WeakListStorageShadow" - import_from_mixin(ListStorageMixin) + import_from_mixin(rstrat.WeakGenericStrategy) + def default_value(self): return self.space.w_nil - def initialize_storage(self, size): - self.storage = [weakref.ref(self.space.w_nil)] * size - def fetch(self, n0): - weakobj = self.storage[n0] - return weakobj() or self.space.w_nil - def store(self, n0, w_value): - assert w_value is not None - self.storage[n0] = weakref.ref(w_value) - +class StrategyFactory(rstrat.StrategyFactory): + _immutable_fields_ = ["space", "no_specialized_storage?"] + def __init__(self, space): + from spyvm import objspace + self.space = space + self.no_specialized_storage = objspace.ConstantFlag() + rstrat.StrategyFactory.__init__(self, AbstractStorageShadow, { + AllNilStorageShadow: 
[SmallIntegerOrNilStorageShadow, + FloatOrNilStorageShadow, + ListStorageShadow], + SmallIntegerOrNilStorageShadow: [ListStorageShadow], + FloatOrNilStorageShadow: [ListStorageShadow], + }) + + def strategy_type_for(self, objects, weak=False): + if weak: + WeakListStorageShadow + if self.no_specialized_storage.is_set(): + return ListStorageShadow + return rstrat.StrategyFactory.strategy_type_for(self, objects) + + def empty_storage(self, w_self, size, weak=False): + if weak: + return WeakListStorageShadow(self.space, w_self, size) + if self.no_specialized_storage.is_set(): + return ListStorageShadow(self.space, w_self, size) + return AllNilStorageShadow(self.space, w_self, size) + + def instantiate_and_switch(self, old_strategy, size, strategy_class): + w_self = old_strategy.w_self() + instance = strategy_class(self.space, w_self, size) + w_self.store_shadow(instance) + instance.attach_shadow() + return instance + + def instantiate_empty(self, strategy_type): + return strategy_type(self.space, None, 0) + # ========== Other storage classes, non-strategies ========== - + class AbstractRedirectingShadow(AbstractShadow): _attrs_ = ['_w_self_size'] repr_classname = "AbstractRedirectingShadow" diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -129,7 +129,7 @@ elif arg in ["--hacks"]: space.run_spy_hacks.activate() elif arg in ["-S"]: - space.no_specialized_storage.activate() + space.strategy_factory.no_specialized_storage.activate() elif arg in ["-u"]: from spyvm.plugins.vmdebugging import stop_ui_process stop_ui_process() From noreply at buildbot.pypy.org Thu Aug 21 12:54:52 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:54:52 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Implementing rstrategies Message-ID: <20140821105452.0B20B1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: 
r1036:d7e93bfe0dd6 Date: 2014-08-20 16:12 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d7e93bfe0dd6/ Log: Implementing rstrategies diff --git a/rstrategies.py b/rstrategies.py --- a/rstrategies.py +++ b/rstrategies.py @@ -2,15 +2,23 @@ import weakref from rpython.rlib import jit +def collect_subclasses(cls): + "NOT_RPYTHON" + subclasses = [] + for subcls in cls.__subclasses__(): + subclasses.append(subcls) + subclasses.extend(collect_subclasses(subcls)) + return subclasses + class StrategyFactory(object): - _immutable_fields_ = ["strategies[*]"] + _immutable_fields_ = ["xx[*]"] - def __init__(self, strategy_root_class, transitions): - self.strategies = [] - self.root_class = strategy_root_class + def __init__(self, root_class, all_strategy_classes=None): + if all_strategy_classes is None: + all_strategy_classes = collect_subclasses(root_class) + self.strategies = all_strategy_classes - for strategy_class, generalized in transitions.items(): - self.strategies.append(strategy_class) + for strategy_class in self.strategies: strategy_class._strategy_instance = self.instantiate_empty(strategy_class) # Patch root class: Add default handler for visitor @@ -18,14 +26,17 @@ self.copy_from(other) funcname = "copy_from_" + strategy_class.__name__ copy_from_OTHER.func_name = funcname - setattr(self.root_class, funcname, copy_from_OTHER) + setattr(root_class, funcname, copy_from_OTHER) # Patch strategy class: Add polymorphic visitor function def initiate_copy_into(self, other): getattr(other, funcname)(self) strategy_class.initiate_copy_into = initiate_copy_into - - self.create_transition(strategy_class, generalized) + + def setup_strategy_transitions(self, transitions): + "NOT_RPYTHON" + for strategy_class, generalized in transitions.items(): + generalize(generalized)(strategy_class) # Instantiate new_strategy_type with size, replace old_strategy with it, # and return the new instance @@ -39,6 +50,7 @@ def switch_strategy(self, old_strategy, new_strategy_type): 
new_instance = self.instantiate_and_switch(old_strategy, old_strategy.size(), new_strategy_type) old_strategy.initiate_copy_into(new_instance) + new_instance.strategy_switched() return new_instance @jit.unroll_safe @@ -55,6 +67,7 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type + raise Exception("Could not find strategy to handle: %s" % objects) def cannot_handle_value(self, old_strategy, index0, value): strategy_type = old_strategy.generalized_strategy_for(value) @@ -64,24 +77,44 @@ def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. return True - - def create_transition(self, strategy_class, generalized): + +def generalize(generalized): + def decorator(strategy_class): # Patch strategy class: Add generalized_strategy_for + # TODO - optimize this method + @jit.unroll_safe def generalized_strategy_for(self, value): for strategy in generalized: if strategy._strategy_instance.check_can_handle(value): return strategy raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) strategy_class.generalized_strategy_for = generalized_strategy_for + return strategy_class + return decorator + +class AbstractCollection(object): + # == Required: + # store(self, n0, e) + + def strategy_switched(self): pass + def init_strategy(self, initial_size): pass + + def initiate_copy_into(self, other): + other.copy_from(self) + + def copy_from(self, other): + assert self.size() == other.size() + for i in range(self.size()): + self.copy_field_from(i, other) + + def copy_field_from(self, n0, other): + self.store(n0, other.fetch(n0)) class AbstractStrategy(object): # == Required: # strategy_factory(self) - Access to StorageFactory # __init__(...) 
- Constructor should invoke the provided init_strategy(self, size) method - def init_strategy(self, initial_size): - pass - def store(self, index0, value): raise NotImplementedError("Abstract method") @@ -97,17 +130,6 @@ def cannot_handle_value(self, index0, value): self.strategy_factory().cannot_handle_value(self, index0, value) - def initiate_copy_into(self, other): - other.copy_from(self) - - def copy_from(self, other): - assert self.size() == other.size() - for i in range(self.size()): - self.copy_field_from(i, other) - - def copy_field_from(self, n0, other): - self.store(n0, other.fetch(n0)) - # ============== Special Strategies with no storage array ============== class EmptyStrategy(AbstractStrategy): diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -611,12 +611,6 @@ assert shadow, "The shadow has not been initialized yet!" return shadow - def switch_shadow(self, new_shadow, w_element=None): - old_shadow = self.assert_shadow() - self.store_shadow(new_shadow) - old_shadow.copy_into(new_shadow) - new_shadow.attach_shadow() - def space(self): return self.assert_shadow().space @@ -675,7 +669,7 @@ def instsize(self): return self.class_shadow(self.space()).instsize() - def store_shadow(self, shadow, operation="", w_element=None, log_classname=True): + def store_shadow(self, shadow, operation="Switched", w_element=None, log_classname=True): old_shadow = self.shadow self.shadow = shadow self.log_storage(operation, old_shadow, log_classname, w_element) diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -47,15 +47,13 @@ class BitBltShadow(AbstractCachingShadow): + repr_classname = "BitBltShadow" WordSize = 32 MaskTable = [r_uint(0)] for i in xrange(WordSize): MaskTable.append(r_uint((2 ** (i + 1)) - 1)) AllOnes = r_uint(0xFFFFFFFF) - def attach_shadow(self): - pass - def intOrIfNil(self, w_int, i): return intOrIfNil(self.space, w_int, i) @@ -724,6 +722,7 
@@ class FormShadow(AbstractCachingShadow): + repr_classname = "FormShadow" _attrs_ = ["w_bits", "width", "height", "depth", "offsetX", "offsetY", "msb", "pixPerWord", "pitch", "invalid"] @@ -734,7 +733,7 @@ def intOrIfNil(self, w_int, i): return intOrIfNil(self.space, w_int, i) - def attach_shadow(self): + def strategy_switched(self): self.invalid = True if self.size() < 5: return diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -1,5 +1,4 @@ -import weakref from spyvm import model, version, constants from spyvm.version import elidable_for_version from rpython.rlib import objectmodel, jit @@ -14,7 +13,8 @@ _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" - + import_from_mixin(rstrat.AbstractCollection) + def __init__(self, space, w_self, size): self.space = space assert w_self is None or isinstance(w_self, model.W_PointersObject) @@ -29,35 +29,6 @@ else: return "<%s>" % self.repr_classname - def fetch(self, n0): - raise NotImplementedError("Abstract class") - def store(self, n0, w_value): - raise NotImplementedError("Abstract class") - def size(self): - raise NotImplementedError("Abstract class") - - # This will invoke an appropriate copy_from_* method. - # Overwriting this allows optimized transitions between certain storage types. 
- def copy_into(self, other_shadow): - other_shadow.copy_from(self) - - def attach_shadow(self): pass - - def copy_field_from(self, n0, other_shadow): - self.store(n0, other_shadow.fetch(n0)) - - def copy_from(self, other_shadow): - assert self.size() == other_shadow.size() - for i in range(self.size()): - self.copy_field_from(i, other_shadow) - - def copy_from_AllNil(self, all_nil_storage): - self.copy_from(all_nil_storage) - def copy_from_SmallIntegerOrNil(self, small_int_storage): - self.copy_from(small_int_storage) - def copy_from_FloatOrNil(self, float_storage): - self.copy_from(float_storage) - # ========== Storage classes implementing storage strategies ========== class AbstractStorageShadow(AbstractShadow): @@ -74,11 +45,17 @@ def copy_from_AllNilStrategy(self, all_nil_storage): pass # Fields already initialized to nil -class AllNilStorageShadow(AbstractStorageShadow): - repr_classname = "AllNilStorageShadow" - import_from_mixin(rstrat.SingleValueStrategy) - def value(self): return self.space.w_nil +class ListStorageShadow(AbstractStorageShadow): + repr_classname = "ListStorageShadow" + import_from_mixin(rstrat.GenericStrategy) + def default_value(self): return self.space.w_nil +class WeakListStorageShadow(AbstractStorageShadow): + repr_classname = "WeakListStorageShadow" + import_from_mixin(rstrat.WeakGenericStrategy) + def default_value(self): return self.space.w_nil + + at rstrat.generalize([ListStorageShadow]) class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): repr_classname = "SmallIntegerOrNilStorageShadow" import_from_mixin(rstrat.TaggingStrategy) @@ -89,6 +66,7 @@ def wrapped_tagged_value(self): return self.space.w_nil def unwrapped_tagged_value(self): return constants.MAXINT + at rstrat.generalize([ListStorageShadow]) class FloatOrNilStorageShadow(AbstractStorageShadow): repr_classname = "FloatOrNilStorageShadow" import_from_mixin(rstrat.TaggingStrategy) @@ -99,15 +77,14 @@ def wrapped_tagged_value(self): return self.space.w_nil def 
unwrapped_tagged_value(self): import sys; return sys.float_info.max -class ListStorageShadow(AbstractStorageShadow): - repr_classname = "ListStorageShadow" - import_from_mixin(rstrat.GenericStrategy) - def default_value(self): return self.space.w_nil - -class WeakListStorageShadow(AbstractStorageShadow): - repr_classname = "WeakListStorageShadow" - import_from_mixin(rstrat.WeakGenericStrategy) - def default_value(self): return self.space.w_nil + at rstrat.generalize([ + SmallIntegerOrNilStorageShadow, + FloatOrNilStorageShadow, + ListStorageShadow]) +class AllNilStorageShadow(AbstractStorageShadow): + repr_classname = "AllNilStorageShadow" + import_from_mixin(rstrat.SingleValueStrategy) + def value(self): return self.space.w_nil class StrategyFactory(rstrat.StrategyFactory): _immutable_fields_ = ["space", "no_specialized_storage?"] @@ -115,13 +92,7 @@ from spyvm import objspace self.space = space self.no_specialized_storage = objspace.ConstantFlag() - rstrat.StrategyFactory.__init__(self, AbstractStorageShadow, { - AllNilStorageShadow: [SmallIntegerOrNilStorageShadow, - FloatOrNilStorageShadow, - ListStorageShadow], - SmallIntegerOrNilStorageShadow: [ListStorageShadow], - FloatOrNilStorageShadow: [ListStorageShadow], - }) + rstrat.StrategyFactory.__init__(self, AbstractStorageShadow) def strategy_type_for(self, objects, weak=False): if weak: @@ -141,7 +112,6 @@ w_self = old_strategy.w_self() instance = strategy_class(self.space, w_self, size) w_self.store_shadow(instance) - instance.attach_shadow() return instance def instantiate_empty(self, strategy_type): diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -792,7 +792,7 @@ try: monkeypatch.setattr(w_frame.shadow, "_sendSelfSelector", perform_mock) - monkeypatch.setattr(bitblt.BitBltShadow, "attach_shadow", sync_cache_mock) + monkeypatch.setattr(bitblt.BitBltShadow, "strategy_switched", sync_cache_mock) with 
py.test.raises(CallCopyBitsSimulation): prim_table[primitives.BITBLT_COPY_BITS](interp, w_frame.as_context_get_shadow(space), argument_count-1) finally: From noreply at buildbot.pypy.org Thu Aug 21 12:54:58 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:54:58 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Merged. Message-ID: <20140821105458.ECD841D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1037:ef35a189c1ff Date: 2014-08-20 16:13 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/ef35a189c1ff/ Log: Merged. diff too long, truncating to 2000 out of 364571 lines diff --git a/images/Squeak4.5-12568.changes b/images/Squeak4.5-12568.changes deleted file mode 100644 --- a/images/Squeak4.5-12568.changes +++ /dev/null @@ -1,39 +0,0 @@ -'From Squeak4.1 of 17 April 2010 [latest update: #9957] on 17 April 2010 at 5:22:05 pm'! ----STARTUP----{17 April 2010 . 5:21:54 pm} as C:\Squeak\4.0\4.1-final\Squeak4.1.image! Smalltalk appendChangesTo: 'SqueakV41.sources'.! ----QUIT----{17 April 2010 . 5:22:11 pm} Squeak4.1.image priorSource: 89! ----STARTUP----{24 May 2010 . 8:07:26 pm} as C:\Squeak\4.2\Squeak4.1.image! ----SNAPSHOT----{24 May 2010 . 8:08:14 pm} Squeak4.2.image priorSource: 229! !HashedCollection commentStamp: 'ul 4/12/2010 22:37' prior: 0! I am an abstract collection of objects that implement hash and equality in a consitent way. This means that whenever two objects are equal, their hashes have to be equal too. If two objects are equal then I can only store one of them. Hashes are expected to be integers (preferably SmallIntegers). I also expect that the objects contained by me do not change their hashes. If that happens, hash invariants have to be re-established, which can be done by #rehash. Since I'm abstract, no instances of me should exist. My subclasses should implement #scanFor:, #fixCollisionsFrom: and #noCheckNoGrowFillFrom:. 
Instance Variables array: (typically Array or WeakArray) tally: (non-negative) array - An array whose size is a prime number, it's non-nil elements are the elements of the collection, and whose nil elements are empty slots. There is always at least one nil. In fact I try to keep my "load" at 75% or less so that hashing will work well. tally - The number of elements in the collection. The array size is always greater than this. Implementation details: I implement a hash table which uses open addressing with linear probing as the method of collision resolution. Searching for an element or a free slot for an element is done by #scanFor: which should return the index of the slot in array corresponding to it's argument. When an element is removed #fixCollisionsFrom: should rehash all elements in array between the original index of the removed element, wrapping around after the last slot until reaching an empty slot. My maximum load factor (75%) is hardcoded in #atNewIndex:put:, so it can only be changed by overriding that method. When my load factor reaches this limit I replace my array with a larger one (see #grow) ensuring that my load factor will be less than or equal to 50%. The new array is filled by #noCheckNoGrowFillFrom: which should use #scanForEmptySlotFor: instead of #scanFor: for better performance. I do not shrink. ! !WeakKeyDictionary methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Collection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:33' prior: 18816249! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger timesRepeat: [self add: newObject]. ^ newObject! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! 
compactAll "HashedCollection compactAll" self allSubclassesDo: #compactAllInstances! ! !HashedCollection class methodsFor: 'initialize-release' stamp: 'ul 4/12/2010 23:49'! compactAllInstances "Do not use #allInstancesDo: because compact may create new instances." self allInstances do: #compact! ! !HashedCollection class methodsFor: 'sizing' stamp: 'ul 4/7/2010 00:17' prior: 55063414! goodPrimes "Answer a sorted array of prime numbers less than one billion that make good hash table sizes. Should be expanded as needed. See comments below code" ^#( 5 11 17 23 31 43 59 79 107 149 199 269 359 479 641 857 1151 1549 2069 2237 2423 2617 2797 2999 3167 3359 3539 3727 3911 4441 4787 5119 5471 5801 6143 6521 6827 7177 7517 7853 8783 9601 10243 10867 11549 12239 12919 13679 14293 15013 15731 17569 19051 20443 21767 23159 24611 25847 27397 28571 30047 31397 35771 38201 40841 43973 46633 48989 51631 54371 57349 60139 62969 70589 76091 80347 85843 90697 95791 101051 106261 111143 115777 120691 126311 140863 150523 160969 170557 181243 190717 201653 211891 221251 232591 242873 251443 282089 300869 321949 341227 362353 383681 401411 422927 443231 464951 482033 504011 562621 605779 647659 681607 723623 763307 808261 844709 886163 926623 967229 1014617 1121987 1201469 1268789 1345651 1429531 1492177 1577839 1651547 1722601 1800377 1878623 1942141 2028401 2242727 2399581 2559173 2686813 2836357 3005579 3144971 3283993 3460133 3582923 3757093 3903769 4061261 4455361 4783837 5068529 5418079 5680243 6000023 6292981 6611497 6884641 7211599 7514189 7798313 8077189 9031853 9612721 10226107 10745291 11338417 11939203 12567671 13212697 13816333 14337529 14938571 15595673 16147291 17851577 18993941 20180239 21228533 22375079 23450491 24635579 25683871 26850101 27921689 29090911 30153841 31292507 32467307 35817611 37983761 40234253 42457253 44750177 46957969 49175831 51442639 53726417 55954637 58126987 60365939 62666977 64826669 71582779 76039231 80534381 84995153 89500331 93956777 98470819 
102879613 107400389 111856841 116365721 120819287 125246581 129732203 143163379 152076289 161031319 169981667 179000669 187913573 196826447 205826729 214748357 223713691 232679021 241591901 250504801 259470131 285162679 301939921 318717121 335494331 352271573 369148753 385926017 402603193 419480419 436157621 453034849 469712051 486589307 503366497 520043707 570475349 603929813 637584271 671138659 704693081 738247541 771801929 805356457 838910803 872365267 905919671 939574117 973128521 1006682977 1040137411 1073741833) "The above primes past 2069 were chosen carefully so that they do not interact badly with 1664525 (used by hashMultiply), and so that gcd(p, (256^k) +/- a) = 1, for 0 cost ifTrue: [ cost := newCost ] ]. cost ]."! ! !HashedCollection methodsFor: 'adding' stamp: 'ul 4/12/2010 22:38' prior: 53647096! add: newObject withOccurrences: anInteger "Add newObject anInteger times to the receiver. Do nothing if anInteger is less than one. Answer newObject." anInteger < 1 ifTrue: [ ^newObject ]. ^self add: newObject "I can only store an object once." ! ! !HashedCollection methodsFor: 'private' stamp: 'ul 4/12/2010 22:53'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: tally * 4 // 3. self growTo: newCapacity! ! !WeakSet methodsFor: 'private' stamp: 'ul 4/12/2010 22:59'! compact "Reduce the size of array so that the load factor will be ~75%." | newCapacity | newCapacity := self class goodPrimeAtLeast: self slowSize * 4 // 3. self growTo: newCapacity! ! !Symbol class methodsFor: 'class initialization' stamp: 'ul 4/13/2010 00:00' prior: 30357901! compactSymbolTable "Reduce the size of the symbol table so that it holds all existing symbols with 25% free space." | oldSize | Smalltalk garbageCollect. oldSize := SymbolTable capacity. SymbolTable compact. ^(oldSize - SymbolTable capacity) printString, ' slot(s) reclaimed'! ! KeyedIdentitySet class removeSelector: #goodPrimes! 
WeakIdentityKeyDictionary class removeSelector: #goodPrimes! IdentitySet class removeSelector: #goodPrimes! IdentityDictionary class removeSelector: #goodPrimes! "Collections"! !HashedCollectionTest methodsFor: 'test - class - sizing' stamp: 'ul 4/7/2010 00:18' prior: 58761579! testPrimes: primes | badPrimes | badPrimes := #(3 5 71 139 479 5861 277421). "These primes are less than the hashMultiply constant (1664525) and 1664525 \\ prime is close to 0 (mod prime). The following snippet reproduces these numbers: | hashMultiplyConstant | hashMultiplyConstant := 1 hashMultiply. (Integer primesUpTo: hashMultiplyConstant) select: [ :each | | remainder | remainder := hashMultiplyConstant \\ each. remainder <= 1 or: [ remainder + 1 = each ] ]." self assert: primes isSorted. primes do: [ :each | self assert: each isPrime. self deny: (each > 2069 and: [ badPrimes includes: each ]) ]. self assert: ( primes select: [ :p | | result | result := false. p > 2069 ifTrue: [ 1 to: 8 do: [ :k | 1 to: 32 do: [ :a | (p gcd: (256 raisedTo: k) + a) = 1 ifFalse: [ result := true ]. (p gcd: (256 raisedTo: k) - a) = 1 ifFalse: [ result := true ] ] ] ]. result ]) isEmpty.! ! HashedCollectionTest removeSelector: #testGoodPrimesForIdentityBasedHashedCollections! "CollectionsTests"! !MCMczReader methodsFor: 'as yet unclassified' stamp: 'bf 4/18/2010 18:38' prior: 22938947! extractInfoFrom: dict ^MCWorkingCopy infoFromDictionary: dict cache: self infoCache! ! !MCWorkingCopy class methodsFor: 'as yet unclassified' stamp: 'bf 4/19/2010 00:39' prior: 23215403! infoFromDictionary: aDictionary cache: cache | id | id := (aDictionary at: #id) asString. 
^ cache at: id ifAbsentPut: [MCVersionInfo name: (aDictionary at: #name ifAbsent: ['']) id: (UUID fromString: id) message: (aDictionary at: #message ifAbsent: ['']) date: ([Date fromString: (aDictionary at: #date)] ifError: [nil]) time: ([Time fromString: (aDictionary at: #time)] ifError: [nil]) author: (aDictionary at: #author ifAbsent: ['']) ancestors: (self ancestorsFromArray: (aDictionary at: #ancestors ifAbsent: []) cache: cache) stepChildren: (self ancestorsFromArray: (aDictionary at: #stepChildren ifAbsent: []) cache: cache)]! ! !MCVersionInfo methodsFor: 'converting' stamp: 'bf 4/18/2010 23:25' prior: 23175569! asDictionary ^ Dictionary new at: #name put: name; at: #id put: id asString; at: #message put: message; at: #date put: date; at: #time put: time; at: #author put: author; at: #ancestors put: (self ancestors collect: [:a | a asDictionary]); yourself! ! "Monticello"! !BlockContextTest methodsFor: 'running' stamp: 'md 9/6/2005 19:56' prior: 50431957! setUp super setUp. aBlockContext := [100 at 100 corner: 200 at 200]. contextOfaBlockContext := thisContext.! ! !BehaviorTest methodsFor: 'tests' stamp: 'md 2/18/2006 16:42' prior: 17365994! testBinding self assert: Object binding value = Object. self assert: Object binding key = #Object. self assert: Object class binding value = Object class. "returns nil for Metaclasses... like Encoder>>#associationFor:" self assert: Object class binding key = nil.! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53956757! testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #EmbeddedSourceQCompress ). self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. 
trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer sourceCode: code. self assert: (trailer kind == #EmbeddedSourceZip ). newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:13' prior: 53957691! testEmbeddingTempNames | trailer newTrailer code | trailer := CompiledMethodTrailer new. code := 'foo'. trailer tempNames: code. newTrailer := trailer testEncoding. self assert: (trailer kind == #TempsNamesQCompress ). self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). code := 'testEmbeddingSourceCode | trailer newTrailer code | trailer := CompiledMethodTrailer new. trailer sourceCode: code. newTrailer := trailer testEncoding. self assert: (newTrailer sourceCode = code).'. trailer tempNames: code. self assert: (trailer kind == #TempsNamesZip ). newTrailer := trailer testEncoding. self assert: (newTrailer tempNames = code). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:17' prior: 53958613! testEncodingNoTrailer | trailer | trailer := CompiledMethodTrailer new. "by default it should be a no-trailer" self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #NoTrailer ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:14' prior: 53959109! testEncodingSourcePointer | trailer | trailer := CompiledMethodTrailer new. CompiledMethod allInstancesDo: [:method | | ptr | trailer method: method. 
self assert: ( (ptr := method sourcePointer) == trailer sourcePointer). "the last bytecode index must be at 0" ptr ~= 0 ifTrue: [ self assert: (method endPC = trailer endPC) ]. ].! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53959564! testEncodingVarLengthSourcePointer | trailer newTrailer | trailer := CompiledMethodTrailer new. trailer sourcePointer: 1. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 1). trailer sourcePointer: 16r100000000000000. newTrailer := trailer testEncoding. self assert: (newTrailer sourcePointer = 16r100000000000000). "the last bytecode index must be at 0" self assert: (newTrailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:15' prior: 53960108! testSourceByIdentifierEncoding | trailer id | trailer := CompiledMethodTrailer new. id := UUID new asString. trailer sourceIdentifier: id. self assert: (trailer kind == #SourceByStringIdentifier ). trailer := trailer testEncoding. self assert: (trailer kind == #SourceByStringIdentifier ). self assert: (trailer sourceIdentifier = id). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CompledMethodTrailerTest methodsFor: 'testing' stamp: 'Igor.Stasenko 12/13/2009 21:49' prior: 53960643! testSourceBySelectorEncoding | trailer | trailer := CompiledMethodTrailer new. trailer setSourceBySelector. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). trailer := trailer testEncoding. self assert: (trailer kind == #SourceBySelector ). self assert: (trailer size = 1). "the last bytecode index must be at 0" self assert: (trailer endPC = 0). ! ! !CategorizerTest methodsFor: 'running' stamp: 'mtf 9/10/2007 10:10' prior: 18074036! setUp categorizer := Categorizer defaultList: #(a b c d e). categorizer classifyAll: #(a b c) under: 'abc'. categorizer addCategory: 'unreal'.! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074267! testClassifyNewElementNewCategory categorizer classify: #f under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') (''nice'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:18' prior: 18074541! testClassifyNewElementOldCategory categorizer classify: #f under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'' f) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:17' prior: 18074806! testClassifyOldElementNewCategory categorizer classify: #e under: #nice. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'') (''nice'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:54' prior: 18075078! testClassifyOldElementOldCategory categorizer classify: #e under: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d) (''abc'' a b c) (''unreal'' e) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:22' prior: 18075341! testDefaultCategoryIsTransient "Test that category 'as yet unclassified' disapears when all it's elements are removed'" categorizer classifyAll: #(d e) under: #abc. self assert: categorizer printString = '(''abc'' a b c d e) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 15:15' prior: 18075669! testNullCategory "Test that category 'as yet unclassified' disapears when all it's elements are removed'" | aCategorizer | aCategorizer := Categorizer defaultList: #(). self assert: aCategorizer printString = '(''as yet unclassified'') '. self assert: aCategorizer categories = #('no messages'). aCategorizer classify: #a under: #b. self assert: aCategorizer printString = '(''b'' a) '. self assert: aCategorizer categories = #(b).! ! 
!CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18076194! testRemoveEmptyCategory categorizer removeCategory: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:55' prior: 18076430! testRemoveExistingElement categorizer removeElement: #a. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076673! testRemoveNonEmptyCategory self should: [categorizer removeCategory: #abc] raise: Error. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:59' prior: 18076950! testRemoveNonExistingCategory categorizer removeCategory: #nice. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 12:57' prior: 18077203! testRemoveNonExistingElement categorizer removeElement: #f. self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/11/2007 14:49' prior: 18077451! testRemoveThenRename categorizer removeCategory: #unreal. categorizer renameCategory: #abc toBe: #unreal. self assert: categorizer printString = '(''as yet unclassified'' d e) (''unreal'' a b c) '! ! !CategorizerTest methodsFor: 'testing' stamp: 'mtf 9/10/2007 10:14' prior: 18077736! testUnchanged self assert: categorizer printString = '(''as yet unclassified'' d e) (''abc'' a b c) (''unreal'') '! ! "KernelTests"! !SmalltalkImage methodsFor: 'accessing' stamp: 'ul 4/18/2010 22:22'! at: key ifPresentAndInMemory: aBlock "Lookup the given key in the receiver. 
If it is present, answer the value of evaluating the given block with the value associated with the key. Otherwise, answer nil." ^globals at: key ifPresentAndInMemory: aBlock! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:45'! image "Answer the object to query about the current object memory and execution environment." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 4/11/2010 11:47'! imageFormatVersion "Answer an integer identifying the type of image. The image version number may identify the format of the image (e.g. 32 or 64-bit word size) or specific requirements of the image (e.g. block closure support required). This invokes an optional primitive that may not be available on all virtual machines." "Smalltalk image imageFormatVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveImageFormatVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:38'! interpreterSourceVersion "Answer a string corresponding to the version of the interpreter source. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, as distinct from the external platform source code, typically written in C and managed separately for each platform. An optional primitive is invoked that may not be available on all virtual machines." "Smalltalk vm interpreterSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitiveInterpreterSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! platformSourceVersion "Answer a string corresponding to the version of the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines." 
"Smalltalk vm platformSourceVersion" self notify: 'This virtual machine does not support the optional primitive #primitivePlatformSourceVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'image' stamp: 'md 5/16/2006 12:34' prior: 58536670! version "Answer the version of this release." ^SystemVersion current version! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:39'! versionLabel "Answer a string corresponding to the version of virtual machine. This represents the version level of the Smalltalk source code (interpreter and various plugins) that is translated to C by a CCodeGenerator, in addition to the external platform source code, typically written in C and managed separately for each platform. This invokes an optional primitive that may not be available on all virtual machines. See also vmVersion, which answers a string identifying the image from which virtual machine sources were generated." "Smalltalk vm versionLabel" self notify: 'This virtual machine does not support the optional primitive #primitiveVMVersion' translated. ^''! ! !SmalltalkImage methodsFor: 'vm' stamp: 'dtl 4/11/2010 11:15'! vm "Answer the object to query about virtual machine." ^self! ! !SmalltalkImage methodsFor: 'image' stamp: 'dtl 1/4/2010 21:40' prior: 58537225! wordSize "Answer the size in bytes of an object pointer or word in the object memory. The value does not change for a given image, but may be modified by a SystemTracer when converting the image to another format. The value is cached in WordSize to avoid the performance overhead of repeatedly consulting the VM." "Smalltalk wordSize" ^ WordSize ifNil: [WordSize := [SmalltalkImage current vmParameterAt: 40] on: Error do: [4]]! ! "System"! !SMLoaderPlus commentStamp: 'btr 12/1/2006 15:16' prior: 0! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). It uses ToolBuilder to construct its window. 
You can open one with: SMLoaderPlus open Instance Variables categoriesToFilterIds: The set of categories to filter the packages list. filters: The set of filters to apply to the packages list. map: The model SqueakMap. packagesList: The list of packages from the map. selectedCategory: The current category. selectedItem: The selected package or release. window: The window, held only so we can reOpen.! !SMLoaderCategoricalPlus commentStamp: 'btr 12/4/2006 15:47' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategoricalPlus open! !SMLoader commentStamp: 'btr 11/30/2006 18:00' prior: 27913009! A simple package loader that is currently the standard UI for SqueakMap (the model is an SMSqueakMap instance). You can open one with: SMLoader open! !SMLoaderCategorical commentStamp: 'btr 12/1/2006 15:16' prior: 0! A variant package loader that uses a more-or-less standard Smalltalk-80 browser perspective of selecting categories in one pane and then selecting items within in the next pane. You can open one with: SMLoaderCategorical open! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 18:06'! initialize Smalltalk at: #ToolBuilder ifPresent: [:tb | (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! openMenuString ^ 'SqueakMap Categories'! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! removeFromSystem (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self removeFromSystem: true! ! !SMLoaderCategoricalPlus class methodsFor: 'menu registration' stamp: 'btr 12/1/2006 17:34'! 
unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString].! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:50'! buildFancyWith: aBuilder "Creates a variant of the window where the package pane is split between installed and uninstalled packages." | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight)); yourself); add: ((self buildNotInstalledPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ (horizDivide / 2)); yourself); add: ((self buildInstalledPackagesListWith: builder) frame: (vertDivide @ (horizDivide / 2) corner: 1 @ horizDivide); yourself); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1); yourself); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. self setUpdatablePanesFrom: #(#installedPackageList #notInstalledPackageList ). currentPackageList := #notInstalled. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:56'! 
buildInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #installedPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 17:52'! buildNotInstalledPackagesListWith: aBuilder ^ aBuilder pluggableTreeSpec new model: self; roots: #notInstalledPackageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; yourself! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:55'! buildWith: aBuilder | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.5. horizDivide := 0.6. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight); yourself); add: ((self buildCategoriesListWith: builder) frame: (0 @ buttonBarHeight corner: vertDivide @ horizDivide); yourself); add: ((self buildSearchPaneWith: builder) frame: (vertDivide @ buttonBarHeight corner: 1 @ (buttonBarHeight + searchHeight))); add: ((self buildPackagesListWith: builder) frame: (vertDivide @ (buttonBarHeight + searchHeight) corner: 1 @ horizDivide)); add: ((self buildPackagePaneWith: builder) frame: (0 @ horizDivide corner: 1 @ 1)); yourself)). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList ^currentPackageList! ! 
!SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! currentPackageList: aSymbol currentPackageList := aSymbol. self changed: #installButtonLabel.! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:55'! defaultLabel ^ 'Categorical ' , super defaultLabel! ! !SMLoaderCategoricalPlus methodsFor: 'interface' stamp: 'btr 12/4/2006 15:58'! installButtonLabel ^ self currentPackageList = #notInstalled ifTrue: ['Install the above package'] ifFalse: ['Remove the above package']! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:52'! installedPackageList ^self packageList select: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! installedPackagesListIndex ^ self currentPackageList = #installed ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! installedPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #installed ifTrue: [self currentPackageList: #installed. self changed: #currentPackageList]. self noteChanged! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 17:34'! isOn ^false! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:53'! notInstalledPackageList ^self packageList reject: [:e | e isInstalled]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:02'! notInstalledPackagesListIndex ^ self currentPackageList = #notInstalled ifTrue: [self packagesListIndex] ifFalse: [0]! ! !SMLoaderCategoricalPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 18:03'! notInstalledPackagesListIndex: anObject packagesListIndex := anObject. self currentPackageList ~= #notInstalled ifTrue: [self currentPackageList: #notInstalled. self changed: #currentPackageList]. self changed: #packagesListIndex. "update my selection" self noteChanged. self contentsChanged! ! 
!SMLoaderCategoricalPlus methodsFor: 'private' stamp: 'btr 12/1/2006 17:53'! noteChanged self changed: #installedPackageList. self changed: #notInstalledPackageList. super noteChanged." self changed: #packageNameList. self changed: #packagesListIndex. self changed: #categoriesForPackage. self contentsChanged."! ! !SMLoaderCategoricalPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 17:34'! packageList ^ self packages select: [:e | (e categories anySatisfy: [:cat | cat = self selectedCategory]) and: [(filters ifNil: [#()]) allSatisfy: [:currFilter | (self perform: currFilter) value: e]]]! ! !SMLoaderPlus class methodsFor: 'parts bin' stamp: 'btr 11/22/2006 15:02'! descriptionForPartsBin ^self partName: 'Package Loader' categories: #(Tools) documentation: 'SqueakMap UI' ! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47'! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifPresent: [:tb | self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white "not set" ifTrue: [ Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor) ]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! initializedInstance ^ (ToolBuilder open: self new) extent: 400 at 400! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! new "Create a SqueakMap loader on the default map." ^self newOn: SMSqueakMap default! ! 
!SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/22/2006 15:02'! newOn: aMap "Create a SqueakMap loader on given map." ^super new on: aMap; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:16'! newStandAlone ^ ToolBuilder open: self new! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:13'! open "Create and open a SqueakMap Loader." "SMLoaderPlus open" ^ (Smalltalk at: #ToolBuilder) open: self new! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoaderPlus class methodsFor: 'instance creation' stamp: 'btr 11/23/2006 11:21'! openOn: aSqueakMap "Create and open a SqueakMap Loader on a given map." "self openOn: SqueakMap default" ^ (Smalltalk at: #ToolBuilder) open: (self newOn: aSqueakMap)! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:18'! prototypicalToolWindow ^ ToolBuilder open: self new; applyModelExtent; yourself! ! !SMLoaderPlus class methodsFor: 'new-morph participation' stamp: 'btr 11/22/2006 15:02'! registerInFlapsRegistry "Register the receiver in the system's flaps registry." self environment at: #Flaps ifPresent: [:cl | (cl respondsTo: #registerQuad:forFlapNamed:) ifTrue: [cl registerQuad: #(#SMLoader #prototypicalToolWindow 'Package Loader' 'The SqueakMap Package Loader' ) forFlapNamed: 'Tools']]! ! !SMLoaderPlus class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:50'! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoaderPlus class methodsFor: 'window color' stamp: 'btr 11/22/2006 15:02'! windowColorSpecification "Answer a WindowColorSpec object that declares my preference." 
^WindowColorSpec classSymbol: self name wording: 'Package Loader' brightColor: Color yellow muchLighter duller pastelColor: Color yellow veryMuchLighter duller helpMessage: 'The SqueakMap Package Loader'! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! addSelectedCategoryAsFilter "Add a new filter that filters on the currently selected category. Make it enabled as default." categoriesToFilterIds add: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 16:11'! askToLoadUpdates "Check how old the map is and ask to update it if it is older than 10 days or if there is no map on disk." | available | available := map isCheckpointAvailable. (available not or: [ (Date today subtractDate: (Date fromSeconds: (map directory directoryEntryFor: map lastCheckpointFilename) modificationTime)) > 3]) ifTrue: [ (self confirm: (available ifTrue: ['The map on disk is more than 10 days old, update it from the Internet?'] ifFalse: ['There is no map on disk, fetch it from the Internet?'])) ifTrue: [self loadUpdates]]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:43'! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. dir := item isPackage ifTrue: [map cache directoryForPackage: item] ifFalse: [map cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. "withLabel: item name, ' cache directory'." 
win openInWorld! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildButtonBarWith: aBuilder ^ aBuilder pluggablePanelSpec new model: self; layout: #horizontal; children: (self commandSpecs select: [ :spec | spec fourth includes: #all] thenCollect: [ :spec | aBuilder pluggableActionButtonSpec new model: self; label: spec first; action: spec second; help: spec third; enabled: ((spec fourth includes: #item) ifTrue: [#hasSelectedItem]); yourself]); name: #buttonBar; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:56'! buildCategoriesListWith: aBuilder "Create the hierarchical list holding the category tree." ^ aBuilder pluggableTreeSpec new model: self; roots: #categoryList; getSelectedPath: #selectedCategoryPath; getChildren: #categoryChildren:; hasChildren: #categoryHasChildren:; setSelected: #selectedCategory:; menu: #categoriesMenu:; label: #categoryLabel:; autoDeselect: true; wantsDrop: true; name: #categoriesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagePaneWith: aBuilder "Create the text area to the right in the loader." ^ aBuilder pluggableTextSpec new model: self; getText: #itemDescription; name: #packagePane; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildPackagesListWith: aBuilder "Create the hierarchical list holding the packages and releases." 
^ aBuilder pluggableTreeSpec new model: self; roots: #packageList; getSelectedPath: #selectedItemPath; setSelected: #selectedItem:; menu: #packagesMenu:; label: #itemLabel:; getChildren: #itemChildren:; hasChildren: #itemHasChildren:; autoDeselect: true; wantsDrop: true; name: #packagesList; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:57'! buildSearchPaneWith: aBuilder ^ aBuilder pluggableInputFieldSpec new model: self; selection: #searchSelection; getText: #searchText; setText: #findPackage:notifying:; name: #search; yourself! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 06:54'! buildWith: aBuilder "Create the package loader window." | buttonBarHeight vertDivide horizDivide | buttonBarHeight := 0.07. vertDivide := 0.6. horizDivide := 0.3. builder := aBuilder. window := builder build: (builder pluggableWindowSpec new model: self; label: #label; children: (OrderedCollection new add: ((self buildButtonBarWith: builder) frame: (0 @ 0 corner: 1 @ buttonBarHeight)); add: ((self buildSearchPaneWith: builder) frame: (0 @ buttonBarHeight corner: horizDivide @ (buttonBarHeight * 2))); add: ((self buildPackagesListWith: builder) frame: (0 @ (buttonBarHeight * 2) corner: horizDivide @ vertDivide)); add: ((self buildCategoriesListWith: builder) frame: (0 @ vertDivide corner: horizDivide @ 1)); add: ((self buildPackagePaneWith: builder) frame: (horizDivide @ buttonBarHeight corner: 1 @ 1)); yourself); yourself). window on: #mouseEnter send: #paneTransition: to: window. window on: #mouseLeave send: #paneTransition: to: window. window extent: self initialExtent. ^ window! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'! cachePackageReleaseAndOfferToCopy "Cache package release, then offer to copy it somewhere. Answer the chosen file's location after copy, or the cache location if no directory was chosen." | release installer newDir newName newFile oldFile oldName | release := self selectedPackageOrRelease. 
release isPackageRelease ifFalse: [ self error: 'Should be a package release!!']. installer := SMInstaller forPackageRelease: release. [UIManager default informUser: 'Caching ' , release asString during: [installer cache]] on: Error do: [:ex | | msg | msg := ex messageText ifNil: [ex asString]. self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs. ^nil ]. installer isCached ifFalse: [self inform: 'Download failed, see transcript for details'. ^nil]. oldName := installer fullFileName. newDir := FileList2 modalFolderSelector: installer directory. newDir ifNil: [ ^oldName ]. newDir = installer directory ifTrue: [ ^oldName ]. newName := newDir fullNameFor: installer fileName. newFile := FileStream newFileNamed: newName. newFile ifNil: [ ^oldName ]. newFile binary. oldFile := FileStream readOnlyFileNamed: oldName. oldFile ifNil: [ ^nil ]. oldFile binary. [[ newDir copyFile: oldFile toFile: newFile ] ensure: [ oldFile close. newFile close ]] on: Error do: [ :ex | ^oldName ]. ^newName! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categoriesMenu: aMenu "Answer the categories-list menu." self selectedCategory ifNotNil: [aMenu addList: self categorySpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! categoryChildren: aCategory ^ aCategory subCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! categoryHasChildren: aCategory ^ aCategory hasSubCategories! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:46'! categoryLabel: aCategory ^ aCategory name! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01'! categoryList "Create the category list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." 
| list first | list := (map categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! categorySpecificOptions | choices | choices := OrderedCollection new. (categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/22/2006 15:02'! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:01'! commandSpecFor: selector ^ self commandSpecs detect: [:spec | spec second = selector]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:00'! commandSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.' (item all)) ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.' (item all)) ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.' (item all)) ('Copy from cache' cachePackageReleaseAndOfferToCopy 'Download selected release into cache first if needed, and then offer to copy it somewhere else.' 
(item)) ('Force download into cache' downloadPackageRelease 'Force a download of the selected release into the cache.' (item)) ('Update' loadUpdates 'Update the package index from the servers.' (all)) ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).' (all)) ('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm '' (item)) ('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm '' (item)) ('Copy list' listInPasteBuffer 'Puts the list as text into the clipboard.' (all)) ('Save filters' saveFiltersAsDefault 'Saves the current filters as default.' (all)) ('Help' help 'What is this?' (all)))! !

!SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/22/2006 15:02'!
defaultButtonPaneHeight
	"Answer the user's preferred default height for new button panes."
	^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
defaultLabel
	"Answer the base window label; labelForShown: appends the package counts."
	^ 'SqueakMap Package Loader'! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:38'!
downloadPackageRelease
	"Force a download of the selected package release into the cache."
	| release |
	release := self selectedPackageOrRelease.
	release isPackageRelease ifFalse: [ self error: 'Should be a package release!!'].
	[UIManager default informUser: 'Downloading ' , release asString during: [
		(SMInstaller forPackageRelease: release) download] ]
			on: Error do: [:ex | | msg |
				msg := ex messageText ifNil: [ex asString].
				self informException: ex msg: ('Error occurred during download:\', msg, '\') withCRs]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
emailPackageMaintainers
	"Send mail to package owner and co-maintainers."
	| item package toAddresses |
	item := self selectedPackageOrRelease ifNil: [^ nil].
	package := item isPackageRelease ifTrue: [item package] ifFalse: [item].
"(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterAdd: anObject self changeFilters: (self filters copyWith: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterAutoInstall ^[:package | package isInstallable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterAvailable ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterInstalled ^[:package | package isInstalled]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterNotInstalledYet ^[:package | package isInstalled not]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:42'! filterNotUptoDate ^[:package | package isAvailable]! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 11/22/2006 15:02'! filterPublished ^[:package | package isPublished]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filterRemove: anObject self changeFilters: (self filters copyWithout: anObject) ! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! filterSafelyAvailable ^[:package | package isSafelyAvailable]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/30/2006 21:07'! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #(#('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') #('Installed packages' #filterInstalled 'Display only packages that are installed.') #('Published packages' #filterPublished 'Display only packages that have at least one published release.') ) asOrderedCollection. categoriesToFilterIds do: [:catId | specs add: {'Packages in ' , (map object: catId) name. catId. 'Display only packages that are in the category.'}]. ^ specs! ! !SMLoaderPlus methodsFor: 'filters' stamp: 'btr 12/1/2006 01:43'! filterVersion "Ignore spaces in the version string, they're sometimes spurious. Not used anymore." ^[:package | package categories anySatisfy: [:cat | (cat name, '*') match: (Smalltalk version copyWithout: $ ) ]]! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! filters ^filters! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/24/2006 13:49'! findPackage: aString notifying: aView "Search and select a package with the given (sub) string in the name or description. " | index list match descriptions | match := aString asString asLowercase. index := self packagesListIndex. list := self packageNameList. list isEmpty ifTrue: [^ self]. descriptions := self packageList collect: [:e | e description]. index + 1 to: list size do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. 
"wrap around" 1 to: index do: [:i | (((list at: i) includesSubstring: match caseSensitive: false) or: [(descriptions at: i) includesSubstring: match caseSensitive: false]) ifTrue: [^ self packagesListIndex: i]]. self inform: 'No package matching ' , aString asString! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! generalOptions ^#( #('Upgrade all installed packages' upgradeInstalledPackagesNoConfirm) #('Upgrade all installed packages confirming each' upgradeInstalledPackagesConfirm) #('Put list in paste buffer' listInPasteBuffer) #('Save filters as default' saveFiltersAsDefault) #- ) ! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 18:36'! hasSelectedItem ^ self selectedPackageOrRelease notNil! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! help "Present help text. If there is a web server available, offer to open it. Use the WebBrowser registry if possible, or Scamper if available." | message browserClass | message := 'Welcome to the SqueakMap package loader. The names of packages are followed by versions: (installed -> latest). If there is no arrow, your installed version of the package is the latest. Bold packages and releases have been installed. The checkbox menu items modify which packages you''ll see. Take a look at them - only some packages are shown initially. The options available for a package depend on how it was packaged. Comment on a package by emailing the author or the squeak list.'. browserClass := Smalltalk at: #WebBrowser ifPresent: [ :registry | registry default ]. browserClass := browserClass ifNil: [ Smalltalk at: #Scamper ifAbsent: [ ^self inform: message ]]. (self confirm: message, ' Would you like to view more detailed help on the SqueakMap swiki page?') ifTrue: [ browserClass openOnUrl: 'http://wiki.squeak.org/2726' asUrl]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! informException: ex msg: msg "Tell the user that an error has occurred. 
Offer to open debug notifier." (self confirm: msg, 'Would you like to open a debugger?') ifTrue: [ex pass]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/5/2006 05:28'! initialExtent ^500 at 400! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! installPackageRelease "Install selected package or release. The cache is used." | item release | item := self selectedPackageOrRelease ifNil: [^ nil]. item isPackageRelease ifTrue: [ (item isPublished or: [self confirm: 'Selected release is not published yet, install anyway?']) ifTrue: [^self installPackageRelease: item]] ifFalse: [ release := item lastPublishedReleaseForCurrentSystemVersion. release ifNil: [ (self confirm: 'The package has no published release for your Squeak version, try releases for any Squeak version?') ifTrue: [ release := item lastPublishedRelease. release ifNil: [ (self confirm: 'The package has no published release at all, take the latest of the unpublished releases?') ifTrue: [release := item lastRelease]]]]. release ifNotNil: [^self installPackageRelease: release]]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:53'! installPackageRelease: aRelease "Install a package release. The cache is used." | myRelease installer | aRelease isCompatibleWithCurrentSystemVersion ifFalse: [(self confirm: 'The package you are about to install is not listed as being compatible with your image version (', SystemVersion current majorMinorVersion, '), so the package may not work properly. Do you still want to proceed with the install?') ifFalse: [^ self]]. myRelease := self installedReleaseOfMe. installer := SMInstaller forPackageRelease: aRelease. [UIManager default informUser: 'Downloading ' , aRelease asString during: [installer download]. UIManager default informUser: 'Installing ' , aRelease asString during: [ installer install. 
myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | | msg | msg := ex messageText ifNil:[ex asString]. self informException: ex msg: ('Error occurred during install:\', msg, '\') withCRs].! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 15:02'! installedReleaseOfMe "Return the release of the installed package loader." ^SMSqueakMap default installedReleaseOf: (SMSqueakMap default packageWithId: '941c0108-4039-4071-9863-a8d7d2b3d4a3').! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:44'! itemChildren: anItem ^ anItem isPackage ifTrue: [anItem releases] ifFalse: [#()]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 19:56'! itemDescription ^ self selectedPackageOrRelease ifNil: [''] ifNotNilDo: [:item | item fullDescription]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:45'! itemHasChildren: anItem ^ anItem isPackage and: [anItem releases notEmpty]! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:44'! itemLabel: anItem | label | label := anItem isPackage ifTrue: [anItem name , (anItem versionLabel ifEmpty: [''] ifNotEmptyDo: [:lbl | ' (' , anItem versionLabel , ')'])] ifFalse: [anItem smartVersion]. ^ anItem isInstalled ifTrue: [label asText allBold] ifFalse: [label]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 11/24/2006 17:17'! label ^ self labelForShown: (packagesList ifNil: [self packageList])! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! labelForFilter: aFilterSymbol ^(self filterSpecs detect: [:fs | fs second = aFilterSymbol]) first! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! labelForShown: packagesShown "Update the label of the window." ^ self defaultLabel , ' (', (packagesShown size < map packages size ifTrue: [packagesShown size printString, ' shown out of '] ifFalse: ['']) , map packages size printString, ' packages)'! ! 
!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! listInPasteBuffer "Useful when talking with people etc. Uses the map to produce a nice String." Clipboard clipboardText: (String streamContents: [:s | packagesList do: [:p | s nextPutAll: p nameWithVersionLabel; cr ]]) asText! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:31'! loadUpdates [UIManager default informUser: 'Loading Updates' during: [ map loadUpdates. self noteChanged ] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when updating map:\', ex messageText, '\') withCRs]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/24/2006 14:05'! noteChanged filters ifNil: [^ self reOpen]. map ifNotNil: [packagesList := nil. selectedCategory := nil. self changed: #categoryList. self changed: #packageList. self changed: #packagesListIndex. "update my selection" self contentsChanged]! ! !SMLoaderPlus methodsFor: 'initialization' stamp: 'btr 11/22/2006 16:11'! on: aSqueakMap "Initialize instance." map := aSqueakMap. map synchWithDisk. filters := DefaultFilters copy. categoriesToFilterIds := DefaultCategoriesToFilterIds copy. self askToLoadUpdates! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! package: aPackage filteredByCategory: aCategory "Answer true if the package should be shown if we filter on . It should be shown if itself or any of its releases has the category." | releases | releases := aPackage releases. ^(aPackage hasCategoryOrSubCategoryOf: aCategory) or: [ releases anySatisfy: [:rel | rel hasCategoryOrSubCategoryOf: aCategory]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageList "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." | list | list := packagesList ifNil: [packagesList := self packageListCalculated]. 
selectedCategory ifNotNil: [ list := list select: [:each | self package: each filteredByCategory: selectedCategory]]. self updateLabel: list. ^ list! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:49'! packageListCalculated "Return a list of the SMPackages that should be visible by applying all the filters. Also filter based on the currently selected category - if any." ^ self packages select: [:p | filters allSatisfy: [:currFilter | currFilter isSymbol ifTrue: [(self perform: currFilter) value: p] ifFalse: [self package: p filteredByCategory: (map object: currFilter)]]]! ! !SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'! packageNameList ^ self packageList collect: [:e | e name]! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 18:30'! packageSpecificOptions | choices packageOrRelease | packageOrRelease := self selectedPackageOrRelease. choices := OrderedCollection new. packageOrRelease isInstallable ifTrue: [ choices add: (self commandSpecFor: #installPackageRelease)]. (packageOrRelease isDownloadable and: [packageOrRelease isCached]) ifTrue: [ choices add: (self commandSpecFor: #browseCacheDirectory)]. (packageOrRelease isPackageRelease and: [packageOrRelease isDownloadable]) ifTrue: [ choices add: (self commandSpecFor: #cachePackageReleaseAndOfferToCopy). choices add: (self commandSpecFor: #downloadPackageRelease)]. choices add: (self commandSpecFor: #emailPackageMaintainers). ^ choices! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/22/2006 16:11'! packages "We request the packages as sorted by name by default." ^map packagesByName asArray ! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex ^ self packageList indexOf: self selectedItem! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:01'! packagesListIndex: anObject self selectedItem: (anObject = 0 ifFalse: [self packageList at: anObject])! ! !SMLoaderPlus methodsFor: 'menus' stamp: 'btr 11/22/2006 15:02'! 
packagesMenu: aMenu "Answer the packages-list menu." self selectedPackageOrRelease ifNotNil: [aMenu addList: self packageSpecificOptions; addLine]. aMenu addList: self generalOptions. self addFiltersToMenu: aMenu. ^aMenu! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 12/1/2006 01:45'! perform: selector orSendTo: otherTarget "Selector was just chosen from a menu by a user. If can respond, then perform it on myself. If not, send it to otherTarget, presumably the editPane from which the menu was invoked." ^ (self respondsTo: selector) ifTrue: [self perform: selector] ifFalse: [super perform: selector orSendTo: otherTarget]! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 11/26/2006 23:22'! reOpen "Close this package loader, probably because it has been updated, and open a new one." self inform: 'This package loader has been upgraded and will be closed and reopened to avoid strange side effects.'. window delete. (Smalltalk at: self class name) open! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeCategoryFilters "Remove all category filters." categoriesToFilterIds := OrderedCollection new! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! removeSelectedCategoryAsFilter "Remove the filter that filters on the currently selected category." categoriesToFilterIds remove: self selectedCategory id! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! saveFiltersAsDefault "Save the current filters as default so that they are selected the next time the loader is opened." DefaultFilters := filters copy. DefaultCategoriesToFilterIds := categoriesToFilterIds copy! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchSelection "Selects all of the default search text so that a type-in overwrites it." ^ {1. self searchText size}! ! !SMLoaderPlus methodsFor: 'interface' stamp: 'btr 11/24/2006 14:35'! searchText "A dummy default search text so that the field describes its purpose." 
^ 'Search packages'! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedCategory "Return selected category." ^ selectedCategory! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:37'! selectedCategory: anSMCategory "Change the selected category." selectedCategory := anSMCategory. selectedCategory ifNotNil: [(selectedCategory objects includes: self selectedItem) ifFalse: [self selectedItem: nil]]. self changed: #selectedCategory. self changed: #packageList! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:52'! selectedCategoryPath "Return selected category's path." | path | path := #(). selectedCategory ifNotNil: [selectedCategory parent ifNotNilDo: [:p | path := path copyWith: p]. path := path copyWith: selectedCategory]. ^ path collect: [:cat | self categoryLabel: cat]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:02'! selectedItem ^ selectedItem! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:27'! selectedItem: anItem "This == workaround protects us from recursion since ToolBuilder's tree widgets will always tell us that the selection has been updated when we tell it that the selection path has been updated. Cleaner solutions invited." anItem == selectedItem ifFalse: [ selectedItem := anItem. self changed: #selectedItemPath. self changed: #itemDescription. self changed: #hasSelectedItem]! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 12/1/2006 16:16'! selectedItemPath | path | path := #(). (selectedItem isKindOf: SMPackageRelease) ifTrue: [path := path copyWith: selectedItem package]. selectedItem ifNotNil: [path := path copyWith: selectedItem]. ^ path! ! !SMLoaderPlus methodsFor: 'accessing' stamp: 'btr 11/24/2006 14:03'! selectedPackageOrRelease "Return selected package or package release." ^ selectedItem! ! !SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'! 
showFilterString: aFilterSymbol
	"Answer the filter's menu label prefixed with its current check-mark state marker."
	^(self stateForFilter: aFilterSymbol), (self labelForFilter: aFilterSymbol)! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
stateForFilter: aFilterSymbol
	"Answer the check-mark marker for an addUpdating: menu item: <yes> when the filter is active, <no> otherwise. NOTE(review): the archived text showed two empty strings here, which would make every filter item render unchecked; the markers were presumably stripped as HTML tags by the mail archiver - confirm against a pristine fileout."
	^(self filters includes: aFilterSymbol) ifTrue: ['<yes>'] ifFalse: ['<no>']! !

!SMLoaderPlus methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 15:02'!
toggleFilterState: aFilterSymbol
	"Add the filter if it is inactive, remove it if it is active."
	^(self filters includes: (aFilterSymbol))
		ifTrue: [self filterRemove: aFilterSymbol]
		ifFalse: [self filterAdd: aFilterSymbol]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'!
uncheckFilters
	"Uncheck all filters."
	filters := OrderedCollection new.
	self noteChanged! !

!SMLoaderPlus methodsFor: 'lists' stamp: 'btr 12/1/2006 01:50'!
updateLabel: packagesShown
	"Update the label of the window."
	window ifNotNilDo: [:w | w setLabel: (self labelForShown: packagesShown)]! !

!SMLoaderPlus methodsFor: 'actions' stamp: 'btr 12/1/2006 01:29'!
upgradeInstalledPackages
	"Tries to upgrade all installed packages to the latest published release for this version of Squeak. So this is a conservative approach."
	| installed old myRelease toUpgrade info |
	installed := map installedPackages.
	old := map oldPackages.
	old isEmpty ifTrue: [
		^self inform: 'All ', installed size printString, ' installed packages are up to date.'].
	toUpgrade := map upgradeableAndOldPackages.
	toUpgrade isEmpty ifTrue: [
		^self inform: 'None of the ', old size printString, ' old packages of the ', installed size printString, ' installed can be automatically upgraded. You need to upgrade them manually.'].
	info := old size < toUpgrade size
		ifTrue: [ 'Of the ', old size printString, ' old packages only ', toUpgrade size printString, ' can be upgraded. The following packages will not be upgraded: ', (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])]
		ifFalse: ['All old packages upgradeable.'].
(self confirm: info, ' About to upgrade the following packages: ', (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]), 'Proceed?') ifTrue: [ myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [ map upgradeOldPackages. self inform: toUpgrade size printString, ' packages successfully upgraded.'. myRelease = self installedReleaseOfMe ifFalse: [self reOpen] ifTrue: [self noteChanged]] ] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\', ex messageText, '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. Confirms on each upgrade." ^ self upgradeInstalledPackagesConfirm: true! ! !SMLoaderPlus methodsFor: 'private' stamp: 'btr 12/1/2006 01:29'! upgradeInstalledPackagesConfirm: confirmEach "Tries to upgrade all installed packages to the latest published release for this version of Squeak. If confirmEach is true we ask for every upgrade. " | installed old myRelease toUpgrade info | installed := map installedPackages. old := map oldPackages. old isEmpty ifTrue: [^ self inform: 'All ' , installed size printString , ' installed packages are up to date.']. toUpgrade := map upgradeableAndOldPackages. toUpgrade isEmpty ifTrue: [^ self inform: 'None of the ' , old size printString , ' old packages of the ' , installed size printString , ' installed can be automatically upgraded. You need to upgrade them manually.']. info := old size < toUpgrade size ifTrue: ['Of the ' , old size printString , ' old packages only ' , toUpgrade size printString , ' can be upgraded. 
The following packages will not be upgraded: ' , (String streamContents: [:s | (old removeAll: toUpgrade; yourself) do: [:p | s nextPutAll: p nameWithVersionLabel; cr]])] ifFalse: ['All old packages upgradeable.']. (self confirm: info , ' About to upgrade the following packages: ' , (String streamContents: [:s | toUpgrade do: [:p | s nextPutAll: p nameWithVersionLabel; cr]]) , 'Proceed?') ifTrue: [myRelease := self installedReleaseOfMe. [UIManager default informUser: 'Upgrading Installed Packages' during: [confirmEach ifTrue: [map upgradeOldPackagesConfirmBlock: [:p | self confirm: 'Upgrade ' , p installedRelease packageNameWithVersion , ' to ' , (p lastPublishedReleaseForCurrentSystemVersionNewerThan: p installedRelease) listName , '?']] ifFalse: [map upgradeOldPackages]. self inform: toUpgrade size printString , ' packages successfully processed.'. myRelease = self installedReleaseOfMe ifTrue: [self noteChanged] ifFalse: [self reOpen]]] on: Error do: [:ex | self informException: ex msg: ('Error occurred when upgrading old packages:\' , ex messageText , '\') withCRs]]! ! !SMLoaderPlus methodsFor: 'actions' stamp: 'btr 11/22/2006 15:02'! upgradeInstalledPackagesNoConfirm "Tries to upgrade all installed packages to the latest published release for this version of Squeak. No confirmation on each upgrade." ^ self upgradeInstalledPackagesConfirm: false! ! !SMPackageWrapper methodsFor: 'comparing' stamp: 'dvf 9/21/2003 16:25' prior: 27998626! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMPackageWrapper methodsFor: 'converting' stamp: 'btr 11/22/2006 00:54' prior: 27998778! asString | string | string := item name, ' (', item versionLabel, ')'. item isInstalled ifTrue: [string := string asText allBold]. "(string includesSubString: '->') ifTrue: [string := string asText color: Color green]." ^ string! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'dvf 10/14/2003 18:58' prior: 27998902! 
contents ^item releases reversed collect: [:e | SMPackageReleaseWrapper with: e]! ! !SMPackageWrapper methodsFor: 'testing' stamp: 'dvf 9/21/2003 16:25' prior: 27999070! hash ^self withoutListWrapper hash! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:55'! help ^ 'This shows all packages with their releases that should be displayed according the current filter.'! ! !SMPackageWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString! ! !SMPackageWrapper methodsFor: 'printing' stamp: 'dvf 9/21/2003 16:22' prior: 27999192! printOn: aStream aStream nextPutAll: 'wrapper for: ', item printString! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849043! = anObject ^self withoutListWrapper = anObject withoutListWrapper! ! !SMCategoryWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 18:53' prior: 27849195! asString ^ item name , ' (' , self numberOfObjects printString , ')'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'ar 2/9/2004 02:35' prior: 27849301! category ^item! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 21:02' prior: 27849402! contents ^ item subCategories collect: [:n | self class with: n model: n]! ! !SMCategoryWrapper methodsFor: 'model access' stamp: 'btr 11/30/2006 21:02'! getList ^ Array with: (self class with: self contents model: model)! ! !SMCategoryWrapper methodsFor: 'testing' stamp: 'btr 11/30/2006 18:53'! hasContents ^ item hasSubCategories! ! !SMCategoryWrapper methodsFor: 'comparing' stamp: 'ar 2/9/2004 02:13' prior: 27849700! hash ^self withoutListWrapper hash! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:56'! help ^ 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'! ! !SMCategoryWrapper methodsFor: 'accessing' stamp: 'BJP 11/22/2002 14:17'! model ^model! ! 
!SMCategoryWrapper methodsFor: 'accessing' stamp: 'btr 11/30/2006 18:53'! numberOfObjects " | total | total _ 0. model allCategoriesDo: [:c | total _ total + c objects size]. ^total" ^item objects size! ! !SMPackageReleaseWrapper methodsFor: 'converting' stamp: 'btr 11/30/2006 21:30' prior: 27997393! asString "Show installed releases with a trailing asterisk." | string | string := item smartVersion. "Older SMBase versions don't have isInstalled.'" (item respondsTo: #isInstalled) ifTrue: [item isInstalled ifTrue: [string := (string , ' *') asText allBold]]. ^ string! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 17:14'! contents ^ #()! ! !SMPackageReleaseWrapper methodsFor: 'accessing' stamp: 'btr 11/22/2006 16:49'! label ^ self asString ! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 12/1/2006 15:47' prior: 27944626! initialize "Hook us up in the world menu." "self initialize" Smalltalk at: #ToolBuilder ifAbsent: [self registerInFlapsRegistry. (Preferences windowColorFor: #SMLoader) = Color white ifTrue: ["not set" Preferences setWindowColorFor: #SMLoader to: (Color colorFrom: self windowColorSpecification brightColor)]. (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [| oldCmds | oldCmds := TheWorldMenu registry select: [:cmd | cmd first includesSubString: 'Package Loader']. oldCmds do: [:cmd | TheWorldMenu unregisterOpenCommand: cmd first]. TheWorldMenu registerOpenCommand: {self openMenuString. {self. #open}}]]. DefaultFilters := OrderedCollection new. DefaultCategoriesToFilterIds := OrderedCollection new! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52'! openMenuString ^ 'SqueakMap Catalog'! ! !SMLoader class methodsFor: 'class initialization' stamp: 'btr 11/30/2006 21:52' prior: 27945298! unload (TheWorldMenu respondsTo: #registerOpenCommand:) ifTrue: [TheWorldMenu unregisterOpenCommand: self openMenuString]. 
self environment at: #Flaps ifPresent: [:cl | cl unregisterQuadsWithReceiver: self] ! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:08' prior: 54331069! addFiltersToMenu: aMenu | filterSymbol help | self filterSpecs do: [:filterArray | filterSymbol := filterArray second. help := filterArray third. aMenu addUpdating: #showFilterString: target: self selector: #toggleFilterState: argumentList: (Array with: filterSymbol). aMenu balloonTextForLastItem: help]. aMenu addLine; addList: #(('Clear all filters' uncheckFilters 'Unchecks all filters to list all packages')) ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:15' prior: 27927912! browseCacheDirectory "Open a FileList2 on the directory for the package or release." | item dir win | item := self selectedPackageOrRelease ifNil: [^ nil]. item ifNil: [^nil]. dir := item isPackage ifTrue: [model cache directoryForPackage: item] ifFalse: [model cache directoryForPackageRelease: item]. win := FileList2 morphicViewOnDirectory: dir. " withLabel: item name, ' cache directory'." win openInWorld ! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:52'! buildButtonBar | aRow btn | aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. self buttonSpecs do: [:spec | btn := self buildButtonNamed: spec first helpText: spec third action: spec second. aRow addMorphBack: btn] separatedBy: [aRow addTransparentSpacerOfSize: 3 at 0]. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 01:27'! buildButtonNamed: labelText helpText: balloon action: action | btn | btn := PluggableButtonMorph on: self getState: nil action: action. btn color: Color transparent; hResizing: #shrinkWrap; vResizing: #spaceFill; label: labelText; setBalloonText: balloon; onColor: Color transparent offColor: Color transparent. ^ btn! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 19:04' prior: 27928394! 
buildMorphicCategoriesList "Create the hierarchical list holding the category tree." | list | list := (SimpleHierarchicalListMorph on: self list: #categoryWrapperList selected: #selectedCategoryWrapper changeSelected: #selectedCategoryWrapper: menu: #categoriesMenu: keystroke: nil) autoDeselect: true; enableDrag: false; enableDrop: true; yourself. list setBalloonText: 'The categories are structured in a tree. Packages and package releases belong to several categories. You can add one or more categories as filters and enable them in the menu.'. "list scroller submorphs do:[:each| list expandAll: each]." list adjustSubmorphPositions. ^ list! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 00:22' prior: 27929139! buildMorphicPackagesList "Create the hierarchical list holding the packages and releases." ^(SimpleHierarchicalListMorph on: self list: #packageWrapperList selected: #selectedItemWrapper changeSelected: #selectedItemWrapper: menu: #packagesMenu: keystroke: nil) autoDeselect: false; enableDrag: false; enableDrop: true; setBalloonText: 'This shows all packages with their releases that should be displayed according the current filter.'; yourself! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:13'! buildPackageButtonBar | aRow | "Somewhat patterned after IRCe's buttonRow method." aRow := AlignmentMorph newRow beSticky. aRow color: Color transparent; clipSubmorphs: true. ^ aRow! ! !SMLoader methodsFor: 'interface' stamp: 'gk 5/5/2006 02:05' prior: 27929686! buildPackagePane "Create the text area to the right in the loader." | ptm | ptm := PluggableTextMorph on: self text: #contents accept: nil readSelection: nil "#packageSelection " menu: nil. ptm setBalloonText: 'This is where the selected package or package release is displayed.'. ptm lock. ^ptm! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 21:08' prior: 27930070! 
buildSearchPane "Cribbed from MessageNames>>inMorphicWindowWithInitialSearchString:" | typeInView searchButton typeInPane | typeInView := PluggableTextMorph on: self text: nil accept: #findPackage:notifying: readSelection: nil menu: nil. typeInView acceptOnCR: true; vResizing: #spaceFill; hResizing: #spaceFill; setTextMorphToSelectAllOnMouseEnter; askBeforeDiscardingEdits: false; setProperty: #alwaysAccept toValue: true. (typeInView respondsTo: #hideScrollBarsIndefinitely) ifTrue: [typeInView hideScrollBarsIndefinitely] ifFalse: [typeInView hideScrollBarIndefinitely]. searchButton := SimpleButtonMorph new target: typeInView; color: Color white; label: 'Search'; actionSelector: #accept; arguments: #(); yourself. typeInPane := AlignmentMorph newRow. typeInPane vResizing: #shrinkWrap; hResizing: #shrinkWrap; listDirection: #leftToRight; addMorphFront: searchButton; addTransparentSpacerOfSize: 6 @ 0; addMorphBack: typeInView; setBalloonText: 'Type into the pane, then press Search (or hit RETURN) to visit the next package matching what you typed.'. ^ typeInPane! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/22/2006 14:24'! buttonSpecs ^ #(('Install' installPackageRelease 'Install the latest version from the server.') ('Email' emailPackageMaintainers 'Open an editor to send an email to the owner and co-maintainers of this package.') ('Browse cache' browseCacheDirectory 'Browse cache directory of the selection.') ('Update' loadUpdates 'Update the package index from the servers.') ('Upgrade All' upgradeInstalledPackagesConfirm 'Upgrade all installed packages (confirming each).') ('Help' help 'What is this?'))! ! !SMLoader methodsFor: 'menus' stamp: 'btr 11/21/2006 16:11' prior: 27936393! categorySpecificOptions | choices | choices := OrderedCollection new. 
(categoriesToFilterIds includes: self selectedCategory id) ifTrue: [ choices add: #('Remove filter' #removeSelectedCategoryAsFilter 'Remove the filter for the selected category.')] ifFalse: [ choices add: #('Add as filter' #addSelectedCategoryAsFilter 'Add the selection as a filter to hide unrelated packages.')]. categoriesToFilterIds isEmpty ifFalse: [ choices add: #('Remove all filters' #removeCategoryFilters 'Remove all category filters.')]. ^ choices! ! !SMLoader methodsFor: 'lists' stamp: 'btr 11/30/2006 21:01' prior: 27933585! categoryWrapperList "Create the wrapper list for the hierarchical list. We sort the categories by name but ensure that 'Squeak versions' is first if it exists." | list first | list := (model categories select: [:each | each parent isNil]) asArray sort: [:c1 :c2 | c1 name <= c2 name]. first := list detect: [:any | any name = 'Squeak versions'] ifNone: []. first ifNotNil: [list := list copyWithout: first. list := {first} , list]. ^ list collect: [:cat | SMCategoryWrapper with: cat model: self]! ! !SMLoader methodsFor: 'filter utilities' stamp: 'gk 7/10/2004 15:45' prior: 27913226! changeFilters: anObject "Update my selection." | oldItem index | oldItem := self selectedPackageOrRelease. filters := anObject. self packagesListIndex: ((index := self packageList indexOf: oldItem) ifNil: [0] ifNotNil: [index]). self noteChanged! ! !SMLoader methodsFor: 'interface' stamp: 'btr 11/30/2006 17:30' prior: 27930584! createWindow | buttonBarHeight searchHeight vertDivide horizDivide | buttonBarHeight := 0.07. searchHeight := 0.07. vertDivide := 0.3. horizDivide := 0.6. self addMorph: (self buildButtonBar borderWidth: 0) frame: (0.0 @ 0.0 corner: 1.0 @ buttonBarHeight). self addMorph: (self buildSearchPane borderWidth: 0) frame: (0.0 @ buttonBarHeight corner: vertDivide @ searchHeight). self addMorph: (self buildMorphicPackagesList borderWidth: 0) frame: (0.0 @ (buttonBarHeight + searchHeight) corner: vertDivide @ horizDivide). 
self addMorph: (self buildMorphicCategoriesList borderWidth: 0) frame: (0.0 @ horizDivide corner: vertDivide @ 1.0). self addMorph: (self buildPackagePane borderWidth: 0) frame: (vertDivide @ buttonBarHeight corner: 1.0 @ 1.0). self on: #mouseEnter send: #paneTransition: to: self. self on: #mouseLeave send: #paneTransition: to: self! ! !SMLoader methodsFor: 'interface' stamp: 'gk 7/12/2004 11:14' prior: 27931214! defaultButtonPaneHeight "Answer the user's preferred default height for new button panes." ^ Preferences parameterAt: #defaultButtonPaneHeight ifAbsentPut: [25]! ! !SMLoader methodsFor: 'interface' stamp: 'btr 12/1/2006 02:01'! defaultLabel ^'SqueakMap Package Loader'! ! !SMLoader methodsFor: 'actions' stamp: 'btr 11/22/2006 01:14' prior: 27917579! emailPackageMaintainers "Send mail to package owner and co-maintainers." | item package toAddresses | item := self selectedPackageOrRelease ifNil: [^ nil]. package := item isPackageRelease ifTrue: [item package] ifFalse: [item]. "(this logic should be moved to MailMessage as soon as it can handle multiple To: addresses)" toAddresses := '<', package owner email, '>'. package maintainers ifNotNil: [ package maintainers do: [:maintainer | toAddresses := toAddresses, ', <', maintainer email, '>']]. SMUtilities sendMailTo: toAddresses regardingPackageRelease: item! ! !SMLoader methodsFor: 'filter utilities' stamp: 'btr 11/22/2006 00:14' prior: 27923782! filterSpecs "Return a specification for the filter menu. Is called each time." 
| specs | specs := #( #('Auto-installable packages' #filterAutoInstall 'display only packages that can be installed automatically') #('New available packages' #filterAvailable 'display only packages that are not installed or that have newer releases available.') #('New safely-available packages' #filterSafelyAvailable 'display only packages that are not installed or that have newer releases available that are safe to install, meaning that they are published and meant for the current version of Squeak.') From noreply at buildbot.pypy.org Thu Aug 21 12:55:00 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:00 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Tests green now. Message-ID: <20140821105500.1A02A1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1038:baecfe86835c Date: 2014-08-20 16:38 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/baecfe86835c/ Log: Tests green now. diff --git a/rstrategies.py b/rstrategies.py --- a/rstrategies.py +++ b/rstrategies.py @@ -11,15 +11,17 @@ return subclasses class StrategyFactory(object): - _immutable_fields_ = ["xx[*]"] + _immutable_fields_ = ["strategies[*]"] def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = collect_subclasses(root_class) - self.strategies = all_strategy_classes + self.strategies = [] - for strategy_class in self.strategies: - strategy_class._strategy_instance = self.instantiate_empty(strategy_class) + for strategy_class in all_strategy_classes: + if hasattr(strategy_class, "_is_strategy") and strategy_class._is_strategy: + strategy_class._strategy_instance = self.instantiate_empty(strategy_class) + self.strategies.append(strategy_class) # Patch root class: Add default handler for visitor def copy_from_OTHER(self, other): @@ -33,10 +35,10 @@ getattr(other, funcname)(self) strategy_class.initiate_copy_into = initiate_copy_into - def 
setup_strategy_transitions(self, transitions): + def decorate_strategies(self, transitions): "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): - generalize(generalized)(strategy_class) + strategy(generalized)(strategy_class) # Instantiate new_strategy_type with size, replace old_strategy with it, # and return the new instance @@ -78,17 +80,19 @@ # Instance will be frozen at compile time, making accesses constant. return True -def generalize(generalized): +def strategy(generalize=None): def decorator(strategy_class): - # Patch strategy class: Add generalized_strategy_for - # TODO - optimize this method - @jit.unroll_safe - def generalized_strategy_for(self, value): - for strategy in generalized: - if strategy._strategy_instance.check_can_handle(value): - return strategy - raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) - strategy_class.generalized_strategy_for = generalized_strategy_for + # Patch strategy class: Add generalized_strategy_for and mark as strategy class. 
+ if generalize: + # TODO - optimize this method + @jit.unroll_safe + def generalized_strategy_for(self, value): + for strategy in generalize: + if strategy._strategy_instance.check_can_handle(value): + return strategy + raise Exception("Could not find generalized strategy for %s coming from %s" % (value, self)) + strategy_class.generalized_strategy_for = generalized_strategy_for + strategy_class._is_strategy = True return strategy_class return decorator diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, wrapper, display +from spyvm import constants, model, wrapper, display, storage from spyvm.error import UnwrappingError, WrappingError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize, import_from_mixin diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -46,17 +46,19 @@ def copy_from_AllNilStrategy(self, all_nil_storage): pass # Fields already initialized to nil + at rstrat.strategy() class ListStorageShadow(AbstractStorageShadow): repr_classname = "ListStorageShadow" import_from_mixin(rstrat.GenericStrategy) def default_value(self): return self.space.w_nil + at rstrat.strategy() class WeakListStorageShadow(AbstractStorageShadow): repr_classname = "WeakListStorageShadow" import_from_mixin(rstrat.WeakGenericStrategy) def default_value(self): return self.space.w_nil - at rstrat.generalize([ListStorageShadow]) + at rstrat.strategy(generalize=[ListStorageShadow]) class SmallIntegerOrNilStorageShadow(AbstractStorageShadow): repr_classname = "SmallIntegerOrNilStorageShadow" import_from_mixin(rstrat.TaggingStrategy) @@ -67,7 +69,7 @@ def wrapped_tagged_value(self): return self.space.w_nil def unwrapped_tagged_value(self): return constants.MAXINT - at rstrat.generalize([ListStorageShadow]) + at rstrat.strategy(generalize=[ListStorageShadow]) class 
FloatOrNilStorageShadow(AbstractStorageShadow): repr_classname = "FloatOrNilStorageShadow" import_from_mixin(rstrat.TaggingStrategy) @@ -78,7 +80,7 @@ def wrapped_tagged_value(self): return self.space.w_nil def unwrapped_tagged_value(self): import sys; return sys.float_info.max - at rstrat.generalize([ + at rstrat.strategy(generalize=[ SmallIntegerOrNilStorageShadow, FloatOrNilStorageShadow, ListStorageShadow]) @@ -93,7 +95,7 @@ from spyvm import objspace self.space = space self.no_specialized_storage = objspace.ConstantFlag() - rstrat.StrategyFactory.__init__(self, AbstractStorageShadow) + rstrat.StrategyFactory.__init__(self, AbstractShadow) def strategy_type_for(self, objects, weak=False): if weak: From noreply at buildbot.pypy.org Thu Aug 21 12:55:01 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:01 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Fixed -A flag to execute very slow tests. Message-ID: <20140821105501.2101E1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1039:038a4928d7ac Date: 2014-08-20 16:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/038a4928d7ac/ Log: Fixed -A flag to execute very slow tests. diff --git a/spyvm/test/conftest.py b/spyvm/test/conftest.py --- a/spyvm/test/conftest.py +++ b/spyvm/test/conftest.py @@ -21,10 +21,10 @@ dest="rsqueak-binary", action="store", default=None, - help="Path to a compiled rsqueak binary" + help="Path to a compiled rsqueak binary. Enables jit tests." ) -# The 'spy' parameter is used in tests under jittest/ +# The 'jit' parameter is used in tests under jittest/ def pytest_funcarg__spy(request): val = request.config.getvalue("rsqueak-binary") if not val: diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -6,7 +6,8 @@ # The according options is configured in conftest.py. 
# To mark all tests in a module as slow, add this line to the module: # pytestmark = slow_test -slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests") or config.getvalue("execute-all-tests")', +slow_test = py.test.mark.skipif(' not config.getvalue("execute-slow-tests")' + + 'and not config.getvalue("execute-all-tests")', reason="Slow tests are being skipped. Add --slow|-S to execute slow tests.") very_slow_test = py.test.mark.skipif('not config.getvalue("execute-all-tests")', From noreply at buildbot.pypy.org Thu Aug 21 12:55:02 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:02 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk storage-cleanups: Refactored test flags. -Q enables quick tests (9 sec), no flags takes 35 sec, -S flag takes 2:30 min. Message-ID: <20140821105502.45FA61D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: storage-cleanups Changeset: r1040:d7088a2d1cb8 Date: 2014-08-20 17:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/d7088a2d1cb8/ Log: Refactored test flags. -Q enables quick tests (9 sec), no flags takes 35 sec, -S flag takes 2:30 min. 
diff --git a/spyvm/test/conftest.py b/spyvm/test/conftest.py --- a/spyvm/test/conftest.py +++ b/spyvm/test/conftest.py @@ -3,18 +3,18 @@ def pytest_addoption(parser): group = parser.getgroup("RSqueak test options") group.addoption( + "--quick", "-Q", + dest="execute-quick-tests", + action="store_false", + default=True, + help="Only execute quick tests (no image loading or long execution)" + ) + group.addoption( "--slow", "-S", dest="execute-slow-tests", action="store_true", default=False, - help="Additionally execute slow tests (loading full Squeak image or long execution)" - ) - group.addoption( - "--all", "-A", - dest="execute-all-tests", - action="store_true", - default=False, - help="Execute all tests" + help="Execute all tests (including very slow tests)" ) group.addoption( "--jit", diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,5 +1,5 @@ import py -from .util import read_image, copy_to_module, cleanup_module, slow_test, very_slow_test +from .util import read_image, copy_to_module, cleanup_module, very_slow_test def setup_module(): space, interp, image, reader = read_image("bootstrapped.image") @@ -16,15 +16,6 @@ @very_slow_test def test_retrieve_symbol(): - """asSymbol - "This is the only place that new Symbols are created. A Symbol is created - if and only if there is not already a Symbol with its contents in existance." - Symbol - allInstancesDo: [ :sym | - self = sym - ifTrue: [ ^ sym ] ]. 
- ^ (Symbol basicNew: self size) initFrom: self""" - space.initialize_class(space.w_String, interp) w_result = perform(w("someString"), "asSymbol") assert w_result.as_string() == "someString" diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,7 +1,7 @@ import operator from spyvm import model, constants, primitives from spyvm.test.test_primitives import MockFrame -from .util import read_image, copy_to_module, cleanup_module, slow_test +from .util import read_image, copy_to_module, cleanup_module from rpython.rlib.rarithmetic import intmask, r_uint def setup_module(): diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,6 +1,8 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, very_slow_test +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test + +pytestmark = slow_test def setup_module(): space, interp, image, reader = read_image("mini.image") diff --git a/spyvm/test/test_miniimage_compiling.py b/spyvm/test/test_miniimage_compiling.py --- a/spyvm/test/test_miniimage_compiling.py +++ b/spyvm/test/test_miniimage_compiling.py @@ -1,6 +1,8 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test + +pytestmark = slow_test def setup_module(): space, interp, _, _ = read_image("mini.image") @@ -19,7 +21,7 @@ def test_load_image(): pass - at slow_test + at very_slow_test def 
test_compile_method(): sourcecode = """fib ^self < 2 @@ -28,7 +30,7 @@ perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) assert perform(w(10), "fib").is_same_object(w(89)) - at slow_test + at very_slow_test def test_become(): sourcecode = """ testBecome @@ -55,7 +57,6 @@ w_result = perform(w(10), "testBecome") assert space.unwrap_int(w_result) == 42 - at slow_test def test_cached_methoddict(): sourcecode = """fib ^self < 2 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,7 +5,7 @@ from rpython.rlib.rfloat import isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test +from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, very_slow_test def setup_module(): space = create_space(bootstrap = True) diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -1,9 +1,9 @@ from spyvm import model, storage -from .util import read_image, copy_to_module, cleanup_module +from .util import create_space_interp, copy_to_module, cleanup_module def setup_module(): - space, interp, _, _ = read_image('bootstrapped.image') + space, interp = create_space_interp() class_Array = space.classtable["w_Array"] w_nil = space.w_nil copy_to_module(locals(), __name__) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -6,12 +6,11 @@ # The according options is configured in conftest.py. 
# To mark all tests in a module as slow, add this line to the module: # pytestmark = slow_test -slow_test = py.test.mark.skipif(' not config.getvalue("execute-slow-tests")' + - 'and not config.getvalue("execute-all-tests")', - reason="Slow tests are being skipped. Add --slow|-S to execute slow tests.") +slow_test = py.test.mark.skipif('not config.getvalue("execute-quick-tests")', + reason="Slow tests are being skipped because of -Q|--quick option.") -very_slow_test = py.test.mark.skipif('not config.getvalue("execute-all-tests")', - reason="Very slow tests are being skipped. Add --all|-A to execute all tests.") +very_slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests")', + reason="Very slow tests are being skipped. Add --slow|-S to execute all tests.") # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. # This way, as many tests as possible use the real, not-bootstrapped ObjSpace. From noreply at buildbot.pypy.org Thu Aug 21 12:55:03 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:03 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Merged Message-ID: <20140821105503.5774B1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1041:63ab8de4343a Date: 2014-08-20 17:48 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/63ab8de4343a/ Log: Merged diff --git a/spyvm/test/conftest.py b/spyvm/test/conftest.py --- a/spyvm/test/conftest.py +++ b/spyvm/test/conftest.py @@ -3,28 +3,28 @@ def pytest_addoption(parser): group = parser.getgroup("RSqueak test options") group.addoption( + "--quick", "-Q", + dest="execute-quick-tests", + action="store_false", + default=True, + help="Only execute quick tests (no image loading or long execution)" + ) + group.addoption( "--slow", "-S", dest="execute-slow-tests", action="store_true", default=False, - help="Additionally execute slow tests (loading full Squeak image or long execution)" - ) - 
group.addoption( - "--all", "-A", - dest="execute-all-tests", - action="store_true", - default=False, - help="Execute all tests" + help="Execute all tests (including very slow tests)" ) group.addoption( "--jit", dest="rsqueak-binary", action="store", default=None, - help="Path to a compiled rsqueak binary" + help="Path to a compiled rsqueak binary. Enables jit tests." ) -# The 'spy' parameter is used in tests under jittest/ +# The 'jit' parameter is used in tests under jittest/ def pytest_funcarg__spy(request): val = request.config.getvalue("rsqueak-binary") if not val: diff --git a/spyvm/test/test_bootstrappedimage.py b/spyvm/test/test_bootstrappedimage.py --- a/spyvm/test/test_bootstrappedimage.py +++ b/spyvm/test/test_bootstrappedimage.py @@ -1,5 +1,5 @@ import py -from .util import read_image, copy_to_module, cleanup_module, slow_test, very_slow_test +from .util import read_image, copy_to_module, cleanup_module, very_slow_test def setup_module(): space, interp, image, reader = read_image("bootstrapped.image") @@ -16,15 +16,6 @@ @very_slow_test def test_retrieve_symbol(): - """asSymbol - "This is the only place that new Symbols are created. A Symbol is created - if and only if there is not already a Symbol with its contents in existance." - Symbol - allInstancesDo: [ :sym | - self = sym - ifTrue: [ ^ sym ] ]. 
- ^ (Symbol basicNew: self size) initFrom: self""" - space.initialize_class(space.w_String, interp) w_result = perform(w("someString"), "asSymbol") assert w_result.as_string() == "someString" diff --git a/spyvm/test/test_largeinteger.py b/spyvm/test/test_largeinteger.py --- a/spyvm/test/test_largeinteger.py +++ b/spyvm/test/test_largeinteger.py @@ -1,7 +1,7 @@ import operator from spyvm import model, constants, primitives from spyvm.test.test_primitives import MockFrame -from .util import read_image, copy_to_module, cleanup_module, slow_test +from .util import read_image, copy_to_module, cleanup_module from rpython.rlib.rarithmetic import intmask, r_uint def setup_module(): diff --git a/spyvm/test/test_miniimage.py b/spyvm/test/test_miniimage.py --- a/spyvm/test/test_miniimage.py +++ b/spyvm/test/test_miniimage.py @@ -1,6 +1,8 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, very_slow_test +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test + +pytestmark = slow_test def setup_module(): space, interp, image, reader = read_image("mini.image") diff --git a/spyvm/test/test_miniimage_compiling.py b/spyvm/test/test_miniimage_compiling.py --- a/spyvm/test/test_miniimage_compiling.py +++ b/spyvm/test/test_miniimage_compiling.py @@ -1,6 +1,8 @@ import py, math from spyvm import model, constants, storage_contexts, wrapper, primitives, interpreter, error -from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test +from .util import read_image, open_reader, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test + +pytestmark = slow_test def setup_module(): space, interp, _, _ = read_image("mini.image") @@ -19,7 +21,7 @@ def test_load_image(): pass - at slow_test + at very_slow_test def 
test_compile_method(): sourcecode = """fib ^self < 2 @@ -28,7 +30,7 @@ perform(w(10).getclass(space), "compile:classified:notifying:", w(sourcecode), w('pypy'), w(None)) assert perform(w(10), "fib").is_same_object(w(89)) - at slow_test + at very_slow_test def test_become(): sourcecode = """ testBecome @@ -55,7 +57,6 @@ w_result = perform(w(10), "testBecome") assert space.unwrap_int(w_result) == 42 - at slow_test def test_cached_methoddict(): sourcecode = """fib ^self < 2 diff --git a/spyvm/test/test_primitives.py b/spyvm/test/test_primitives.py --- a/spyvm/test/test_primitives.py +++ b/spyvm/test/test_primitives.py @@ -5,7 +5,7 @@ from rpython.rlib.rfloat import isinf, isnan from rpython.rlib.rarithmetic import intmask from rpython.rtyper.lltypesystem import lltype, rffi -from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, slow_test, very_slow_test +from .util import create_space, copy_to_module, cleanup_module, TestInterpreter, very_slow_test def setup_module(): space = create_space(bootstrap = True) diff --git a/spyvm/test/test_strategies.py b/spyvm/test/test_strategies.py --- a/spyvm/test/test_strategies.py +++ b/spyvm/test/test_strategies.py @@ -1,9 +1,9 @@ from spyvm import model, storage -from .util import read_image, copy_to_module, cleanup_module +from .util import create_space_interp, copy_to_module, cleanup_module def setup_module(): - space, interp, _, _ = read_image('bootstrapped.image') + space, interp = create_space_interp() class_Array = space.classtable["w_Array"] w_nil = space.w_nil copy_to_module(locals(), __name__) diff --git a/spyvm/test/util.py b/spyvm/test/util.py --- a/spyvm/test/util.py +++ b/spyvm/test/util.py @@ -6,11 +6,11 @@ # The according options is configured in conftest.py. 
# To mark all tests in a module as slow, add this line to the module: # pytestmark = slow_test -slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests") or config.getvalue("execute-all-tests")', - reason="Slow tests are being skipped. Add --slow|-S to execute slow tests.") +slow_test = py.test.mark.skipif('not config.getvalue("execute-quick-tests")', + reason="Slow tests are being skipped because of -Q|--quick option.") -very_slow_test = py.test.mark.skipif('not config.getvalue("execute-all-tests")', - reason="Very slow tests are being skipped. Add --all|-A to execute all tests.") +very_slow_test = py.test.mark.skipif('not config.getvalue("execute-slow-tests")', + reason="Very slow tests are being skipped. Add --slow|-S to execute all tests.") # Most tests don't need a bootstrapped objspace. Those that do, indicate so explicitely. # This way, as many tests as possible use the real, not-bootstrapped ObjSpace. From noreply at buildbot.pypy.org Thu Aug 21 12:55:04 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:04 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Added _attrs_ declarations Message-ID: <20140821105504.6F1F71D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1042:7638dd4f69b2 Date: 2014-08-20 18:06 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/7638dd4f69b2/ Log: Added _attrs_ declarations diff --git a/rstrategies.py b/rstrategies.py --- a/rstrategies.py +++ b/rstrategies.py @@ -151,6 +151,7 @@ class SingleValueStrategy(AbstractStrategy): _immutable_fields_ = ["_size", "val"] + _attrs_ = ["_size", "val"] # == Required: # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin @@ -176,15 +177,13 @@ class StrategyWithStorage(AbstractStrategy): _immutable_fields_ = ["storage"] + _attrs_ = ["storage"] # == Required: # See AbstractStrategy # check_index_*(...) 
- use mixin SafeIndexingMixin, UnsafeIndexingMixin or VariableSizeMixin # default_value(self) - The value to be initially contained in this strategy def init_strategy(self, initial_size): - self.init_StrategyWithStorage(initial_size) - - def init_StrategyWithStorage(self, initial_size): default = self._unwrap(self.default_value()) self.storage = [default] * initial_size @@ -318,22 +317,17 @@ # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - def init_strategy(self, initial_size): - self.tag = self.unwrapped_tagged_value() - self.w_tag = self.wrapped_tagged_value() - self.init_StrategyWithStorage(initial_size) - def check_can_handle(self, value): - return value is self.w_tag or \ + return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ - self.unwrap(value) != self.tag) + self.unwrap(value) != self.unwrapped_tagged_value()) def _unwrap(self, value): - if value is self.w_tag: - return self.tag + if value is self.wrapped_tagged_value(): + return self.unwrapped_tagged_value() return self.unwrap(value) def _wrap(self, value): - if value == self.tag: - return self.w_tag + if value == self.unwrapped_tagged_value(): + return self.wrapped_tagged_value() return self.wrap(value) diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -45,9 +45,6 @@ class ObjSpace(object): def __init__(self): - # If this flag is set, then no optimizing storage strategies will be used. - # Intended for performance comparisons. Breaks tests. 
- self.no_specialized_storage = ConstantFlag() # This is a hack; see compile_code() in targetimageloadingsmalltalk.py self.suppress_process_switch = ConstantFlag() self.run_spy_hacks = ConstantFlag() From noreply at buildbot.pypy.org Thu Aug 21 12:55:05 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:05 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Added _attrs_ Message-ID: <20140821105505.880FD1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1043:663e58f4fdc7 Date: 2014-08-20 19:41 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/663e58f4fdc7/ Log: Added _attrs_ diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -34,6 +34,7 @@ class AbstractStorageShadow(AbstractShadow): repr_classname = "AbstractStorageShadow" + _attrs_ = [] import_from_mixin(rstrat.SafeIndexingMixin) def __init__(self, space, w_self, size): From noreply at buildbot.pypy.org Thu Aug 21 12:55:06 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:06 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Marked test as very slow. Message-ID: <20140821105506.978EC1D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1044:c654217f1617 Date: 2014-08-20 22:52 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/c654217f1617/ Log: Marked test as very slow. diff --git a/spyvm/test/test_zin_squeak_4_5_image.py b/spyvm/test/test_zin_squeak_4_5_image.py --- a/spyvm/test/test_zin_squeak_4_5_image.py +++ b/spyvm/test/test_zin_squeak_4_5_image.py @@ -1,6 +1,6 @@ import operator from spyvm import model -from .util import read_image, copy_to_module, cleanup_module, slow_test +from .util import read_image, copy_to_module, cleanup_module, slow_test, very_slow_test # The tests are quick, but loading the big image takes time. 
pytestmark = slow_test @@ -17,7 +17,8 @@ def test_all_pointers_are_valid(): from test_miniimage import _test_all_pointers_are_valid _test_all_pointers_are_valid(reader) - + + at very_slow_test def test_lookup_abs_in_integer(): from test_miniimage import _test_lookup_abs_in_integer _test_lookup_abs_in_integer(interp) From noreply at buildbot.pypy.org Thu Aug 21 12:55:07 2014 From: noreply at buildbot.pypy.org (anton_gulenko) Date: Thu, 21 Aug 2014 12:55:07 +0200 (CEST) Subject: [pypy-commit] lang-smalltalk rstrategies: Fixed RPython compilation. Message-ID: <20140821105507.9A3751D22E9@cobra.cs.uni-duesseldorf.de> Author: Anton Gulenko Branch: rstrategies Changeset: r1045:e281e2683374 Date: 2014-08-20 22:53 +0200 http://bitbucket.org/pypy/lang-smalltalk/changeset/e281e2683374/ Log: Fixed RPython compilation. Added StrategyMetaclass for correctness. diff --git a/rstrategies.py b/rstrategies.py --- a/rstrategies.py +++ b/rstrategies.py @@ -2,6 +2,11 @@ import weakref from rpython.rlib import jit +class StrategyMetaclass(type): + def __new__(self, name, bases, attrs): + attrs['_is_strategy'] = False + return super(StrategyMetaclass, self).__new__(self, name, bases, attrs) + def collect_subclasses(cls): "NOT_RPYTHON" subclasses = [] diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -680,6 +680,7 @@ shadow = old_shadow if not isinstance(old_shadow, TheClass): shadow = space.strategy_factory.switch_strategy(old_shadow, TheClass) + assert isinstance(shadow, TheClass) return shadow def get_shadow(self, space): diff --git a/spyvm/storage.py b/spyvm/storage.py --- a/spyvm/storage.py +++ b/spyvm/storage.py @@ -14,6 +14,7 @@ _immutable_fields_ = ['space'] provides_getname = False repr_classname = "AbstractShadow" + __metaclass__ = rstrat.StrategyMetaclass import_from_mixin(rstrat.AbstractCollection) def __init__(self, space, w_self, size): @@ -75,11 +76,12 @@ repr_classname = "FloatOrNilStorageShadow" 
import_from_mixin(rstrat.TaggingStrategy) contained_type = model.W_Float + tag_float = sys.float_info.max def wrap(self, val): return self.space.wrap_float(val) def unwrap(self, w_val): return self.space.unwrap_float(w_val) def default_value(self): return self.space.w_nil def wrapped_tagged_value(self): return self.space.w_nil - def unwrapped_tagged_value(self): import sys; return sys.float_info.max + def unwrapped_tagged_value(self): return self.tag_float @rstrat.strategy(generalize=[ SmallIntegerOrNilStorageShadow, diff --git a/spyvm/storage_classes.py b/spyvm/storage_classes.py --- a/spyvm/storage_classes.py +++ b/spyvm/storage_classes.py @@ -127,6 +127,7 @@ self.store_s_methoddict(s_new_methoddict) def store_s_methoddict(self, s_methoddict): + assert isinstance(s_methoddict, MethodDictionaryShadow) s_methoddict.s_class = self s_methoddict.sync_method_cache() self._s_methoddict = s_methoddict @@ -193,20 +194,6 @@ # included so that we can reproduce code from the reference impl # more easily - def ispointers(self): - " True if instances of this class have data stored as pointers " - XXX # what about weak pointers? - return self.format == POINTERS - - def iswords(self): - " True if instances of this class have data stored as numerical words " - XXX # what about weak pointers? 
- return self.format in (POINTERS, WORDS) - - def isbytes(self): - " True if instances of this class have data stored as numerical bytes " - return self.format == BYTES - @constant_for_version def isvariable(self): " True if instances of this class have indexed inst variables " diff --git a/spyvm/storage_contexts.py b/spyvm/storage_contexts.py --- a/spyvm/storage_contexts.py +++ b/spyvm/storage_contexts.py @@ -3,6 +3,7 @@ from spyvm.storage import AbstractRedirectingShadow from rpython.tool.pairtype import extendabletype from rpython.rlib import rarithmetic, jit, objectmodel +import rstrategies as rstrat @objectmodel.specialize.call_location() def fresh_virtualizable(x): @@ -19,9 +20,12 @@ ActiveContext = ContextState("ActiveContext") DirtyContext = ContextState("DirtyContext") +class ExtendableStrategyMetaclass(extendabletype, rstrat.StrategyMetaclass): + pass + class ContextPartShadow(AbstractRedirectingShadow): - __metaclass__ = extendabletype + __metaclass__ = ExtendableStrategyMetaclass _attrs_ = ['_s_sender', '_pc', '_temps_and_stack', '_stack_ptr', 'instances_w', 'state'] From noreply at buildbot.pypy.org Thu Aug 21 14:59:07 2014 From: noreply at buildbot.pypy.org (ISF) Date: Thu, 21 Aug 2014 14:59:07 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Merge branch 'default' Message-ID: <20140821125907.6EB581C0EE9@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r72947:c7a7d1ed8ea7 Date: 2014-08-13 16:23 -0300 http://bitbucket.org/pypy/pypy/changeset/c7a7d1ed8ea7/ Log: Merge branch 'default' diff too long, truncating to 2000 out of 86962 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,11 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 
release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,62 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. 
Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +147,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +169,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +193,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -203,8 +213,11 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +231,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -234,28 +248,39 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +288,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). 
+""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: From noreply at buildbot.pypy.org Thu Aug 21 17:20:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 21 Aug 2014 17:20:11 +0200 (CEST) Subject: [pypy-commit] stmgc default: Test and fix: we need to decouple enterframe() from taking the Message-ID: <20140821152011.932EC1C1486@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1322:7949c54b03a5 Date: 2014-08-21 17:20 +0200 http://bitbucket.org/pypy/stmgc/changeset/7949c54b03a5/ Log: Test and fix: we need to decouple enterframe() from taking the __builtin_frame_address(0). diff --git a/c7/stm/rewind_setjmp.h b/c7/stm/rewind_setjmp.h --- a/c7/stm/rewind_setjmp.h +++ b/c7/stm/rewind_setjmp.h @@ -74,8 +74,13 @@ /* remember the current stack and ss_stack positions */ #define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + rewind_jmp_prepareframe(rjbuf); \ + rewind_jmp_enterprepframe(rjthread, rjbuf, ss); \ +} while (0) +#define rewind_jmp_prepareframe(rjbuf) \ + ((rjbuf)->frame_base = __builtin_frame_address(0)) +#define rewind_jmp_enterprepframe(rjthread, rjbuf, ss) do { \ assert((((long)(ss)) & 1) == 0); \ - (rjbuf)->frame_base = __builtin_frame_address(0); \ (rjbuf)->shadowstack_base = (char *)(ss); \ (rjbuf)->prev = (rjthread)->head; \ (rjthread)->head = (rjbuf); \ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -328,6 +328,8 @@ /* At some key places, like the entry point of the thread and in the function with the interpreter's dispatch loop, you need to declare a local variable of type 'rewind_jmp_buf' and call these macros. 
*/ +#define stm_rewind_jmp_enterprepframe(tl, rjbuf) \ + rewind_jmp_enterprepframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_enterframe(tl, rjbuf) \ rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_leaveframe(tl, rjbuf) \ diff --git a/c7/test/test_rewind.c b/c7/test/test_rewind.c --- a/c7/test/test_rewind.c +++ b/c7/test/test_rewind.c @@ -285,6 +285,48 @@ /************************************************************/ +__attribute__((noinline)) +int _7start_transaction() +{ + int result = rewind_jmp_setjmp(>hread, NULL); + return result; +} + +__attribute__((noinline)) +int _7enter_callback(rewind_jmp_buf *buf) +{ + rewind_jmp_enterprepframe(>hread, buf, NULL); + return _7start_transaction(); +} + +__attribute__((noinline)) +int _7bootstrap() +{ + rewind_jmp_longjmp(>hread); + return 0; +} + +__attribute__((noinline)) +int _7leave_callback(rewind_jmp_buf *buf) +{ + rewind_jmp_leaveframe(>hread, buf, NULL); + return 0; +} + +void test7(void) +{ + rewind_jmp_buf buf; + register long bla = 3; + rewind_jmp_prepareframe(&buf); + if (_7enter_callback(&buf) == 0) { + _7bootstrap(); + } + _7leave_callback(&buf); + assert(bla == 3); +} + +/************************************************************/ + int rj_malloc_count = 0; void *rj_malloc(size_t size) @@ -313,6 +355,7 @@ else if (!strcmp(argv[1], "4")) test4(); else if (!strcmp(argv[1], "5")) test5(); else if (!strcmp(argv[1], "6")) test6(); + else if (!strcmp(argv[1], "7")) test7(); else if (!strcmp(argv[1], "TL1")) testTL1(); else if (!strcmp(argv[1], "TL2")) testTL2(); else diff --git a/c7/test/test_rewind.py b/c7/test/test_rewind.py --- a/c7/test/test_rewind.py +++ b/c7/test/test_rewind.py @@ -6,7 +6,7 @@ % (opt, opt)) if err != 0: raise OSError("clang failed on test_rewind.c") - for testnum in [1, 2, 3, 4, 5, 6, "TL1", "TL2"]: + for testnum in [1, 2, 3, 4, 5, 6, 7, "TL1", "TL2"]: print '=== O%s: RUNNING TEST %s ===' % (opt, testnum) err = 
os.system("./test_rewind_O%s %s" % (opt, testnum)) if err != 0: From noreply at buildbot.pypy.org Thu Aug 21 17:25:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 21 Aug 2014 17:25:26 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc/7949c54b03a5 Message-ID: <20140821152526.CF9791C1482@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72948:92734c1dff54 Date: 2014-08-21 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/92734c1dff54/ Log: import stmgc/7949c54b03a5 diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -b067967930aa +7949c54b03a5 diff --git a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h --- a/rpython/translator/stm/src_stm/stm/rewind_setjmp.h +++ b/rpython/translator/stm/src_stm/stm/rewind_setjmp.h @@ -75,8 +75,13 @@ /* remember the current stack and ss_stack positions */ #define rewind_jmp_enterframe(rjthread, rjbuf, ss) do { \ + rewind_jmp_prepareframe(rjbuf); \ + rewind_jmp_enterprepframe(rjthread, rjbuf, ss); \ +} while (0) +#define rewind_jmp_prepareframe(rjbuf) \ + ((rjbuf)->frame_base = __builtin_frame_address(0)) +#define rewind_jmp_enterprepframe(rjthread, rjbuf, ss) do { \ assert((((long)(ss)) & 1) == 0); \ - (rjbuf)->frame_base = __builtin_frame_address(0); \ (rjbuf)->shadowstack_base = (char *)(ss); \ (rjbuf)->prev = (rjthread)->head; \ (rjthread)->head = (rjbuf); \ diff --git a/rpython/translator/stm/src_stm/stmgc.h b/rpython/translator/stm/src_stm/stmgc.h --- a/rpython/translator/stm/src_stm/stmgc.h +++ b/rpython/translator/stm/src_stm/stmgc.h @@ -329,6 +329,8 @@ /* At some key places, like the entry point of the thread and in the function with the interpreter's dispatch loop, you need to declare a local variable of type 'rewind_jmp_buf' and call these macros. 
*/ +#define stm_rewind_jmp_enterprepframe(tl, rjbuf) \ + rewind_jmp_enterprepframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_enterframe(tl, rjbuf) \ rewind_jmp_enterframe(&(tl)->rjthread, rjbuf, (tl)->shadowstack) #define stm_rewind_jmp_leaveframe(tl, rjbuf) \ From noreply at buildbot.pypy.org Thu Aug 21 17:25:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 21 Aug 2014 17:25:27 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Split rewind_jmp_enterframe() into two calls Message-ID: <20140821152527.F38D61C1482@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72949:93fa346570f6 Date: 2014-08-21 17:24 +0200 http://bitbucket.org/pypy/pypy/changeset/93fa346570f6/ Log: Split rewind_jmp_enterframe() into two calls diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -201,6 +201,8 @@ yield 'rewind_jmp_buf rjbuf1;' if self.use_stm_rewind_jmp_frame == "automatic": yield 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);' + else: + yield 'stm_rewind_jmp_prepareframe(&rjbuf1);' # graph = self.graph yield 'goto block0;' # to avoid a warning "this label is not used" diff --git a/rpython/translator/stm/src_stm/stmgcintf.c b/rpython/translator/stm/src_stm/stmgcintf.c --- a/rpython/translator/stm/src_stm/stmgcintf.c +++ b/rpython/translator/stm/src_stm/stmgcintf.c @@ -96,7 +96,8 @@ assert(pypy_transaction_length >= 0); int e = errno; pypy_stm_register_thread_local(); - stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); + stm_rewind_jmp_enterprepframe(&stm_thread_local, + (rewind_jmp_buf *)rjbuf); errno = e; pypy_stm_ready_atomic = 1; pypy_stm_start_if_not_atomic(); @@ -104,7 +105,8 @@ } else { /* callback from C code, itself called from Python code */ - stm_rewind_jmp_enterframe(&stm_thread_local, (rewind_jmp_buf *)rjbuf); + stm_rewind_jmp_enterprepframe(&stm_thread_local, + 
(rewind_jmp_buf *)rjbuf); pypy_stm_start_if_not_atomic(); return 0; } From noreply at buildbot.pypy.org Thu Aug 21 18:07:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 21 Aug 2014 18:07:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140821160759.8CBB21C148A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r72950:c13564d2e37b Date: 2014-08-21 18:07 +0200 http://bitbucket.org/pypy/pypy/changeset/c13564d2e37b/ Log: Fix diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -202,7 +202,7 @@ if self.use_stm_rewind_jmp_frame == "automatic": yield 'stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf1);' else: - yield 'stm_rewind_jmp_prepareframe(&rjbuf1);' + yield 'rewind_jmp_prepareframe(&rjbuf1);' # graph = self.graph yield 'goto block0;' # to avoid a warning "this label is not used" From noreply at buildbot.pypy.org Thu Aug 21 21:27:55 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Thu, 21 Aug 2014 21:27:55 +0200 (CEST) Subject: [pypy-commit] pypy improve-docs: explain why unwrap is so strongly discouraged Message-ID: <20140821192755.F3C341C1482@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: improve-docs Changeset: r72951:71a3763555f6 Date: 2014-08-21 19:41 +0200 http://bitbucket.org/pypy/pypy/changeset/71a3763555f6/ Log: explain why unwrap is so strongly discouraged diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -232,8 +232,10 @@ .. py:function:: unwrap(w_x) - Returns the interpreter-level equivalent of :py:obj:`w_x` -- use this **ONLY** for - testing! In most circumstances you should use the functions described below instead. + Returns the interpreter-level equivalent of :py:obj:`w_x` -- use this + **ONLY** for testing, because this method is not RPython and thus cannot be + translated! 
In most circumstances you should use the functions described + below instead. .. py:function:: is_true(w_x) From noreply at buildbot.pypy.org Thu Aug 21 22:01:36 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Thu, 21 Aug 2014 22:01:36 +0200 (CEST) Subject: [pypy-commit] pypy default: fix typo Message-ID: <20140821200136.944271C0EE9@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: Changeset: r72952:54408dd5e0f8 Date: 2014-08-21 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/54408dd5e0f8/ Log: fix typo diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -354,6 +354,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. From noreply at buildbot.pypy.org Fri Aug 22 01:28:53 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Fri, 22 Aug 2014 01:28:53 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: modified the test and gc flavor pointers don't need to be zeroed Message-ID: <20140821232853.7F4F21C0EE9@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72953:43f5c1364387 Date: 2014-08-21 19:23 -0400 http://bitbucket.org/pypy/pypy/changeset/43f5c1364387/ Log: modified the test and gc flavor pointers don't need to be zeroed diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -11,8 +11,9 @@ from rpython.memory.gctypelayout import TypeLayoutBuilder from rpython.rlib.rarithmetic import LONG_BIT, is_valid_int from rpython.memory.gc import minimark, incminimark -from rpython.memory.gctypelayout import zero_gc_pointers_inside +from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers from rpython.rlib.debug import debug_print +import 
pdb WORD = LONG_BIT // 8 ADDR_ARRAY = lltype.Array(llmemory.Address) @@ -106,9 +107,9 @@ p[index] = newvalue def malloc(self, TYPE, n=None): - addr = self.gc.malloc(self.get_type_id(TYPE), n, zero=True) + addr = self.gc.malloc(self.get_type_id(TYPE), n) + debug_print(self.gc) obj_ptr = llmemory.cast_adr_to_ptr(addr, lltype.Ptr(TYPE)) - if not self.gc.malloc_zero_filled: zero_gc_pointers_inside(obj_ptr, TYPE) return obj_ptr @@ -684,22 +685,24 @@ p = self.malloc(VAR1,5) import pytest with pytest.raises(lltype.UninitializedMemoryAccess): + assert isinstance(p[0], lltype._uninitialized) x1 = p[0] def test_malloc_varsize_no_cleanup2(self): - p = self.malloc(VAR,100) + #as VAR is GcArray so the ptr will don't need to be zeroed + p = self.malloc(VAR, 100) for i in range(100): - print type(p[0]) - assert p[0] == lltype.nullptr(S) + assert p[i] == lltype.nullptr(S) - def test_malloc_struct_of_ptr_arr(self): - S2 = lltype.GcForwardReference() - S2.become(lltype.GcStruct('S2', - ('gcptr_arr', VAR))) - s2 = self.malloc(S2) - s2.gcptr_arr = self.malloc(VAR,100) - for i in range(100): - assert s2.gcptr_arr[i] == lltype.nullptr(S) + def test_malloc_varsize_no_cleanup3(self): + VAR1 = lltype.Array(lltype.Ptr(S)) + p1 = lltype.malloc(VAR1, 10, flavor='raw', track_allocation=False) + import pytest + with pytest.raises(lltype.UninitializedMemoryAccess): + for i in range(10): + assert p1[i] == lltype.nullptr(S) + p1[i]._free() + p1._free() def test_malloc_struct_of_ptr_struct(self): S3 = lltype.GcForwardReference() @@ -721,20 +724,22 @@ assert arr_of_ptr_struct[i].prev == lltype.nullptr(S) assert arr_of_ptr_struct[i].next == lltype.nullptr(S) - + #fail for now def test_malloc_array_of_ptr_arr(self): ARR_OF_PTR_ARR = lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S)))) arr_of_ptr_arr = lltype.malloc(ARR_OF_PTR_ARR, 10) + self.stackroots.append(arr_of_ptr_arr) for i in range(10): assert arr_of_ptr_arr[i] == lltype.nullptr(lltype.GcArray(lltype.Ptr(S))) for i in range(10): 
arr_of_ptr_arr[i] = self.malloc(lltype.GcArray(lltype.Ptr(S)), i) - debug_print (arr_of_ptr_arr[i]) + self.stackroots.append(arr_of_ptr_arr[i]) + debug_print(arr_of_ptr_arr[i]) for elem in arr_of_ptr_arr[i]: - debug_print(elem) + self.stackroots.append(elem) assert elem == lltype.nullptr(S) elem = self.malloc(S) - #assert elem.prev == lltype.nullptr(S) - #assert elem.next == lltype.nullptr(S) + assert elem.prev == lltype.nullptr(S) + assert elem.next == lltype.nullptr(S) \ No newline at end of file From noreply at buildbot.pypy.org Fri Aug 22 01:28:54 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Fri, 22 Aug 2014 01:28:54 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: deal with uninitialized primitive value Message-ID: <20140821232854.BA8451C0EE9@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72954:353cd0558ee5 Date: 2014-08-21 19:25 -0400 http://bitbucket.org/pypy/pypy/changeset/353cd0558ee5/ Log: deal with uninitialized primitive value diff --git a/rpython/translator/c/primitive.py b/rpython/translator/c/primitive.py --- a/rpython/translator/c/primitive.py +++ b/rpython/translator/c/primitive.py @@ -9,7 +9,7 @@ ArrayLengthOffset, GCHeaderOffset, GCREF, AddressAsInt) from rpython.rtyper.lltypesystem.lltype import (Signed, SignedLongLong, Unsigned, UnsignedLongLong, Float, SingleFloat, LongFloat, Char, UniChar, Bool, Void, - FixedSizeArray, Ptr, cast_opaque_ptr, typeOf) + FixedSizeArray, Ptr, cast_opaque_ptr, typeOf, _uninitialized) from rpython.rtyper.lltypesystem.llarena import RoundedUpForAllocation from rpython.rtyper.tool.rffi_platform import memory_alignment from rpython.translator.c.support import cdecl, barebonearray @@ -89,7 +89,7 @@ return '((Signed)%s)' % name_address(value.adr, db) else: raise Exception("unimplemented symbolic %r" % value) - if value is None: + if value is None or isinstance(value, _uninitialized): assert not db.completed return None if value == -sys.maxint-1: # 
blame C From noreply at buildbot.pypy.org Fri Aug 22 01:28:55 2014 From: noreply at buildbot.pypy.org (wenzhuman) Date: Fri, 22 Aug 2014 01:28:55 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: remove zero flag form jitframe malloc Message-ID: <20140821232855.F18F21C0EE9@cobra.cs.uni-duesseldorf.de> Author: wenzhuman Branch: gc_no_cleanup_nursery Changeset: r72955:6c460fe612a3 Date: 2014-08-21 19:28 -0400 http://bitbucket.org/pypy/pypy/changeset/6c460fe612a3/ Log: remove zero flag form jitframe malloc diff --git a/rpython/jit/backend/llsupport/jitframe.py b/rpython/jit/backend/llsupport/jitframe.py --- a/rpython/jit/backend/llsupport/jitframe.py +++ b/rpython/jit/backend/llsupport/jitframe.py @@ -45,7 +45,7 @@ # detailed explanation how it is on your architecture def jitframe_allocate(frame_info): - frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth, zero=True) + frame = lltype.malloc(JITFRAME, frame_info.jfi_frame_depth) frame.jf_frame_info = frame_info return frame From noreply at buildbot.pypy.org Fri Aug 22 09:05:05 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Aug 2014 09:05:05 +0200 (CEST) Subject: [pypy-commit] pypy default: update windows build to link openssl, expat, zlib statically (cpython compatability) Message-ID: <20140822070505.10EF21D36C6@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72956:74d521ec9314 Date: 2014-08-22 10:04 +0300 http://bitbucket.org/pypy/pypy/changeset/74d521ec9314/ Log: update windows build to link openssl, expat, zlib statically (cpython compatability) update instructions and buildbot to openssl 1.0.1h diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,7 +37,7 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. 
-**Note:** PyPy is currently not supported for 64 bit Windows, and translation +**Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. Python and a C compiler are all you need to build pypy, but it will miss some @@ -136,7 +136,7 @@ cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.lib + copy zlib.lib copy zlib.h zconf.h The bz2 compression library @@ -165,27 +165,29 @@ directory. Version 2.1.0 is known to pass tests. Then open the project file ``expat.dsw`` with Visual Studio; follow the instruction for converting the project files, -switch to the "Release" configuration, reconfigure the runtime for -Multi-threaded DLL (/MD) and build the solution (the ``expat`` project -is actually enough for pypy). +switch to the "Release" configuration, use the ``expat_static`` project, +reconfigure the runtime for Multi-threaded DLL (/MD) and build. -Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and -both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. +Then, copy the file ``win32\bin\release\libexpat.lib`` somewhere in +somewhere in LIB, and both ``lib\expat.h`` and ``lib\expat_external.h`` +somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ -OpenSSL needs a Perl interpreter to configure its makefile. You may -use the one distributed by ActiveState, or the one from cygwin. In -both case the perl interpreter must be found on the PATH. +OpenSSL is complicated to build. CPython uses a build script, since we +strive for cpython compatability this script is the easiest way to get the +correct version and build options. 
As of CPython 3.4, the script is available +at http://hg.python.org/cpython/file/30e8a8f22a2a/PCbuild/build_ssl.py - svn export http://svn.python.org/projects/external/openssl-0.9.8y - cd openssl-0.9.8y - perl Configure VC-WIN32 - ms\do_ms.bat - nmake -f ms\nt.mak install + svn export http://svn.python.org/projects/external/openssl-1.0.1h + cd openssl-1.0.1h + python build_ssl.py Release Win32 +Then, copy the files ``out32\*.lib`` somewhere in +somewhere in LIB, and the entire ``include\openssl`` directory as-is somewhere +in INCLUDE. + TkInter module support ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -14,13 +14,16 @@ if sys.platform == "win32": libname = 'libexpat' + pre_include_bits = ["#define XML_STATIC"] else: libname = 'expat' + pre_include_bits = [] eci = ExternalCompilationInfo( libraries=[libname], library_dirs=platform.preprocess_library_dirs([]), includes=['expat.h'], include_dirs=platform.preprocess_include_dirs([]), + pre_include_bits = pre_include_bits, ) eci = rffi_platform.configure_external_library( diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -164,8 +164,7 @@ if sys.platform == 'win32': # Can't rename a DLL: it is always called 'libpypy-c.dll' - win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll'] + win_extras = ['libpypy-c.dll', 'sqlite3.dll'] if not options.no_tk: win_extras += ['tcl85.dll', 'tk85.dll'] diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -8,7 +8,7 @@ if sys.platform == 'win32' and platform.name != 'mingw32': - libraries = ['libeay32', 'ssleay32', 'zlib1', + libraries = ['libeay32', 'ssleay32', 'zlib', 'user32', 'advapi32', 'gdi32', 'msvcrt', 'ws2_32'] 
includes = [ # ssl.h includes winsock.h, which will conflict with our own diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -12,12 +12,15 @@ if compiler.name == "msvc": - libname = 'zlib1' # since version 1.1.4 and later, see http://www.zlib.net/DLL_FAQ.txt + libname = 'zlib' # use the static lib, not zlib1 which is dll import lib + testonly_libraries = ['zlib1'] else: libname = 'z' + testonly_libraries = [] eci = ExternalCompilationInfo( libraries=[libname], - includes=['zlib.h'] + includes=['zlib.h'], + testonly_libraries = testonly_libraries ) try: eci = rffi_platform.configure_external_library( From noreply at buildbot.pypy.org Fri Aug 22 11:01:29 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Aug 2014 11:01:29 +0200 (CEST) Subject: [pypy-commit] pypy default: update windows to openssl-1.0.1i, revert build instructions to simplify Message-ID: <20140822090129.6135A1D2A89@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r72957:14fbd80309d0 Date: 2014-08-22 12:01 +0300 http://bitbucket.org/pypy/pypy/changeset/14fbd80309d0/ Log: update windows to openssl-1.0.1i, revert build instructions to simplify diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -175,14 +175,14 @@ The OpenSSL library ~~~~~~~~~~~~~~~~~~~ -OpenSSL is complicated to build. CPython uses a build script, since we -strive for cpython compatability this script is the easiest way to get the -correct version and build options. As of CPython 3.4, the script is available -at http://hg.python.org/cpython/file/30e8a8f22a2a/PCbuild/build_ssl.py +OpenSSL needs a Perl interpreter to configure its makefile. 
You may +use the one distributed by ActiveState, or the one from cygwin.:: - svn export http://svn.python.org/projects/external/openssl-1.0.1h - cd openssl-1.0.1h - python build_ssl.py Release Win32 + svn export http://svn.python.org/projects/external/openssl-1.0.1i + cd openssl-1.0.1i + perl Configure VC-WIN32 no-idea no-mdc2 + ms\do_ms.bat + nmake -f ms\nt.mak install Then, copy the files ``out32\*.lib`` somewhere in somewhere in LIB, and the entire ``include\openssl`` directory as-is somewhere From noreply at buildbot.pypy.org Fri Aug 22 12:45:47 2014 From: noreply at buildbot.pypy.org (groggi) Date: Fri, 22 Aug 2014 12:45:47 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: added test with random pinning, unpinning and adding/removing from stackroots. Message-ID: <20140822104547.1EEEA1C148A@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72958:9b7d961fbb87 Date: 2014-08-22 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/9b7d961fbb87/ Log: added test with random pinning, unpinning and adding/removing from stackroots. Fails right now. Succeeds however if 'self.gc.collect()' is used instead of 'self.gc.debug_gc_step()' diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -54,6 +54,32 @@ # XXX test with multiple mallocs, and only part of them is pinned + def test_random(self): + # scenario: create bunch of objects. randomly pin, unpin, add to + # stackroots and remove from stackroots. 
+ import random + + for i in xrange(10**3): + obj = self.malloc(T) + obj.someInt = 100 + # + if random.random() < 0.5: + self.stackroots.append(obj) + print("+stack") + if random.random() < 0.5: + self.gc.pin(llmemory.cast_ptr_to_adr(obj)) + print("+pin") + self.gc.debug_gc_step() + for o in self.stackroots[:]: + assert o.someInt == 100 + o_adr = llmemory.cast_ptr_to_adr(o) + if random.random() < 0.5 and self.gc._is_pinned(o_adr): + print("-pin") + self.gc.unpin(o_adr) + if random.random() < 0.5: + print("-stack") + self.stackroots.remove(o) + class TestIncminimark(PinningGCTest): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass From noreply at buildbot.pypy.org Fri Aug 22 12:45:48 2014 From: noreply at buildbot.pypy.org (groggi) Date: Fri, 22 Aug 2014 12:45:48 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: rewrite of 'collect_and_reserve'. Message-ID: <20140822104548.5AED81C148A@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r72959:f67b9f4344ab Date: 2014-08-22 12:43 +0200 http://bitbucket.org/pypy/pypy/changeset/f67b9f4344ab/ Log: rewrite of 'collect_and_reserve'. XXX: needs refactoring diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -614,10 +614,7 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. - result = self.nursery_free - self.nursery_free = result + rawtotalsize - if self.nursery_free > self.nursery_top: - result = self.collect_and_reserve(result, totalsize) + result = self.collect_and_reserve(totalsize) # # Build the object. llarena.arena_reserve(result, totalsize) @@ -673,10 +670,7 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. 
- result = self.nursery_free - self.nursery_free = result + totalsize - if self.nursery_free > self.nursery_top: - result = self.collect_and_reserve(result, totalsize) + result = self.collect_and_reserve(totalsize) # # Build the object. llarena.arena_reserve(result, totalsize) @@ -718,19 +712,14 @@ return True try_move_nursery_top._always_inline_ = True - def collect_and_reserve(self, prev_result, totalsize): - """To call when nursery_free overflows nursery_top. - In case of pinned objects try to reserve 'totalsize' between - two pinned objects. In this case we can just continue. - Otherwise check first if the nursery_top is the real top, - if not we can just move the top of one cleanup and continue. - - Do a minor collection, and possibly also a major collection, - and finally reserve 'totalsize' bytes at the start of the - now-empty nursery. - """ - # be careful: 'nursery_free' may have been changed by the caller. - # 'prev_result' contains the expected address for the new object. + def collect_and_reserve(self, totalsize): + # XXX (groggi) comments, rename method, refactor (duplicate code) + # + # if enough space exists already, just use it + if self.nursery_free + totalsize <= self.nursery_top: + result = self.nursery_free + self.nursery_free = result + totalsize + return result # keep track how many iteration we've gone trough minor_collection_count = 0 @@ -747,19 +736,15 @@ # nursery. self.nursery_free = self.nursery_top + pinned_obj_size self.nursery_top = self.nursery_barriers.popleft() - # - # because we encountered a barrier, we have to fix 'prev_result'. - # The one provided as parameter can't be used further as there - # is not enough space between 'prev_result' and and the barrier - # for an object of 'totalsize' size. - prev_result = self.nursery_free else: # # no barriers (i.e. no pinned objects) after 'nursery_free'. # If possible just enlarge the used part of the nursery. # Otherwise we are forced to clean up the nursery. 
if self.try_move_nursery_top(totalsize): - return prev_result + result = self.nursery_free + self.nursery_free = result + totalsize + return result # self.minor_collection() minor_collection_count += 1 @@ -797,8 +782,8 @@ # attempt to get 'totalzise' out of the nursery now. This may # fail again, and then we loop. result = self.nursery_free - self.nursery_free = result + totalsize - if self.nursery_free <= self.nursery_top: + if self.nursery_free + totalsize <= self.nursery_top: + self.nursery_free = result + totalsize break # if self.debug_tiny_nursery >= 0: # for debugging From noreply at buildbot.pypy.org Fri Aug 22 16:13:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Aug 2014 16:13:11 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: test and fix signature handling Message-ID: <20140822141311.C378C1C020E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72960:03e110d4a60f Date: 2014-08-22 16:04 +0300 http://bitbucket.org/pypy/pypy/changeset/03e110d4a60f/ Log: test and fix signature handling diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -101,7 +101,7 @@ while signature[name_end] == ' ' or signature[name_end] == '\t': name_end -= 1 var_name = signature[i:name_end + 1] - if not all([_is_alpha_underscore(s) for s in var_name]): + if not all([_is_alnum_underscore(s) for s in var_name]): raise oefmt(space.w_ValueError, '%s at %d in "%s"', "expect dimension name", i, signature) if var_name not in var_names: @@ -111,10 +111,6 @@ cur_core_dim += 1 nd += 1 i = next_comma - i = _next_non_white_space(signature, i + 1) - if signature[i] != ',' and signature[i] != ')' and signature[i] != '-': - raise oefmt(space.w_ValueError, '%s at %d in "%s"', - "expect ',' or ')' or '-'", i, signature) if signature[i] == ',': i = _next_non_white_space(signature, i + 1); if signature[i] == ')': diff --git 
a/pypy/module/micronumpy/test/test_support.py b/pypy/module/micronumpy/test/test_support.py --- a/pypy/module/micronumpy/test/test_support.py +++ b/pypy/module/micronumpy/test/test_support.py @@ -1,6 +1,8 @@ +from py.test import raises from pypy.module.micronumpy import support from pypy.module.micronumpy.ufuncs import W_UfuncGeneric from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest +from pypy.interpreter.error import OperationError class TestParseSignatureDirect(BaseNumpyAppTest): def test_signature_basic(self): @@ -28,3 +30,24 @@ ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) support._parse_signature(space, ufunc, ufunc.signature) assert ufunc.core_enabled == 1 + + nin = 2 + nout = 1 + signature = '(i1, i2),(J_1)->(_kAB)' + ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) + support._parse_signature(space, ufunc, ufunc.signature) + assert ufunc.core_enabled == 1 + + nin = 2 + nout = 1 + signature = '(i1 i2),(J_1)->(_kAB)' + ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) + exc = raises(OperationError, support._parse_signature, space, ufunc, ufunc.signature) + assert "expect dimension name" in exc.value.errorstr(space) + + nin = 2 + nout = 1 + signature = '(i),i(->()' + ufunc = W_UfuncGeneric(space, funcs, name, identity, nin, nout, dtypes, signature) + exc = raises(OperationError, support._parse_signature, space, ufunc, ufunc.signature) + assert "expect '(' at 4" in exc.value.errorstr(space) From noreply at buildbot.pypy.org Fri Aug 22 16:13:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 22 Aug 2014 16:13:13 +0200 (CEST) Subject: [pypy-commit] pypy ufuncapi: integrate signature core_enabled into ufunc call Message-ID: <20140822141313.0A5D91C020E@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufuncapi Changeset: r72961:aa2a66acc1f9 Date: 2014-08-22 17:12 +0300 http://bitbucket.org/pypy/pypy/changeset/aa2a66acc1f9/ Log: 
integrate signature core_enabled into ufunc call diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -322,9 +322,6 @@ char types[] = { NPY_DOUBLE,NPY_DOUBLE, NPY_INT, NPY_INT }; void *array_data[] = {NULL, NULL}; PyObject * retval; - /* XXX should be 'funcs', not 'funcs[1]' but how to define an array of - function pointers in ndarrayobject.py? */ - printf("calling w/funcs[0] = 0x%x, funcs[1] = 0x%x \\n", funcs[0], funcs[1]); retval = _PyUFunc_FromFuncAndDataAndSignature(funcs, array_data, types, 2, 1, 1, PyUFunc_None, "times2", "times2_docstring", 0, "()->()"); diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -137,7 +137,7 @@ in_iters[i] = in_iter in_states[i] = in_state for i in range(nout): - out_i = in_args[i] + out_i = out_args[i] assert isinstance(out_i, W_NDimArray) out_iter, out_state = out_i.create_iter(shape) out_iters[i] = out_iter diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -114,9 +114,9 @@ adder_ufunc0 = frompyfunc(adder, 2, 1, dtypes=['match']) adder_ufunc1 = frompyfunc([adder, adder], 2, 1, dtypes=[int, int, int, float, float, float]) - int_func22 = frompyfunc([int, int], 2, 2, signature='(i)->(i)', + int_func22 = frompyfunc([int, int], 2, 2, signature='(i),(i)->(i),(i)', dtypes=['match']) - int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i)', + int_func12 = frompyfunc([int], 1, 2, signature='(i)->(i),(i)', dtypes=['match']) retype = dtype(int) assert isinstance(adder_ufunc1, ufunc) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ 
b/pypy/module/micronumpy/ufuncs.py @@ -558,10 +558,8 @@ new_shape = inargs0.get_shape() res_dtype = outargs0.get_dtype() # XXX handle inner-loop indexing - sign_parts = self.signature.split('->') - if len(sign_parts) == 2 and sign_parts[0].strip() == '()' \ - and sign_parts[1].strip() == '()': - + if not self.core_enabled: + # func is going to do all the work arglist = space.newlist(inargs + outargs) func = self.funcs[index] arglist = space.newlist(inargs + outargs) @@ -1034,6 +1032,10 @@ raise oefmt(space.w_ValueError, 'identity must be None or an int') + if len(signature) == 0: + # cpython compatability, func is of the form (i),(i)->(i) + signature = ','.join(['(i)'] * nin) + '->' + ','.join(['(i)'] * nout) + w_ret = W_UfuncGeneric(space, func, name, identity, nin, nout, dtypes, signature, match_dtypes=match_dtypes) _parse_signature(space, w_ret, w_ret.signature) From noreply at buildbot.pypy.org Sat Aug 23 00:05:13 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 00:05:13 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: this belongs in test_sysmodule Message-ID: <20140822220513.DD5881D36E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72962:6d940f0447f1 Date: 2014-08-22 12:23 -0700 http://bitbucket.org/pypy/pypy/changeset/6d940f0447f1/ Log: this belongs in test_sysmodule diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -127,24 +127,6 @@ assert SystemExit("x").code == "x" assert SystemExit(1, 2).code == (1, 2) - def test_sys_exit(self): - import sys - - exc = raises(SystemExit, sys.exit) - assert exc.value.code is None - - exc = raises(SystemExit, sys.exit, 0) - assert exc.value.code == 0 - - exc = raises(SystemExit, sys.exit, 1) - assert exc.value.code == 1 - - exc = raises(SystemExit, sys.exit, 2) - assert exc.value.code == 2 - - exc = raises(SystemExit, sys.exit, (1, 
2, 3)) - assert exc.value.code == (1, 2, 3) - def test_str_unicode(self): e = ValueError('àèì') assert str(e) == 'àèì' diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -169,6 +169,20 @@ assert isinstance(li.nan, int) assert isinstance(li.imag, int) + def test_sys_exit(self): + import sys + exc = raises(SystemExit, sys.exit) + assert exc.value.code is None + + exc = raises(SystemExit, sys.exit, 0) + assert exc.value.code == 0 + + exc = raises(SystemExit, sys.exit, 1) + assert exc.value.code == 1 + + exc = raises(SystemExit, sys.exit, (1, 2, 3)) + assert exc.value.code == (1, 2, 3) + class AppTestSysModulePortedFromCPython: def setup_class(cls): From noreply at buildbot.pypy.org Sat Aug 23 00:05:15 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 23 Aug 2014 00:05:15 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: sys.exit() should produce a SystemExit with code is None Message-ID: <20140822220515.61B051D36E9@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: stdlib-2.7.8 Changeset: r72963:8ab4bb347653 Date: 2014-08-12 21:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8ab4bb347653/ Log: sys.exit() should produce a SystemExit with code is None (grafted from 4558aef78acc9cca815eb6309c90adecdafc793f) diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -147,6 +147,28 @@ assert SystemExit("x").code == "x" assert SystemExit(1, 2).code == (1, 2) + def test_sys_exit(self): + import sys + + exc = raises(SystemExit, sys.exit) + assert exc.value.code is None + + exc = raises(SystemExit, sys.exit, 0) + assert exc.value.code == 0 + + exc = raises(SystemExit, sys.exit, 1) + assert exc.value.code == 1 + + exc = raises(SystemExit, sys.exit, 2) + assert exc.value.code == 2 + + exc 
= raises(SystemExit, sys.exit, (1, 2, 3)) + assert exc.value.code == (1, 2, 3) + + def test_str_unicode(self): + e = ValueError('àèì') + assert str(e) == 'àèì' + def test_unicode_decode_error(self): from exceptions import UnicodeDecodeError ud = UnicodeDecodeError("x", "y", 1, 5, "bah") diff --git a/pypy/module/sys/app.py b/pypy/module/sys/app.py --- a/pypy/module/sys/app.py +++ b/pypy/module/sys/app.py @@ -49,7 +49,7 @@ except: return False # got an exception again... ignore, report the original -def exit(exitcode=0): +def exit(exitcode=None): """Exit the interpreter by raising SystemExit(exitcode). If the exitcode is omitted or None, it defaults to zero (i.e., success). If the exitcode is numeric, it will be used as the system exit status. From noreply at buildbot.pypy.org Sat Aug 23 00:05:16 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 00:05:16 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: this belongs in test_sysmodule Message-ID: <20140822220516.A3B3A1D36E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72964:b5022d1064b2 Date: 2014-08-22 12:23 -0700 http://bitbucket.org/pypy/pypy/changeset/b5022d1064b2/ Log: this belongs in test_sysmodule (grafted from 6d940f0447f1438b9fb337172d25a971e3910f27) diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -147,24 +147,6 @@ assert SystemExit("x").code == "x" assert SystemExit(1, 2).code == (1, 2) - def test_sys_exit(self): - import sys - - exc = raises(SystemExit, sys.exit) - assert exc.value.code is None - - exc = raises(SystemExit, sys.exit, 0) - assert exc.value.code == 0 - - exc = raises(SystemExit, sys.exit, 1) - assert exc.value.code == 1 - - exc = raises(SystemExit, sys.exit, 2) - assert exc.value.code == 2 - - exc = raises(SystemExit, sys.exit, (1, 2, 3)) - assert exc.value.code == (1, 2, 3) - def 
test_str_unicode(self): e = ValueError('àèì') assert str(e) == 'àèì' diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py --- a/pypy/module/sys/test/test_sysmodule.py +++ b/pypy/module/sys/test/test_sysmodule.py @@ -123,6 +123,21 @@ assert isinstance(li.bits_per_digit, int) assert isinstance(li.sizeof_digit, int) + def test_sys_exit(self): + import sys + exc = raises(SystemExit, sys.exit) + assert exc.value.code is None + + exc = raises(SystemExit, sys.exit, 0) + assert exc.value.code == 0 + + exc = raises(SystemExit, sys.exit, 1) + assert exc.value.code == 1 + + exc = raises(SystemExit, sys.exit, (1, 2, 3)) + assert exc.value.code == (1, 2, 3) + + class AppTestSysModulePortedFromCPython: def setup_class(cls): From noreply at buildbot.pypy.org Sat Aug 23 00:05:18 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 00:05:18 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: missing import Message-ID: <20140822220518.096F71D36E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72965:9f806e8177c5 Date: 2014-08-22 12:31 -0700 http://bitbucket.org/pypy/pypy/changeset/9f806e8177c5/ Log: missing import diff --git a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -5,7 +5,7 @@ import unittest from imp import reload from test.test_support import run_unittest, is_jython, Error, cpython_only -from test.test_support import check_impl_detail +from test.test_support import check_impl_detail, impl_detail import traceback From noreply at buildbot.pypy.org Sat Aug 23 00:06:11 2014 From: noreply at buildbot.pypy.org (armooo) Date: Sat, 23 Aug 2014 00:06:11 +0200 (CEST) Subject: [pypy-commit] pypy armooo-py3.3fixes: Update select.poll register/modify for 3.3 tests Message-ID: <20140822220611.107441D36E9@cobra.cs.uni-duesseldorf.de> Author: Jason Michalski Branch: 
armooo-py3.3fixes Changeset: r72966:f5e910c88825 Date: 2014-08-20 21:53 -0700 http://bitbucket.org/pypy/pypy/changeset/f5e910c88825/ Log: Update select.poll register/modify for 3.3 tests In python 3.3 select.poll register and modify raise an OverflowError when events is a negative value. diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -19,11 +19,21 @@ @unwrap_spec(events="c_short") def register(self, space, w_fd, events=defaultevents): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @unwrap_spec(events=int) def modify(self, space, w_fd, events): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: raise wrap_oserror(space, OSError(errno.ENOENT, "poll.modify"), diff --git a/pypy/module/test_lib_pypy/test_poll.py b/pypy/module/test_lib_pypy/test_poll.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_poll.py @@ -0,0 +1,14 @@ +class AppTestPoll: + + spaceconfig = dict(usemodules=('select',)) + + def test_poll3(self): + import select + # test int overflow + pollster = select.poll() + pollster.register(1) + + raises(OverflowError, pollster.register, 0, -1) + raises(OverflowError, pollster.register, 0, 1 << 64) + raises(OverflowError, pollster.modify, 1, -1) + raises(OverflowError, pollster.modify, 1, 1 << 64) From noreply at buildbot.pypy.org Sat Aug 23 00:06:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 00:06:12 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Merged in armooo/pypy/armooo-py3.3fixes (pull request #274) Message-ID: <20140822220612.8671A1D36E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: 
py3.3 Changeset: r72967:25f78f167792 Date: 2014-08-22 15:05 -0700 http://bitbucket.org/pypy/pypy/changeset/25f78f167792/ Log: Merged in armooo/pypy/armooo-py3.3fixes (pull request #274) Update select.poll register/modify for 3.3 tests diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -19,11 +19,21 @@ @unwrap_spec(events="c_short") def register(self, space, w_fd, events=defaultevents): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @unwrap_spec(events=int) def modify(self, space, w_fd, events): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: raise wrap_oserror(space, OSError(errno.ENOENT, "poll.modify"), diff --git a/pypy/module/test_lib_pypy/test_poll.py b/pypy/module/test_lib_pypy/test_poll.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_poll.py @@ -0,0 +1,14 @@ +class AppTestPoll: + + spaceconfig = dict(usemodules=('select',)) + + def test_poll3(self): + import select + # test int overflow + pollster = select.poll() + pollster.register(1) + + raises(OverflowError, pollster.register, 0, -1) + raises(OverflowError, pollster.register, 0, 1 << 64) + raises(OverflowError, pollster.modify, 1, -1) + raises(OverflowError, pollster.modify, 1, 1 << 64) From noreply at buildbot.pypy.org Sat Aug 23 01:31:56 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:31:56 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: pep8, kill an invalid XXX comment Message-ID: <20140822233156.A9A9A1C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72968:e8fac13444fa Date: 2014-08-22 15:41 -0700 
http://bitbucket.org/pypy/pypy/changeset/e8fac13444fa/ Log: pep8, kill an invalid XXX comment diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,21 +1,29 @@ -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, wrap_oserror, oefmt -from rpython.rlib import rpoll import errno +from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef + defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI + class Cache: def __init__(self, space): self.w_error = space.new_exception_class("select.error") + def poll(space): """Returns a polling object, which supports registering and -unregistering file descriptors, and then polling them for I/O events.""" + unregistering file descriptors, and then polling them for I/O + events. + """ return Poll() + class Poll(W_Root): def __init__(self): self.fddict = {} @@ -39,8 +47,7 @@ try: del self.fddict[fd] except KeyError: - raise OperationError(space.w_KeyError, - space.wrap(fd)) # XXX should this maybe be w_fd? 
+ raise OperationError(space.w_KeyError, space.wrap(fd)) @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): @@ -54,7 +61,7 @@ w_timeout = space.int(w_timeout) except OperationError: raise oefmt(space.w_TypeError, - "timeout must be an integer or None") + "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) if self.running: @@ -85,10 +92,6 @@ # ____________________________________________________________ -from rpython.rlib import _rsocket_rffi as _c -from rpython.rtyper.lltypesystem import lltype, rffi - - def _build_fd_set(space, list_w, ll_list, nfds): _c.FD_ZERO(ll_list) fdlist = [] @@ -101,17 +104,17 @@ return fdlist, nfds _build_fd_set._always_inline_ = True # get rid of the tuple result + def _unbuild_fd_set(space, list_w, fdlist, ll_list, reslist_w): for i in range(len(fdlist)): fd = fdlist[i] if _c.FD_ISSET(fd, ll_list): reslist_w.append(list_w[i]) + def _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval): - fdlistin = None - fdlistout = None - fdlisterr = None + fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: fdlistin, nfds = _build_fd_set(space, iwtd_w, ll_inl, nfds) @@ -143,7 +146,8 @@ space.newlist(resout_w), space.newlist(reserr_w)]) - at unwrap_spec(w_timeout = WrappedDefault(None)) + + at unwrap_spec(w_timeout=WrappedDefault(None)) def select(space, w_iwtd, w_owtd, w_ewtd, w_timeout): """Wait until one or more file descriptors are ready for some kind of I/O. 
The first three arguments are sequences of file descriptors to be waited for: @@ -175,7 +179,7 @@ else: timeout = space.float_w(w_timeout) - ll_inl = lltype.nullptr(_c.fd_set.TO) + ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) ll_errl = lltype.nullptr(_c.fd_set.TO) ll_timeval = lltype.nullptr(_c.timeval) @@ -199,7 +203,11 @@ return _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval) finally: - if ll_timeval: lltype.free(ll_timeval, flavor='raw') - if ll_errl: lltype.free(ll_errl, flavor='raw') - if ll_outl: lltype.free(ll_outl, flavor='raw') - if ll_inl: lltype.free(ll_inl, flavor='raw') + if ll_timeval: + lltype.free(ll_timeval, flavor='raw') + if ll_errl: + lltype.free(ll_errl, flavor='raw') + if ll_outl: + lltype.free(ll_outl, flavor='raw') + if ll_inl: + lltype.free(ll_inl, flavor='raw') From noreply at buildbot.pypy.org Sat Aug 23 01:32:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:03 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: move/integrate these into the existing test_select Message-ID: <20140822233203.678D51C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72973:3b2796835762 Date: 2014-08-22 15:55 -0700 http://bitbucket.org/pypy/pypy/changeset/3b2796835762/ Log: move/integrate these into the existing test_select diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -219,6 +219,8 @@ skip("no select.poll() on this platform") pollster = select.poll() pollster.register(1) + raises(OverflowError, pollster.register, 0, -1) + raises(OverflowError, pollster.register, 0, 1 << 64) exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 assert str(exc.value) == 'signed short integer is greater than maximum' exc = raises(OverflowError, pollster.register, 0, -32768 - 1) @@ -230,6 +232,9 @@ 
exc = raises(TypeError, pollster.poll, '123') assert str(exc.value) == 'timeout must be an integer or None' + raises(OverflowError, pollster.modify, 1, -1) + raises(OverflowError, pollster.modify, 1, 1 << 64) + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" diff --git a/pypy/module/test_lib_pypy/test_poll.py b/pypy/module/test_lib_pypy/test_poll.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_poll.py +++ /dev/null @@ -1,14 +0,0 @@ -class AppTestPoll: - - spaceconfig = dict(usemodules=('select',)) - - def test_poll3(self): - import select - # test int overflow - pollster = select.poll() - pollster.register(1) - - raises(OverflowError, pollster.register, 0, -1) - raises(OverflowError, pollster.register, 0, 1 << 64) - raises(OverflowError, pollster.modify, 1, -1) - raises(OverflowError, pollster.modify, 1, 1 << 64) From noreply at buildbot.pypy.org Sat Aug 23 01:31:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:31:57 +0200 (CEST) Subject: [pypy-commit] pypy default: add USHRT_MAX Message-ID: <20140822233157.F0B941C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72969:0001c77cf245 Date: 2014-08-22 15:50 -0700 http://bitbucket.org/pypy/pypy/changeset/0001c77cf245/ Log: add USHRT_MAX diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -539,6 +539,7 @@ SHRT_MIN = -2**(_get_bitsize('h') - 1) SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +USHRT_MAX = 2**_get_bitsize('h') - 1 INT_MIN = int(-2**(_get_bitsize('i') - 1)) INT_MAX = int(2**(_get_bitsize('i') - 1) - 1) UINT_MAX = r_uint(2**_get_bitsize('i') - 1) From noreply at buildbot.pypy.org Sat Aug 23 01:32:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:04 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: more closely match CPython requiring strict unsigned shorts Message-ID: 
<20140822233204.A742E1C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72974:ba30358bcb6b Date: 2014-08-22 16:25 -0700 http://bitbucket.org/pypy/pypy/changeset/ba30358bcb6b/ Log: more closely match CPython requiring strict unsigned shorts diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,11 +1,13 @@ import errno from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rlib.rarithmetic import USHRT_MAX from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.gateway import ( + Unwrapper, WrappedDefault, interp2app, unwrap_spec) from pypy.interpreter.typedef import TypeDef defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -19,28 +21,31 @@ return Poll() +class UShortUnwrapper(Unwrapper): + + def unwrap(self, space, w_value): + value = space.int_w(w_value) + if value < 0: + raise oefmt(space.w_OverflowError, + "can't convert negative value to C unsigned short") + if value > USHRT_MAX: + raise oefmt(space.w_OverflowError, + "Python int too large for C unsigned short") + return value + + class Poll(W_Root): def __init__(self): self.fddict = {} self.running = False - @unwrap_spec(events="c_short") + @unwrap_spec(events=UShortUnwrapper) def register(self, space, w_fd, events=defaultevents): - if events < 0: - raise OperationError( - space.w_OverflowError, - space.wrap("Python int too large for C unsigned short"), - ) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events - @unwrap_spec(events=int) + @unwrap_spec(events=UShortUnwrapper) def modify(self, space, w_fd, events): - if events < 0: - raise OperationError( - space.w_OverflowError, - space.wrap("Python int too large 
for C unsigned short"), - ) fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: raise wrap_oserror(space, OSError(errno.ENOENT, "poll.modify"), diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -221,11 +221,11 @@ pollster.register(1) raises(OverflowError, pollster.register, 0, -1) raises(OverflowError, pollster.register, 0, 1 << 64) - exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 - assert str(exc.value) == 'signed short integer is greater than maximum' + pollster.register(0, 32768) # SHRT_MAX + 1 exc = raises(OverflowError, pollster.register, 0, -32768 - 1) - assert str(exc.value) == 'signed short integer is less than minimum' - raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 + assert "unsigned" in str(exc.value) + pollster.register(0, 65535) # USHRT_MAX + raises(OverflowError, pollster.register, 0, 65536) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 raises(OverflowError, pollster.poll, -2147483648 - 1) raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 From noreply at buildbot.pypy.org Sat Aug 23 01:31:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:31:59 +0200 (CEST) Subject: [pypy-commit] pypy py3k: pep8, kill an invalid XXX comment Message-ID: <20140822233159.5AB0E1C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72970:a4773e318ffe Date: 2014-08-22 15:41 -0700 http://bitbucket.org/pypy/pypy/changeset/a4773e318ffe/ Log: pep8, kill an invalid XXX comment (grafted from e8fac13444fa1ba5546f2c6af6d278b9b0a0770b) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,21 +1,29 @@ -from pypy.interpreter.typedef import TypeDef -from 
pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, wrap_oserror, oefmt -from rpython.rlib import rpoll import errno +from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef + defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI + class Cache: def __init__(self, space): self.w_error = space.new_exception_class("select.error") + def poll(space): """Returns a polling object, which supports registering and -unregistering file descriptors, and then polling them for I/O events.""" + unregistering file descriptors, and then polling them for I/O + events. + """ return Poll() + class Poll(W_Root): def __init__(self): self.fddict = {} @@ -39,8 +47,7 @@ try: del self.fddict[fd] except KeyError: - raise OperationError(space.w_KeyError, - space.wrap(fd)) # XXX should this maybe be w_fd? 
+ raise OperationError(space.w_KeyError, space.wrap(fd)) @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): @@ -54,7 +61,7 @@ w_timeout = space.int(w_timeout) except OperationError: raise oefmt(space.w_TypeError, - "timeout must be an integer or None") + "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) if self.running: @@ -85,10 +92,6 @@ # ____________________________________________________________ -from rpython.rlib import _rsocket_rffi as _c -from rpython.rtyper.lltypesystem import lltype, rffi - - def _build_fd_set(space, list_w, ll_list, nfds): _c.FD_ZERO(ll_list) fdlist = [] @@ -101,17 +104,17 @@ return fdlist, nfds _build_fd_set._always_inline_ = True # get rid of the tuple result + def _unbuild_fd_set(space, list_w, fdlist, ll_list, reslist_w): for i in range(len(fdlist)): fd = fdlist[i] if _c.FD_ISSET(fd, ll_list): reslist_w.append(list_w[i]) + def _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval): - fdlistin = None - fdlistout = None - fdlisterr = None + fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: fdlistin, nfds = _build_fd_set(space, iwtd_w, ll_inl, nfds) @@ -143,7 +146,8 @@ space.newlist(resout_w), space.newlist(reserr_w)]) - at unwrap_spec(w_timeout = WrappedDefault(None)) + + at unwrap_spec(w_timeout=WrappedDefault(None)) def select(space, w_iwtd, w_owtd, w_ewtd, w_timeout): """Wait until one or more file descriptors are ready for some kind of I/O. 
The first three arguments are sequences of file descriptors to be waited for: @@ -175,7 +179,7 @@ else: timeout = space.float_w(w_timeout) - ll_inl = lltype.nullptr(_c.fd_set.TO) + ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) ll_errl = lltype.nullptr(_c.fd_set.TO) ll_timeval = lltype.nullptr(_c.timeval) @@ -199,7 +203,11 @@ return _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval) finally: - if ll_timeval: lltype.free(ll_timeval, flavor='raw') - if ll_errl: lltype.free(ll_errl, flavor='raw') - if ll_outl: lltype.free(ll_outl, flavor='raw') - if ll_inl: lltype.free(ll_inl, flavor='raw') + if ll_timeval: + lltype.free(ll_timeval, flavor='raw') + if ll_errl: + lltype.free(ll_errl, flavor='raw') + if ll_outl: + lltype.free(ll_outl, flavor='raw') + if ll_inl: + lltype.free(ll_inl, flavor='raw') From noreply at buildbot.pypy.org Sat Aug 23 01:32:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:05 +0200 (CEST) Subject: [pypy-commit] pypy default: pep8, kill an invalid XXX comment Message-ID: <20140822233205.D19AB1C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r72975:857d8e4c4254 Date: 2014-08-22 15:41 -0700 http://bitbucket.org/pypy/pypy/changeset/857d8e4c4254/ Log: pep8, kill an invalid XXX comment (grafted from e8fac13444fa1ba5546f2c6af6d278b9b0a0770b) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,21 +1,29 @@ -from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, wrap_oserror, oefmt -from rpython.rlib import rpoll import errno +from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rtyper.lltypesystem import lltype, rffi + +from 
pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef + defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI + class Cache: def __init__(self, space): self.w_error = space.new_exception_class("select.error") + def poll(space): """Returns a polling object, which supports registering and -unregistering file descriptors, and then polling them for I/O events.""" + unregistering file descriptors, and then polling them for I/O + events. + """ return Poll() + class Poll(W_Root): def __init__(self): self.fddict = {} @@ -39,8 +47,7 @@ try: del self.fddict[fd] except KeyError: - raise OperationError(space.w_KeyError, - space.wrap(fd)) # XXX should this maybe be w_fd? + raise OperationError(space.w_KeyError, space.wrap(fd)) @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): @@ -54,7 +61,7 @@ w_timeout = space.int(w_timeout) except OperationError: raise oefmt(space.w_TypeError, - "timeout must be an integer or None") + "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) if self.running: @@ -85,10 +92,6 @@ # ____________________________________________________________ -from rpython.rlib import _rsocket_rffi as _c -from rpython.rtyper.lltypesystem import lltype, rffi - - def _build_fd_set(space, list_w, ll_list, nfds): _c.FD_ZERO(ll_list) fdlist = [] @@ -101,17 +104,17 @@ return fdlist, nfds _build_fd_set._always_inline_ = True # get rid of the tuple result + def _unbuild_fd_set(space, list_w, fdlist, ll_list, reslist_w): for i in range(len(fdlist)): fd = fdlist[i] if _c.FD_ISSET(fd, ll_list): reslist_w.append(list_w[i]) + def _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval): - fdlistin = None - fdlistout = None - fdlisterr = None + fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: fdlistin, nfds 
= _build_fd_set(space, iwtd_w, ll_inl, nfds) @@ -143,7 +146,8 @@ space.newlist(resout_w), space.newlist(reserr_w)]) - at unwrap_spec(w_timeout = WrappedDefault(None)) + + at unwrap_spec(w_timeout=WrappedDefault(None)) def select(space, w_iwtd, w_owtd, w_ewtd, w_timeout): """Wait until one or more file descriptors are ready for some kind of I/O. The first three arguments are sequences of file descriptors to be waited for: @@ -175,7 +179,7 @@ else: timeout = space.float_w(w_timeout) - ll_inl = lltype.nullptr(_c.fd_set.TO) + ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) ll_errl = lltype.nullptr(_c.fd_set.TO) ll_timeval = lltype.nullptr(_c.timeval) @@ -199,7 +203,11 @@ return _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval) finally: - if ll_timeval: lltype.free(ll_timeval, flavor='raw') - if ll_errl: lltype.free(ll_errl, flavor='raw') - if ll_outl: lltype.free(ll_outl, flavor='raw') - if ll_inl: lltype.free(ll_inl, flavor='raw') + if ll_timeval: + lltype.free(ll_timeval, flavor='raw') + if ll_errl: + lltype.free(ll_errl, flavor='raw') + if ll_outl: + lltype.free(ll_outl, flavor='raw') + if ll_inl: + lltype.free(ll_inl, flavor='raw') From noreply at buildbot.pypy.org Sat Aug 23 01:32:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: add USHRT_MAX Message-ID: <20140822233200.A62051C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r72971:711a57a64749 Date: 2014-08-22 15:50 -0700 http://bitbucket.org/pypy/pypy/changeset/711a57a64749/ Log: add USHRT_MAX (grafted from 0001c77cf2459944e9916731b222910cfe177f64) diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -539,6 +539,7 @@ SHRT_MIN = -2**(_get_bitsize('h') - 1) SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +USHRT_MAX = 2**_get_bitsize('h') - 1 INT_MIN = 
int(-2**(_get_bitsize('i') - 1)) INT_MAX = int(2**(_get_bitsize('i') - 1) - 1) UINT_MAX = r_uint(2**_get_bitsize('i') - 1) From noreply at buildbot.pypy.org Sat Aug 23 01:32:07 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:07 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge default Message-ID: <20140822233207.49A0C1C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72976:e3861364b11f Date: 2014-08-22 16:31 -0700 http://bitbucket.org/pypy/pypy/changeset/e3861364b11f/ Log: merge default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -354,6 +354,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,7 +37,7 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. -**Note:** PyPy is currently not supported for 64 bit Windows, and translation +**Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. Python and a C compiler are all you need to build pypy, but it will miss some @@ -136,7 +136,7 @@ cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.lib + copy zlib.lib copy zlib.h zconf.h The bz2 compression library @@ -165,27 +165,29 @@ directory. Version 2.1.0 is known to pass tests. Then open the project file ``expat.dsw`` with Visual Studio; follow the instruction for converting the project files, -switch to the "Release" configuration, reconfigure the runtime for -Multi-threaded DLL (/MD) and build the solution (the ``expat`` project -is actually enough for pypy). 
+switch to the "Release" configuration, use the ``expat_static`` project, +reconfigure the runtime for Multi-threaded DLL (/MD) and build. -Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and -both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. +Then, copy the file ``win32\bin\release\libexpat.lib`` somewhere in +somewhere in LIB, and both ``lib\expat.h`` and ``lib\expat_external.h`` +somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ OpenSSL needs a Perl interpreter to configure its makefile. You may -use the one distributed by ActiveState, or the one from cygwin. In -both case the perl interpreter must be found on the PATH. +use the one distributed by ActiveState, or the one from cygwin.:: - svn export http://svn.python.org/projects/external/openssl-0.9.8y - cd openssl-0.9.8y - perl Configure VC-WIN32 + svn export http://svn.python.org/projects/external/openssl-1.0.1i + cd openssl-1.0.1i + perl Configure VC-WIN32 no-idea no-mdc2 ms\do_ms.bat nmake -f ms\nt.mak install +Then, copy the files ``out32\*.lib`` somewhere in +somewhere in LIB, and the entire ``include\openssl`` directory as-is somewhere +in INCLUDE. 
+ TkInter module support ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -14,13 +14,16 @@ if sys.platform == "win32": libname = 'libexpat' + pre_include_bits = ["#define XML_STATIC"] else: libname = 'expat' + pre_include_bits = [] eci = ExternalCompilationInfo( libraries=[libname], library_dirs=platform.preprocess_library_dirs([]), includes=['expat.h'], include_dirs=platform.preprocess_include_dirs([]), + pre_include_bits = pre_include_bits, ) eci = rffi_platform.configure_external_library( diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py --- a/pypy/tool/release/package.py +++ b/pypy/tool/release/package.py @@ -164,8 +164,7 @@ if sys.platform == 'win32': # Can't rename a DLL: it is always called 'libpypy-c.dll' - win_extras = ['libpypy-c.dll', 'libexpat.dll', 'sqlite3.dll', - 'libeay32.dll', 'ssleay32.dll'] + win_extras = ['libpypy-c.dll', 'sqlite3.dll'] if not options.no_tk: win_extras += ['tcl85.dll', 'tk85.dll'] diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -539,6 +539,7 @@ SHRT_MIN = -2**(_get_bitsize('h') - 1) SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +USHRT_MAX = 2**_get_bitsize('h') - 1 INT_MIN = int(-2**(_get_bitsize('i') - 1)) INT_MAX = int(2**(_get_bitsize('i') - 1) - 1) UINT_MAX = r_uint(2**_get_bitsize('i') - 1) diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -8,7 +8,7 @@ if sys.platform == 'win32' and platform.name != 'mingw32': - libraries = ['libeay32', 'ssleay32', 'zlib1', + libraries = ['libeay32', 'ssleay32', 'zlib', 'user32', 'advapi32', 'gdi32', 'msvcrt', 'ws2_32'] includes = [ # ssl.h includes winsock.h, which will conflict with our own diff --git a/rpython/rlib/rzlib.py 
b/rpython/rlib/rzlib.py --- a/rpython/rlib/rzlib.py +++ b/rpython/rlib/rzlib.py @@ -12,12 +12,15 @@ if compiler.name == "msvc": - libname = 'zlib1' # since version 1.1.4 and later, see http://www.zlib.net/DLL_FAQ.txt + libname = 'zlib' # use the static lib, not zlib1 which is dll import lib + testonly_libraries = ['zlib1'] else: libname = 'z' + testonly_libraries = [] eci = ExternalCompilationInfo( libraries=[libname], - includes=['zlib.h'] + includes=['zlib.h'], + testonly_libraries = testonly_libraries ) try: eci = rffi_platform.configure_external_library( From noreply at buildbot.pypy.org Sat Aug 23 01:32:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:32:01 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140822233201.E08871C1482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r72972:818fa7069ba0 Date: 2014-08-22 15:55 -0700 http://bitbucket.org/pypy/pypy/changeset/818fa7069ba0/ Log: merge py3k diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,17 +1,24 @@ +import errno + +from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rtyper.lltypesystem import lltype, rffi + +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, wrap_oserror, oefmt -from rpython.rlib import rpoll -import errno defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI + def poll(space): """Returns a polling object, which supports registering and -unregistering file descriptors, and then polling them for 
I/O events.""" + unregistering file descriptors, and then polling them for I/O + events. + """ return Poll() + class Poll(W_Root): def __init__(self): self.fddict = {} @@ -45,8 +52,7 @@ try: del self.fddict[fd] except KeyError: - raise OperationError(space.w_KeyError, - space.wrap(fd)) # XXX should this maybe be w_fd? + raise OperationError(space.w_KeyError, space.wrap(fd)) @unwrap_spec(w_timeout=WrappedDefault(None)) def poll(self, space, w_timeout): @@ -60,7 +66,7 @@ w_timeout = space.int(w_timeout) except OperationError: raise oefmt(space.w_TypeError, - "timeout must be an integer or None") + "timeout must be an integer or None") timeout = space.c_int_w(w_timeout) if self.running: @@ -90,10 +96,6 @@ # ____________________________________________________________ -from rpython.rlib import _rsocket_rffi as _c -from rpython.rtyper.lltypesystem import lltype, rffi - - def _build_fd_set(space, list_w, ll_list, nfds): _c.FD_ZERO(ll_list) fdlist = [] @@ -106,17 +108,17 @@ return fdlist, nfds _build_fd_set._always_inline_ = True # get rid of the tuple result + def _unbuild_fd_set(space, list_w, fdlist, ll_list, reslist_w): for i in range(len(fdlist)): fd = fdlist[i] if _c.FD_ISSET(fd, ll_list): reslist_w.append(list_w[i]) + def _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval): - fdlistin = None - fdlistout = None - fdlisterr = None + fdlistin = fdlistout = fdlisterr = None nfds = -1 if ll_inl: fdlistin, nfds = _build_fd_set(space, iwtd_w, ll_inl, nfds) @@ -147,7 +149,8 @@ space.newlist(resout_w), space.newlist(reserr_w)]) - at unwrap_spec(w_timeout = WrappedDefault(None)) + + at unwrap_spec(w_timeout=WrappedDefault(None)) def select(space, w_iwtd, w_owtd, w_ewtd, w_timeout): """Wait until one or more file descriptors are ready for some kind of I/O. 
The first three arguments are sequences of file descriptors to be waited for: @@ -181,7 +184,7 @@ if timeout < 0.0: raise oefmt(space.w_ValueError, "timeout must be non-negative") - ll_inl = lltype.nullptr(_c.fd_set.TO) + ll_inl = lltype.nullptr(_c.fd_set.TO) ll_outl = lltype.nullptr(_c.fd_set.TO) ll_errl = lltype.nullptr(_c.fd_set.TO) ll_timeval = lltype.nullptr(_c.timeval) @@ -205,7 +208,11 @@ return _call_select(space, iwtd_w, owtd_w, ewtd_w, ll_inl, ll_outl, ll_errl, ll_timeval) finally: - if ll_timeval: lltype.free(ll_timeval, flavor='raw') - if ll_errl: lltype.free(ll_errl, flavor='raw') - if ll_outl: lltype.free(ll_outl, flavor='raw') - if ll_inl: lltype.free(ll_inl, flavor='raw') + if ll_timeval: + lltype.free(ll_timeval, flavor='raw') + if ll_errl: + lltype.free(ll_errl, flavor='raw') + if ll_outl: + lltype.free(ll_outl, flavor='raw') + if ll_inl: + lltype.free(ll_inl, flavor='raw') diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -539,6 +539,7 @@ SHRT_MIN = -2**(_get_bitsize('h') - 1) SHRT_MAX = 2**(_get_bitsize('h') - 1) - 1 +USHRT_MAX = 2**_get_bitsize('h') - 1 INT_MIN = int(-2**(_get_bitsize('i') - 1)) INT_MAX = int(2**(_get_bitsize('i') - 1) - 1) UINT_MAX = r_uint(2**_get_bitsize('i') - 1) From noreply at buildbot.pypy.org Sat Aug 23 01:55:49 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:55:49 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: imports of random now require _struct Message-ID: <20140822235549.54CE51D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72977:ca5fc8c4ef22 Date: 2014-08-22 16:52 -0700 http://bitbucket.org/pypy/pypy/changeset/ca5fc8c4ef22/ Log: imports of random now require _struct diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ 
b/pypy/module/_file/test/test_file_extra.py @@ -221,7 +221,7 @@ expected_filename = str(udir.join('sample')) expected_mode = 'rb' extra_args = () - spaceconfig = {"usemodules": ["binascii", "rctime"]} + spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']} def setup_method(self, method): space = self.space @@ -281,7 +281,7 @@ expected_filename = '' expected_mode = 'rb' extra_args = () - spaceconfig = {"usemodules": ["binascii", "rctime"]} + spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']} def setup_method(self, method): space = self.space @@ -359,7 +359,8 @@ # A few extra tests class AppTestAFewExtra: - spaceconfig = {"usemodules": ['array', '_socket', 'binascii', 'rctime']} + spaceconfig = {'usemodules': ['_socket', 'array', 'binascii', 'rctime', + 'struct']} def setup_method(self, method): fn = str(udir.join('temptestfile')) diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py --- a/pypy/module/_md5/test/test_md5.py +++ b/pypy/module/_md5/test/test_md5.py @@ -5,7 +5,7 @@ class AppTestMD5(object): spaceconfig = { - "usemodules": ['_md5', 'rctime', 'binascii'], + 'usemodules': ['_md5', 'binascii', 'rctime', 'struct'], } def setup_class(cls): diff --git a/pypy/module/_sha/test/test_sha.py b/pypy/module/_sha/test/test_sha.py --- a/pypy/module/_sha/test/test_sha.py +++ b/pypy/module/_sha/test/test_sha.py @@ -5,7 +5,7 @@ class AppTestSHA(object): spaceconfig = { - "usemodules": ['_sha', 'rctime', 'binascii'], + 'usemodules': ['_sha', 'binascii', 'rctime', 'struct'], } def setup_class(cls): diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -53,7 +53,7 @@ class AppTestBZ2File(CheckAllocation): spaceconfig = { - "usemodules": ["bz2", "binascii", "rctime"] + 'usemodules': ['bz2', 'binascii', 'rctime', 'struct'] } def setup_class(cls): diff --git a/pypy/module/imp/test/test_app.py 
b/pypy/module/imp/test/test_app.py --- a/pypy/module/imp/test/test_app.py +++ b/pypy/module/imp/test/test_app.py @@ -4,7 +4,7 @@ class AppTestImpModule: spaceconfig = { - "usemodules": ['imp', 'itertools', 'binascii', 'rctime'], + 'usemodules': ['binascii', 'imp', 'itertools', 'rctime', 'struct'], } def setup_class(cls): diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -82,7 +82,7 @@ class AppTestAppComplexTest: - spaceconfig = {"usemodules": ["binascii", "rctime"]} + spaceconfig = {'usemodules': ['binascii', 'rctime', 'struct']} def w_check_div(self, x, y): """Compute complex z=x*y, and check that z/x==y and z/y==x.""" diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -459,7 +459,7 @@ class AppTestFloatHex: spaceconfig = { - "usemodules": ["binascii", "rctime"], + 'usemodules': ['binascii', 'rctime', 'struct'], } def w_identical(self, x, y): From noreply at buildbot.pypy.org Sat Aug 23 01:55:50 2014 From: noreply at buildbot.pypy.org (armooo) Date: Sat, 23 Aug 2014 01:55:50 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Update select.poll register/modify for 3.3 tests Message-ID: <20140822235550.B1A111D2A89@cobra.cs.uni-duesseldorf.de> Author: Jason Michalski Branch: stdlib-2.7.8 Changeset: r72978:529ffff25441 Date: 2014-08-20 21:53 -0700 http://bitbucket.org/pypy/pypy/changeset/529ffff25441/ Log: Update select.poll register/modify for 3.3 tests In python 3.3 select.poll register and modify raise an OverflowError when events is a negative value. 
(grafted from f5e910c8882588ec4780bda3c6419e7cf6eda9ae) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -31,11 +31,21 @@ @unwrap_spec(events="c_short") def register(self, space, w_fd, events=defaultevents): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events @unwrap_spec(events=int) def modify(self, space, w_fd, events): + if events < 0: + raise OperationError( + space.w_OverflowError, + space.wrap("Python int too large for C unsigned short"), + ) fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: raise wrap_oserror(space, OSError(errno.ENOENT, "poll.modify"), diff --git a/pypy/module/test_lib_pypy/test_poll.py b/pypy/module/test_lib_pypy/test_poll.py new file mode 100644 --- /dev/null +++ b/pypy/module/test_lib_pypy/test_poll.py @@ -0,0 +1,14 @@ +class AppTestPoll: + + spaceconfig = dict(usemodules=('select',)) + + def test_poll3(self): + import select + # test int overflow + pollster = select.poll() + pollster.register(1) + + raises(OverflowError, pollster.register, 0, -1) + raises(OverflowError, pollster.register, 0, 1 << 64) + raises(OverflowError, pollster.modify, 1, -1) + raises(OverflowError, pollster.modify, 1, 1 << 64) From noreply at buildbot.pypy.org Sat Aug 23 01:55:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:55:51 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: move/integrate these into the existing test_select Message-ID: <20140822235551.EF5471D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72979:4e771bcf30f6 Date: 2014-08-22 15:55 -0700 http://bitbucket.org/pypy/pypy/changeset/4e771bcf30f6/ Log: move/integrate these into the existing test_select (grafted from 
3b2796835762907581e947de99d51e6528027eed) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -218,6 +218,8 @@ skip("no select.poll() on this platform") pollster = select.poll() pollster.register(1) + raises(OverflowError, pollster.register, 0, -1) + raises(OverflowError, pollster.register, 0, 1 << 64) exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 assert str(exc.value) == 'signed short integer is greater than maximum' exc = raises(OverflowError, pollster.register, 0, -32768 - 1) @@ -229,6 +231,9 @@ exc = raises(TypeError, pollster.poll, '123') assert str(exc.value) == 'timeout must be an integer or None' + raises(OverflowError, pollster.modify, 1, -1) + raises(OverflowError, pollster.modify, 1, 1 << 64) + class AppTestSelectWithPipes(_AppTestSelect): "Use a pipe to get pairs of file descriptors" diff --git a/pypy/module/test_lib_pypy/test_poll.py b/pypy/module/test_lib_pypy/test_poll.py deleted file mode 100644 --- a/pypy/module/test_lib_pypy/test_poll.py +++ /dev/null @@ -1,14 +0,0 @@ -class AppTestPoll: - - spaceconfig = dict(usemodules=('select',)) - - def test_poll3(self): - import select - # test int overflow - pollster = select.poll() - pollster.register(1) - - raises(OverflowError, pollster.register, 0, -1) - raises(OverflowError, pollster.register, 0, 1 << 64) - raises(OverflowError, pollster.modify, 1, -1) - raises(OverflowError, pollster.modify, 1, 1 << 64) From noreply at buildbot.pypy.org Sat Aug 23 01:55:53 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 01:55:53 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: more closely match CPython requiring strict unsigned shorts Message-ID: <20140822235553.256FD1D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72980:06c030f20d39 Date: 2014-08-22 16:25 -0700 
http://bitbucket.org/pypy/pypy/changeset/06c030f20d39/ Log: more closely match CPython requiring strict unsigned shorts (grafted from ba30358bcb6b1073e261e4479206c7b3161012d2) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,11 +1,13 @@ import errno from rpython.rlib import _rsocket_rffi as _c, rpoll +from rpython.rlib.rarithmetic import USHRT_MAX from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.gateway import ( + Unwrapper, WrappedDefault, interp2app, unwrap_spec) from pypy.interpreter.typedef import TypeDef defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -24,28 +26,31 @@ return Poll() +class UShortUnwrapper(Unwrapper): + + def unwrap(self, space, w_value): + value = space.int_w(w_value) + if value < 0: + raise oefmt(space.w_OverflowError, + "can't convert negative value to C unsigned short") + if value > USHRT_MAX: + raise oefmt(space.w_OverflowError, + "Python int too large for C unsigned short") + return value + + class Poll(W_Root): def __init__(self): self.fddict = {} self.running = False - @unwrap_spec(events="c_short") + @unwrap_spec(events=UShortUnwrapper) def register(self, space, w_fd, events=defaultevents): - if events < 0: - raise OperationError( - space.w_OverflowError, - space.wrap("Python int too large for C unsigned short"), - ) fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events - @unwrap_spec(events=int) + @unwrap_spec(events=UShortUnwrapper) def modify(self, space, w_fd, events): - if events < 0: - raise OperationError( - space.w_OverflowError, - space.wrap("Python int too large for C unsigned short"), - ) fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: raise 
wrap_oserror(space, OSError(errno.ENOENT, "poll.modify"), diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -220,11 +220,11 @@ pollster.register(1) raises(OverflowError, pollster.register, 0, -1) raises(OverflowError, pollster.register, 0, 1 << 64) - exc = raises(OverflowError, pollster.register, 0, 32768) # SHRT_MAX + 1 - assert str(exc.value) == 'signed short integer is greater than maximum' + pollster.register(0, 32768) # SHRT_MAX + 1 exc = raises(OverflowError, pollster.register, 0, -32768 - 1) - assert str(exc.value) == 'signed short integer is less than minimum' - raises(OverflowError, pollster.register, 0, 65535) # USHRT_MAX + 1 + assert "unsigned" in str(exc.value) + pollster.register(0, 65535) # USHRT_MAX + raises(OverflowError, pollster.register, 0, 65536) # USHRT_MAX + 1 raises(OverflowError, pollster.poll, 2147483648) # INT_MAX + 1 raises(OverflowError, pollster.poll, -2147483648 - 1) raises(OverflowError, pollster.poll, 4294967296) # UINT_MAX + 1 From noreply at buildbot.pypy.org Sat Aug 23 02:15:24 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 23 Aug 2014 02:15:24 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: add custom __repr__ to Cell Message-ID: <20140823001524.AB93E1D2A89@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: stdlib-2.7.8 Changeset: r72981:43b8e678a5e5 Date: 2014-07-27 11:45 +0200 http://bitbucket.org/pypy/pypy/changeset/43b8e678a5e5/ Log: add custom __repr__ to Cell (grafted from c799b96b7ec531b447afdfeeaf574146af1eb3d3) diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -65,6 +65,14 @@ return "<%s(%s) at 0x%x>" % (self.__class__.__name__, content, uid(self)) + def descr__repr__(self, space): + if self.w_value is None: + content = "empty" + else: + content = 
"%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) + s = "" % (uid(self), content) + return space.wrap(s.decode('utf-8')) + def descr__cell_contents(self, space): try: return self.get() diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -60,6 +60,28 @@ def test_lambda_in_genexpr(self): assert eval('map(apply, (lambda: t for t in range(10)))') == range(10) + def test_cell_repr(self): + import re + from reprlib import repr as r # Don't shadow builtin repr + + def get_cell(): + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', repr(x)) + assert re.match(r'', r(x)) + + def get_cell(): + if False: + x = 42 + def inner(): + return x + return inner + x = get_cell().__closure__[0] + assert re.match(r'', repr(x)) + def test_cell_contents(self): def f(x): def f(y): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -926,6 +926,7 @@ __cmp__ = interp2app(Cell.descr__cmp__), __hash__ = None, __reduce__ = interp2app(Cell.descr__reduce__), + __repr__ = interp2app(Cell.descr__repr__), __setstate__ = interp2app(Cell.descr__setstate__), cell_contents= GetSetProperty(Cell.descr__cell_contents, cls=Cell), ) From noreply at buildbot.pypy.org Sat Aug 23 02:15:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 02:15:26 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix translation: uid is based on id (not rpython), we need to use getaddrstring Message-ID: <20140823001526.027AF1D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72982:6f4816e0e4a2 Date: 2014-07-27 13:03 -0700 http://bitbucket.org/pypy/pypy/changeset/6f4816e0e4a2/ Log: fix translation: uid is based on id (not rpython), we need to use 
getaddrstring instead (grafted from 6f49b270e66a0cbd2a2a3edf673bee80066e5099) diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -69,8 +69,9 @@ if self.w_value is None: content = "empty" else: - content = "%s object at 0x%x" % (space.type(self.w_value).name, uid(self.w_value)) - s = "" % (uid(self), content) + content = "%s object at 0x%s" % (space.type(self.w_value).name, + self.w_value.getaddrstring(space)) + s = "" % (self.getaddrstring(space), content) return space.wrap(s.decode('utf-8')) def descr__cell_contents(self, space): From noreply at buildbot.pypy.org Sat Aug 23 02:15:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 02:15:27 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: adapt to 2.7 from 3.3 Message-ID: <20140823001527.4E7231D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72983:34f22247550e Date: 2014-08-22 17:14 -0700 http://bitbucket.org/pypy/pypy/changeset/34f22247550e/ Log: adapt to 2.7 from 3.3 diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -72,7 +72,7 @@ content = "%s object at 0x%s" % (space.type(self.w_value).name, self.w_value.getaddrstring(space)) s = "" % (self.getaddrstring(space), content) - return space.wrap(s.decode('utf-8')) + return space.wrap(s) def descr__cell_contents(self, space): try: diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -62,7 +62,7 @@ def test_cell_repr(self): import re - from reprlib import repr as r # Don't shadow builtin repr + from repr import repr as r # Don't shadow builtin repr def get_cell(): x = 42 @@ -71,7 +71,7 @@ return inner x = get_cell().__closure__[0] assert 
re.match(r'', repr(x)) - assert re.match(r'', r(x)) + assert re.match(r'', r(x)) def get_cell(): if False: From noreply at buildbot.pypy.org Sat Aug 23 02:26:12 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 02:26:12 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: reapply our workaround Message-ID: <20140823002612.245881D2ABB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72984:3388e7753506 Date: 2014-08-22 17:25 -0700 http://bitbucket.org/pypy/pypy/changeset/3388e7753506/ Log: reapply our workaround diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py --- a/lib-python/2.7/test/test_descr.py +++ b/lib-python/2.7/test/test_descr.py @@ -4452,7 +4452,7 @@ self.assertNotEqual(l.__add__, [5].__add__) self.assertNotEqual(l.__add__, l.__mul__) self.assertEqual(l.__add__.__name__, '__add__') - if hasattr(l.__add__, '__self__'): + if hasattr(l.__add__, '__objclass__'): # CPython self.assertIs(l.__add__.__self__, l) self.assertIs(l.__add__.__objclass__, list) From noreply at buildbot.pypy.org Sat Aug 23 02:29:41 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 23 Aug 2014 02:29:41 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fixing _csv lineterminator exception message Message-ID: <20140823002941.C19721D2ABB@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: stdlib-2.7.8 Changeset: r72985:9446ebe200b3 Date: 2014-07-26 12:12 +0200 http://bitbucket.org/pypy/pypy/changeset/9446ebe200b3/ Log: fixing _csv lineterminator exception message (grafted from cbfe89ead8c2e8ad4d9b6f0cc5884e66bdf6f4ab) diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -34,10 +34,15 @@ return default return space.int_w(w_src) -def _get_str(space, w_src, default): +def _get_str(space, w_src, default, attrname): if w_src is None: return default - return space.str_w(w_src) + 
try: + return space.str_w(w_src) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_TypeError, '"%s" must be a string', attrname) + raise def _get_char(space, w_src, default, name): if w_src is None: @@ -91,7 +96,7 @@ dialect.delimiter = _get_char(space, w_delimiter, ',', 'delimiter') dialect.doublequote = _get_bool(space, w_doublequote, True) dialect.escapechar = _get_char(space, w_escapechar, '\0', 'escapechar') - dialect.lineterminator = _get_str(space, w_lineterminator, '\r\n') + dialect.lineterminator = _get_str(space, w_lineterminator, '\r\n', 'lineterminator') dialect.quotechar = _get_char(space, w_quotechar, '"', 'quotechar') tmp_quoting = _get_int(space, w_quoting, QUOTE_MINIMAL) dialect.skipinitialspace = _get_bool(space, w_skipinitialspace, False) diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -67,6 +67,9 @@ kwargs = {name: value} raises(TypeError, _csv.register_dialect, 'foo1', **kwargs) + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', lineterminator=4) + assert exc_info.value.args[0] == '"lineterminator" must be a string' + def test_bool_arg(self): # boolean arguments take *any* object and use its truth-value import _csv From noreply at buildbot.pypy.org Sat Aug 23 02:34:44 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 02:34:44 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: we match py3's improved error messages Message-ID: <20140823003444.981EB1D2ABB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72986:b6c042c35da7 Date: 2014-08-22 17:32 -0700 http://bitbucket.org/pypy/pypy/changeset/b6c042c35da7/ Log: we match py3's improved error messages diff --git a/lib-python/2.7/test/test_csv.py b/lib-python/2.7/test/test_csv.py --- a/lib-python/2.7/test/test_csv.py +++ b/lib-python/2.7/test/test_csv.py @@ 
-892,7 +892,7 @@ with self.assertRaises(csv.Error) as cm: mydialect() self.assertEqual(str(cm.exception), - '"quotechar" must be an 1-character string') + '"quotechar" must be a 1-character string') mydialect.quotechar = 4 with self.assertRaises(csv.Error) as cm: @@ -915,13 +915,13 @@ with self.assertRaises(csv.Error) as cm: mydialect() self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') mydialect.delimiter = "" with self.assertRaises(csv.Error) as cm: mydialect() self.assertEqual(str(cm.exception), - '"delimiter" must be an 1-character string') + '"delimiter" must be a 1-character string') mydialect.delimiter = u"," with self.assertRaises(csv.Error) as cm: From noreply at buildbot.pypy.org Sat Aug 23 02:34:45 2014 From: noreply at buildbot.pypy.org (numerodix) Date: Sat, 23 Aug 2014 02:34:45 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: add _csv test_delimiter test and replicate cpython error handling Message-ID: <20140823003445.C19E91D2ABB@cobra.cs.uni-duesseldorf.de> Author: Martin Matusiak Branch: stdlib-2.7.8 Changeset: r72987:5d1caa56b7cf Date: 2014-07-27 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/5d1caa56b7cf/ Log: add _csv test_delimiter test and replicate cpython error handling (grafted from 44f52293da324ce50c0138b5382d210e083de066) diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -49,6 +49,8 @@ return default if space.is_w(w_src, space.w_None): return '\0' + if not space.isinstance_w(w_src, space.w_str): + raise oefmt(space.w_TypeError, '"%s" must be string, not %T', name, w_src) src = space.str_w(w_src) if len(src) == 1: return src[0] @@ -109,7 +111,7 @@ if dialect.delimiter == '\0': raise OperationError(space.w_TypeError, - space.wrap('delimiter must be set')) + space.wrap('"delimiter" must be a 1-character string')) if space.is_w(w_quotechar, 
space.w_None) and w_quoting is None: tmp_quoting = QUOTE_NONE diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -80,6 +80,21 @@ _csv.register_dialect('foo1', strict=_csv) # :-/ assert _csv.get_dialect('foo1').strict == True + def test_delimiter(self): + import _csv + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=":::") + assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter="") + assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=b",") + assert exc_info.value.args[0] == '"delimiter" must be string, not bytes' + + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=4) + assert exc_info.value.args[0] == '"delimiter" must be string, not int' + def test_line_terminator(self): # lineterminator can be the empty string import _csv From noreply at buildbot.pypy.org Sat Aug 23 02:34:46 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 02:34:46 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: adapt to 2.7 from 3.3 Message-ID: <20140823003446.EAE531D2ABB@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72988:8fd30229b5fe Date: 2014-08-22 17:34 -0700 http://bitbucket.org/pypy/pypy/changeset/8fd30229b5fe/ Log: adapt to 2.7 from 3.3 diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -89,8 +89,8 @@ exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter="") assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' - exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=b",") - 
assert exc_info.value.args[0] == '"delimiter" must be string, not bytes' + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=u",") + assert exc_info.value.args[0] == '"delimiter" must be string, not unicode' exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=4) assert exc_info.value.args[0] == '"delimiter" must be string, not int' From noreply at buildbot.pypy.org Sat Aug 23 04:04:07 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sat, 23 Aug 2014 04:04:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Introduce gateway.Unwrapper, a convenient way to write custom unwrap_spec functions, Message-ID: <20140823020407.6EE831D38E3@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: Changeset: r72989:ae9a345ba034 Date: 2014-04-12 17:57 +0200 http://bitbucket.org/pypy/pypy/changeset/ae9a345ba034/ Log: Introduce gateway.Unwrapper, a convenient way to write custom unwrap_spec functions, Similar to the "O&" spec in PyArg_ParseTuple. manually grafted from fb069da001603788db20b69617cf4025e1eb1087 on the py3.3 branch diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -53,10 +53,24 @@ #________________________________________________________________ + +class Unwrapper(object): + """A base class for custom unwrap_spec items. + + Subclasses must override unwrap(). 
+ """ + def _freeze_(self): + return True + + def unwrap(self, space, w_value): + """NOT_RPYTHON""" + raise NotImplementedError + + class UnwrapSpecRecipe(object): "NOT_RPYTHON" - bases_order = [W_Root, ObjSpace, Arguments, object] + bases_order = [W_Root, ObjSpace, Arguments, Unwrapper, object] def dispatch(self, el, *args): if isinstance(el, str): @@ -153,6 +167,9 @@ def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit__Unwrapper(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit__ObjSpace(self, el, app_sig): self.orig_arg() @@ -212,6 +229,10 @@ self.run_args.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.scopenext())) + def visit__Unwrapper(self, typ): + self.run_args.append("%s().unwrap(space, %s)" % + (self.use(typ), self.scopenext())) + def visit__ObjSpace(self, el): self.run_args.append('space') @@ -352,6 +373,10 @@ self.unwrap.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.nextarg())) + def visit__Unwrapper(self, typ): + self.unwrap.append("%s().unwrap(space, %s)" % + (self.use(typ), self.nextarg())) + def visit__ObjSpace(self, el): if self.finger > 1: raise FastFuncNotSupported diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -529,6 +529,23 @@ raises(gateway.OperationError, space.call_function, w_app_g3_u, w(42)) + def test_interp2app_unwrap_spec_unwrapper(self): + space = self.space + class Unwrapper(gateway.Unwrapper): + def unwrap(self, space, w_value): + return space.int_w(w_value) + + w = space.wrap + def g3_u(space, value): + return space.wrap(value + 1) + app_g3_u = gateway.interp2app_temp(g3_u, + unwrap_spec=[gateway.ObjSpace, + Unwrapper]) + assert self.space.eq_w( + space.call_function(w(app_g3_u), w(42)), w(43)) + raises(gateway.OperationError, space.call_function, + w(app_g3_u), w(None)) + def 
test_interp2app_classmethod(self): space = self.space w = space.wrap From noreply at buildbot.pypy.org Sat Aug 23 04:04:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 04:04:08 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge default Message-ID: <20140823020408.B5D221D38E4@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r72990:29caf6638cd5 Date: 2014-08-22 19:03 -0700 http://bitbucket.org/pypy/pypy/changeset/29caf6638cd5/ Log: merge default diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -53,10 +53,24 @@ #________________________________________________________________ + +class Unwrapper(object): + """A base class for custom unwrap_spec items. + + Subclasses must override unwrap(). + """ + def _freeze_(self): + return True + + def unwrap(self, space, w_value): + """NOT_RPYTHON""" + raise NotImplementedError + + class UnwrapSpecRecipe(object): "NOT_RPYTHON" - bases_order = [W_Root, ObjSpace, Arguments, object] + bases_order = [W_Root, ObjSpace, Arguments, Unwrapper, object] def dispatch(self, el, *args): if isinstance(el, str): @@ -153,6 +167,9 @@ def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit__Unwrapper(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit__ObjSpace(self, el, app_sig): self.orig_arg() @@ -212,6 +229,10 @@ self.run_args.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.scopenext())) + def visit__Unwrapper(self, typ): + self.run_args.append("%s().unwrap(space, %s)" % + (self.use(typ), self.scopenext())) + def visit__ObjSpace(self, el): self.run_args.append('space') @@ -352,6 +373,10 @@ self.unwrap.append("space.descr_self_interp_w(%s, %s)" % (self.use(typ), self.nextarg())) + def visit__Unwrapper(self, typ): + self.unwrap.append("%s().unwrap(space, %s)" % + (self.use(typ), self.nextarg())) + def 
visit__ObjSpace(self, el): if self.finger > 1: raise FastFuncNotSupported diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -529,6 +529,23 @@ raises(gateway.OperationError, space.call_function, w_app_g3_u, w(42)) + def test_interp2app_unwrap_spec_unwrapper(self): + space = self.space + class Unwrapper(gateway.Unwrapper): + def unwrap(self, space, w_value): + return space.int_w(w_value) + + w = space.wrap + def g3_u(space, value): + return space.wrap(value + 1) + app_g3_u = gateway.interp2app_temp(g3_u, + unwrap_spec=[gateway.ObjSpace, + Unwrapper]) + assert self.space.eq_w( + space.call_function(w(app_g3_u), w(42)), w(43)) + raises(gateway.OperationError, space.call_function, + w(app_g3_u), w(None)) + def test_interp2app_classmethod(self): space = self.space w = space.wrap From noreply at buildbot.pypy.org Sat Aug 23 05:02:03 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 05:02:03 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Added encoding cookies to these files Message-ID: <20140823030203.B17B21D349A@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72991:192d1f3c02bf Date: 2014-08-22 20:01 -0700 http://bitbucket.org/pypy/pypy/changeset/192d1f3c02bf/ Log: Added encoding cookies to these files diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + class AppTestExc(object): spaceconfig = dict(usemodules=('exceptions',)) diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- class AppTestOperator: def 
test_equality(self): From noreply at buildbot.pypy.org Sat Aug 23 07:22:29 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 07:22:29 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fixed test_exceptions.py Message-ID: <20140823052229.967B21C3566@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72992:bd46b6d4447e Date: 2014-08-22 22:22 -0700 http://bitbucket.org/pypy/pypy/changeset/bd46b6d4447e/ Log: Fixed test_exceptions.py diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -278,10 +278,10 @@ class W_UnicodeTranslateError(W_UnicodeError): """Unicode translation error.""" - object = None - start = None - end = None - reason = None + w_object = None + w_start = None + w_end = None + w_reason = None def descr_init(self, space, w_object, w_start, w_end, w_reason): # typechecking @@ -299,6 +299,8 @@ def descr_str(self, space): return space.appexec([space.wrap(self)], r"""(self): + if self.object is None: + return "" if self.end == self.start + 1: badchar = ord(self.object[self.start]) if badchar <= 0xff: @@ -642,6 +644,8 @@ def descr_str(self, space): return space.appexec([self], """(self): + if self.object is None: + return "" if self.end == self.start + 1: return "'%s' codec can't decode byte 0x%02x in position %d: %s"%( self.encoding, @@ -730,6 +734,8 @@ def descr_str(self, space): return space.appexec([self], r"""(self): + if self.object is None: + return "" if self.end == self.start + 1: badchar = ord(self.object[self.start]) if badchar <= 0xff: diff --git a/pypy/module/exceptions/test/test_exc.py b/pypy/module/exceptions/test/test_exc.py --- a/pypy/module/exceptions/test/test_exc.py +++ b/pypy/module/exceptions/test/test_exc.py @@ -253,3 +253,7 @@ assert fw.z == 1 assert fw.xyz == (1, 2) + def test_unicode_error_uninitialized_str(self): + assert 
str(UnicodeEncodeError.__new__(UnicodeEncodeError)) == "" + assert str(UnicodeDecodeError.__new__(UnicodeDecodeError)) == "" + assert str(UnicodeTranslateError.__new__(UnicodeTranslateError)) == "" From noreply at buildbot.pypy.org Sat Aug 23 07:47:36 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 07:47:36 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fixed a crash in BufferedRWPair Message-ID: <20140823054736.352801C3566@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72993:505a10d18c25 Date: 2014-08-22 22:47 -0700 http://bitbucket.org/pypy/pypy/changeset/505a10d18c25/ Log: Fixed a crash in BufferedRWPair diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -919,9 +919,17 @@ @func_renamer(method + '_w') def method_w(self, space, __args__): if writer: + if self.w_writer is None: + raise OperationError( + space.w_ValueError, "I/O operation on uninitialized object" + ) w_meth = space.getattr(self.w_writer, space.wrap(method)) w_result = space.call_args(w_meth, __args__) if reader: + if self.w_reader is None: + raise OperationError( + space.w_ValueError, "I/O operation on uninitialized object" + ) w_meth = space.getattr(self.w_reader, space.wrap(method)) w_result = space.call_args(w_meth, __args__) return w_result From noreply at buildbot.pypy.org Sat Aug 23 07:56:29 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 07:56:29 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fix translation error Message-ID: <20140823055629.105D01C3566@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72994:ad3eef187a45 Date: 2014-08-22 22:56 -0700 http://bitbucket.org/pypy/pypy/changeset/ad3eef187a45/ Log: Fix translation error diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- 
a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -921,14 +921,16 @@ if writer: if self.w_writer is None: raise OperationError( - space.w_ValueError, "I/O operation on uninitialized object" + space.w_ValueError, + space.wrap("I/O operation on uninitialized object") ) w_meth = space.getattr(self.w_writer, space.wrap(method)) w_result = space.call_args(w_meth, __args__) if reader: if self.w_reader is None: raise OperationError( - space.w_ValueError, "I/O operation on uninitialized object" + space.w_ValueError, + space.wrap("I/O operation on uninitialized object") ) w_meth = space.getattr(self.w_reader, space.wrap(method)) w_result = space.call_args(w_meth, __args__) From noreply at buildbot.pypy.org Sat Aug 23 10:36:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 23 Aug 2014 10:36:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix? Message-ID: <20140823083608.E9E951D2809@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r72995:fdb4e95f1777 Date: 2014-08-23 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/fdb4e95f1777/ Log: Fix? 
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -380,7 +380,7 @@ # we have one word to align mc.SUB_ri(esp.value, 7 * WORD) # align and reserve some space mc.MOV_sr(WORD, eax.value) # save for later - mc.MOVSD_sx(3 * WORD, xmm0.value) + mc.MOVSD_sx(2 * WORD, xmm0.value) # 32-bit: also 3 * WORD if IS_X86_32: mc.MOV_sr(4 * WORD, edx.value) mc.MOV_sr(0, ebp.value) @@ -419,7 +419,7 @@ else: if IS_X86_32: mc.MOV_rs(edx.value, 4 * WORD) - mc.MOVSD_xs(xmm0.value, 3 * WORD) + mc.MOVSD_xs(xmm0.value, 2 * WORD) mc.MOV_rs(eax.value, WORD) # restore self._restore_exception(mc, exc0, exc1) mc.MOV(exc0, RawEspLoc(WORD * 5, REF)) From noreply at buildbot.pypy.org Sat Aug 23 16:44:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 23 Aug 2014 16:44:17 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: make/use a generic c_ushort_w unwrapper Message-ID: <20140823144417.8403E1C033D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r72996:1eaa8b67ec20 Date: 2014-08-23 10:43 -0400 http://bitbucket.org/pypy/pypy/changeset/1eaa8b67ec20/ Log: make/use a generic c_ushort_w unwrapper diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,7 +8,7 @@ compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ - INT_MIN, INT_MAX, UINT_MAX + INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) @@ -1646,6 +1646,16 @@ "signed short integer is greater than maximum") return value + def c_ushort_w(self, w_obj): + value = self.int_w(w_obj) + if value < 0: + raise oefmt(self.w_OverflowError, + "can't convert negative value to C unsigned short") + elif value > 
USHRT_MAX: + raise oefmt(self.w_OverflowError, + "Python int too large for C unsigned short") + return value + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -164,6 +164,9 @@ def visit_c_short(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_c_ushort(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -288,6 +291,9 @@ def visit_c_short(self, typ): self.run_args.append("space.c_short_w(%s)" % (self.scopenext(),)) + def visit_c_ushort(self, typ): + self.run_args.append("space.c_ushort_w(%s)" % (self.scopenext(),)) + def visit_truncatedint_w(self, typ): self.run_args.append("space.truncatedint_w(%s)" % (self.scopenext(),)) @@ -431,6 +437,9 @@ def visit_c_short(self, typ): self.unwrap.append("space.c_short_w(%s)" % (self.nextarg(),)) + def visit_c_ushort(self, typ): + self.unwrap.append("space.c_ushort_w(%s)" % (self.nextarg(),)) + def visit_truncatedint_w(self, typ): self.unwrap.append("space.truncatedint_w(%s)" % (self.nextarg(),)) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,13 +1,11 @@ import errno from rpython.rlib import _rsocket_rffi as _c, rpoll -from rpython.rlib.rarithmetic import USHRT_MAX from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.gateway import ( - Unwrapper, WrappedDefault, interp2app, unwrap_spec) +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from 
pypy.interpreter.typedef import TypeDef defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -26,30 +24,17 @@ return Poll() -class UShortUnwrapper(Unwrapper): - - def unwrap(self, space, w_value): - value = space.int_w(w_value) - if value < 0: - raise oefmt(space.w_OverflowError, - "can't convert negative value to C unsigned short") - if value > USHRT_MAX: - raise oefmt(space.w_OverflowError, - "Python int too large for C unsigned short") - return value - - class Poll(W_Root): def __init__(self): self.fddict = {} self.running = False - @unwrap_spec(events=UShortUnwrapper) + @unwrap_spec(events="c_ushort") def register(self, space, w_fd, events=defaultevents): fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events - @unwrap_spec(events=UShortUnwrapper) + @unwrap_spec(events="c_ushort") def modify(self, space, w_fd, events): fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: From noreply at buildbot.pypy.org Sat Aug 23 17:14:36 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 17:14:36 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Made functools.partial objects immutable Message-ID: <20140823151436.D71EB1C033D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72997:e23159188497 Date: 2014-08-23 08:12 -0700 http://bitbucket.org/pypy/pypy/changeset/e23159188497/ Log: Made functools.partial objects immutable diff --git a/lib-python/2.7/test/test_functools.py b/lib-python/2.7/test/test_functools.py --- a/lib-python/2.7/test/test_functools.py +++ b/lib-python/2.7/test/test_functools.py @@ -43,9 +43,9 @@ self.assertEqual(p.args, (1, 2)) self.assertEqual(p.keywords, dict(a=10, b=20)) # attributes should not be writable - self.assertRaises(TypeError, setattr, p, 'func', map) - self.assertRaises(TypeError, setattr, p, 'args', (1, 2)) - self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2)) + self.assertRaises((TypeError, AttributeError), setattr, p, 'func', map) + 
self.assertRaises((TypeError, AttributeError), setattr, p, 'args', (1, 2)) + self.assertRaises((TypeError, AttributeError), setattr, p, 'keywords', dict(a=1, b=2)) p = self.thetype(hex) try: diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -12,9 +12,21 @@ def __init__(self, func, *args, **keywords): if not callable(func): raise TypeError("the first argument must be callable") - self.func = func - self.args = args - self.keywords = keywords or None + self._func = func + self._args = args + self._keywords = keywords or None + + @property + def func(self): + return self._func + + @property + def args(self): + return self._args + + @property + def keywords(self): + return self._keywords def __call__(self, *fargs, **fkeywords): if self.keywords is not None: @@ -23,13 +35,13 @@ def __reduce__(self): d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in - ('func', 'args', 'keywords')) + ('_func', '_args', '_keywords')) if len(d) == 0: d = None return (type(self), (self.func,), (self.func, self.args, self.keywords, d)) def __setstate__(self, state): - self.func, self.args, self.keywords, d = state + self._func, self._args, self._keywords, d = state if d is not None: self.__dict__.update(d) diff --git a/pypy/module/test_lib_pypy/test_functools.py b/pypy/module/test_lib_pypy/test_functools.py --- a/pypy/module/test_lib_pypy/test_functools.py +++ b/pypy/module/test_lib_pypy/test_functools.py @@ -1,5 +1,8 @@ +import pytest + from lib_pypy import _functools + def test_partial_reduce(): partial = _functools.partial(test_partial_reduce) state = partial.__reduce__() @@ -17,3 +20,8 @@ string = pickle.dumps(partial1) partial2 = pickle.loads(string) assert partial1.func == partial2.func + +def test_immutable_attributes(): + partial = _functools.partial(object) + with pytest.raises((TypeError, AttributeError)): + partial.func = sum From noreply at buildbot.pypy.org Sat Aug 23 17:14:38 2014 From: 
noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 17:14:38 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merged upstream Message-ID: <20140823151438.432111C033D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72998:b4818b82a1da Date: 2014-08-23 08:14 -0700 http://bitbucket.org/pypy/pypy/changeset/b4818b82a1da/ Log: merged upstream diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -8,7 +8,7 @@ compute_unique_id, specialize) from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import r_uint, SHRT_MIN, SHRT_MAX, \ - INT_MIN, INT_MAX, UINT_MAX + INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) @@ -1646,6 +1646,16 @@ "signed short integer is greater than maximum") return value + def c_ushort_w(self, w_obj): + value = self.int_w(w_obj) + if value < 0: + raise oefmt(self.w_OverflowError, + "can't convert negative value to C unsigned short") + elif value > USHRT_MAX: + raise oefmt(self.w_OverflowError, + "Python int too large for C unsigned short") + return value + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. 
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -164,6 +164,9 @@ def visit_c_short(self, el, app_sig): self.checked_space_method(el, app_sig) + def visit_c_ushort(self, el, app_sig): + self.checked_space_method(el, app_sig) + def visit_truncatedint_w(self, el, app_sig): self.checked_space_method(el, app_sig) @@ -288,6 +291,9 @@ def visit_c_short(self, typ): self.run_args.append("space.c_short_w(%s)" % (self.scopenext(),)) + def visit_c_ushort(self, typ): + self.run_args.append("space.c_ushort_w(%s)" % (self.scopenext(),)) + def visit_truncatedint_w(self, typ): self.run_args.append("space.truncatedint_w(%s)" % (self.scopenext(),)) @@ -431,6 +437,9 @@ def visit_c_short(self, typ): self.unwrap.append("space.c_short_w(%s)" % (self.nextarg(),)) + def visit_c_ushort(self, typ): + self.unwrap.append("space.c_ushort_w(%s)" % (self.nextarg(),)) + def visit_truncatedint_w(self, typ): self.unwrap.append("space.truncatedint_w(%s)" % (self.nextarg(),)) diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -1,13 +1,11 @@ import errno from rpython.rlib import _rsocket_rffi as _c, rpoll -from rpython.rlib.rarithmetic import USHRT_MAX from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror -from pypy.interpreter.gateway import ( - Unwrapper, WrappedDefault, interp2app, unwrap_spec) +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef defaultevents = rpoll.POLLIN | rpoll.POLLOUT | rpoll.POLLPRI @@ -26,30 +24,17 @@ return Poll() -class UShortUnwrapper(Unwrapper): - - def unwrap(self, space, w_value): - value = space.int_w(w_value) - if value < 0: - raise oefmt(space.w_OverflowError, - 
"can't convert negative value to C unsigned short") - if value > USHRT_MAX: - raise oefmt(space.w_OverflowError, - "Python int too large for C unsigned short") - return value - - class Poll(W_Root): def __init__(self): self.fddict = {} self.running = False - @unwrap_spec(events=UShortUnwrapper) + @unwrap_spec(events="c_ushort") def register(self, space, w_fd, events=defaultevents): fd = space.c_filedescriptor_w(w_fd) self.fddict[fd] = events - @unwrap_spec(events=UShortUnwrapper) + @unwrap_spec(events="c_ushort") def modify(self, space, w_fd, events): fd = space.c_filedescriptor_w(w_fd) if fd not in self.fddict: From noreply at buildbot.pypy.org Sat Aug 23 17:19:05 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 17:19:05 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Improved interoperability of set objects Message-ID: <20140823151905.D7B061C033D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r72999:0c42f2897971 Date: 2014-08-23 08:18 -0700 http://bitbucket.org/pypy/pypy/changeset/0c42f2897971/ Log: Improved interoperability of set objects diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -200,8 +200,7 @@ # correct answer here! 
def descr_lt(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() >= w_other.length(): return space.w_False @@ -210,8 +209,7 @@ def descr_le(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() > w_other.length(): return space.w_False @@ -219,8 +217,7 @@ def descr_gt(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() <= w_other.length(): return space.w_False @@ -229,8 +226,7 @@ def descr_ge(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() < w_other.length(): return space.w_False diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,10 +8,8 @@ This file just contains some basic tests that make sure, the implementation is not too wrong. 
""" -import py.test from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set -from pypy.objspace.std.setobject import newset from pypy.objspace.std.listobject import W_ListObject letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' From noreply at buildbot.pypy.org Sat Aug 23 17:33:28 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 17:33:28 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Minimize the stack effect when these instructions are used (from CPython Message-ID: <20140823153328.8DFD61C1482@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73000:d52ea60276d3 Date: 2014-08-23 08:33 -0700 http://bitbucket.org/pypy/pypy/changeset/d52ea60276d3/ Log: Minimize the stack effect when these instructions are used (from CPython diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -406,6 +406,9 @@ target_depth += 3 if target_depth > self._max_depth: self._max_depth = target_depth + elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or + jump_op == ops.JUMP_IF_FALSE_OR_POP): + depth -= 1 self._next_stack_depth_walk(instr.jump[0], target_depth) if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD: # Nothing more can occur. 
From noreply at buildbot.pypy.org Sat Aug 23 18:09:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 23 Aug 2014 18:09:53 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix socket.recvfrom_into small buffer Message-ID: <20140823160953.1495F1C033D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73001:5797f71813ad Date: 2014-08-23 12:09 -0400 http://bitbucket.org/pypy/pypy/changeset/5797f71813ad/ Log: fix socket.recvfrom_into small buffer diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -446,8 +446,11 @@ def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() - if nbytes == 0 or nbytes > lgt: + if nbytes == 0: nbytes = lgt + elif nbytes > lgt: + raise OperationError(space.w_ValueError, space.wrap( + "nbytes is greater than the length of the buffer")) try: readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) if addr: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -716,6 +716,11 @@ msg = buf[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(8) + exc = raises(ValueError, cli.recvfrom_into, buf, 1024) + assert str(exc.value) == "nbytes is greater than the length of the buffer" + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) From noreply at buildbot.pypy.org Sat Aug 23 18:09:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 23 Aug 2014 18:09:54 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge heads Message-ID: <20140823160954.506111C033D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73002:f3300a7a8bc9 Date: 2014-08-23 12:09 -0400 
http://bitbucket.org/pypy/pypy/changeset/f3300a7a8bc9/ Log: merge heads diff --git a/lib-python/2.7/test/test_functools.py b/lib-python/2.7/test/test_functools.py --- a/lib-python/2.7/test/test_functools.py +++ b/lib-python/2.7/test/test_functools.py @@ -43,9 +43,9 @@ self.assertEqual(p.args, (1, 2)) self.assertEqual(p.keywords, dict(a=10, b=20)) # attributes should not be writable - self.assertRaises(TypeError, setattr, p, 'func', map) - self.assertRaises(TypeError, setattr, p, 'args', (1, 2)) - self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2)) + self.assertRaises((TypeError, AttributeError), setattr, p, 'func', map) + self.assertRaises((TypeError, AttributeError), setattr, p, 'args', (1, 2)) + self.assertRaises((TypeError, AttributeError), setattr, p, 'keywords', dict(a=1, b=2)) p = self.thetype(hex) try: diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -12,9 +12,21 @@ def __init__(self, func, *args, **keywords): if not callable(func): raise TypeError("the first argument must be callable") - self.func = func - self.args = args - self.keywords = keywords or None + self._func = func + self._args = args + self._keywords = keywords or None + + @property + def func(self): + return self._func + + @property + def args(self): + return self._args + + @property + def keywords(self): + return self._keywords def __call__(self, *fargs, **fkeywords): if self.keywords is not None: @@ -23,13 +35,13 @@ def __reduce__(self): d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in - ('func', 'args', 'keywords')) + ('_func', '_args', '_keywords')) if len(d) == 0: d = None return (type(self), (self.func,), (self.func, self.args, self.keywords, d)) def __setstate__(self, state): - self.func, self.args, self.keywords, d = state + self._func, self._args, self._keywords, d = state if d is not None: self.__dict__.update(d) diff --git a/pypy/interpreter/astcompiler/assemble.py 
b/pypy/interpreter/astcompiler/assemble.py --- a/pypy/interpreter/astcompiler/assemble.py +++ b/pypy/interpreter/astcompiler/assemble.py @@ -406,6 +406,9 @@ target_depth += 3 if target_depth > self._max_depth: self._max_depth = target_depth + elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or + jump_op == ops.JUMP_IF_FALSE_OR_POP): + depth -= 1 self._next_stack_depth_walk(instr.jump[0], target_depth) if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD: # Nothing more can occur. diff --git a/pypy/module/test_lib_pypy/test_functools.py b/pypy/module/test_lib_pypy/test_functools.py --- a/pypy/module/test_lib_pypy/test_functools.py +++ b/pypy/module/test_lib_pypy/test_functools.py @@ -1,5 +1,8 @@ +import pytest + from lib_pypy import _functools + def test_partial_reduce(): partial = _functools.partial(test_partial_reduce) state = partial.__reduce__() @@ -17,3 +20,8 @@ string = pickle.dumps(partial1) partial2 = pickle.loads(string) assert partial1.func == partial2.func + +def test_immutable_attributes(): + partial = _functools.partial(object) + with pytest.raises((TypeError, AttributeError)): + partial.func = sum diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -200,8 +200,7 @@ # correct answer here! 
def descr_lt(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() >= w_other.length(): return space.w_False @@ -210,8 +209,7 @@ def descr_le(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() > w_other.length(): return space.w_False @@ -219,8 +217,7 @@ def descr_gt(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() <= w_other.length(): return space.w_False @@ -229,8 +226,7 @@ def descr_ge(self, space, w_other): if not isinstance(w_other, W_BaseSetObject): - raise OperationError(self.space.w_TypeError, - self.space.wrap('can only compare to a set')) + return space.w_NotImplemented if self.length() < w_other.length(): return space.w_False diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -8,10 +8,8 @@ This file just contains some basic tests that make sure, the implementation is not too wrong. 
""" -import py.test from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject, IntegerSetStrategy from pypy.objspace.std.setobject import _initialize_set -from pypy.objspace.std.setobject import newset from pypy.objspace.std.listobject import W_ListObject letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' From noreply at buildbot.pypy.org Sat Aug 23 18:16:14 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 18:16:14 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: oefmt Message-ID: <20140823161614.6DEBE1D370A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r73003:e2f7912c2b72 Date: 2014-08-23 09:05 -0700 http://bitbucket.org/pypy/pypy/changeset/e2f7912c2b72/ Log: oefmt diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -920,18 +920,14 @@ def method_w(self, space, __args__): if writer: if self.w_writer is None: - raise OperationError( - space.w_ValueError, - space.wrap("I/O operation on uninitialized object") - ) + raise oefmt(space.w_ValueError, + "I/O operation on uninitialized object" w_meth = space.getattr(self.w_writer, space.wrap(method)) w_result = space.call_args(w_meth, __args__) if reader: if self.w_reader is None: - raise OperationError( - space.w_ValueError, - space.wrap("I/O operation on uninitialized object") - ) + raise oefmt(space.w_ValueError, + "I/O operation on uninitialized object") w_meth = space.getattr(self.w_reader, space.wrap(method)) w_result = space.call_args(w_meth, __args__) return w_result From noreply at buildbot.pypy.org Sat Aug 23 18:16:15 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 18:16:15 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge upstream Message-ID: <20140823161615.B2F751D370A@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: 
r73004:7a68266f03ab Date: 2014-08-23 09:15 -0700 http://bitbucket.org/pypy/pypy/changeset/7a68266f03ab/ Log: merge upstream diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -446,8 +446,11 @@ def recvfrom_into_w(self, space, w_buffer, nbytes=0, flags=0): rwbuffer = space.getarg_w('w*', w_buffer) lgt = rwbuffer.getlength() - if nbytes == 0 or nbytes > lgt: + if nbytes == 0: nbytes = lgt + elif nbytes > lgt: + raise OperationError(space.w_ValueError, space.wrap( + "nbytes is greater than the length of the buffer")) try: readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) if addr: diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py --- a/pypy/module/_socket/test/test_sock_app.py +++ b/pypy/module/_socket/test/test_sock_app.py @@ -716,6 +716,11 @@ msg = buf[:len(MSG)] assert msg == MSG + conn.send(MSG) + buf = bytearray(8) + exc = raises(ValueError, cli.recvfrom_into, buf, 1024) + assert str(exc.value) == "nbytes is greater than the length of the buffer" + def test_family(self): import socket cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM) From noreply at buildbot.pypy.org Sat Aug 23 18:19:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 18:19:04 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: sigh, check the namespace up front to satisfy test_execfile_args Message-ID: <20140823161904.CBB911D371F@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r73005:a0a22644ed92 Date: 2014-08-23 09:17 -0700 http://bitbucket.org/pypy/pypy/changeset/a0a22644ed92/ Log: sigh, check the namespace up front to satisfy test_execfile_args diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py --- a/pypy/module/__builtin__/app_io.py +++ b/pypy/module/__builtin__/app_io.py @@ -3,6 +3,7 @@ Plain Python 
definition of the builtin I/O-related functions. """ +import operator import sys from _ast import PyCF_ACCEPT_NULL_BYTES @@ -12,6 +13,12 @@ Read and execute a Python script from a file. The globals and locals are dictionaries, defaulting to the current globals and locals. If only globals is given, locals defaults to it.""" + if glob is not None and not isinstance(glob, dict): + raise TypeError("execfile() arg 2 must be a dict, not %s", + type(glob).__name__) + if loc is not None and not operator.isMappingType(loc): + raise TypeError("execfile() arg 3 must be a mapping, not %s", + type(loc).__name__) if glob is None: # Warning this is at hidden_applevel glob = globals() diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py --- a/pypy/module/__builtin__/test/test_builtin.py +++ b/pypy/module/__builtin__/test/test_builtin.py @@ -31,8 +31,10 @@ emptyfile.write('') nullbytes = udir.join('nullbytes.py') nullbytes.write('#abc\x00def\n') + nonexistent = udir.join('builtins-nonexistent') cls.w_emptyfile = space.wrap(str(emptyfile)) cls.w_nullbytes = space.wrap(str(nullbytes)) + cls.w_nonexistent = space.wrap(str(nonexistent)) def test_builtin_names(self): import __builtin__ @@ -627,6 +629,9 @@ raises(TypeError, compile, src, 'mymod', 'exec', 0) execfile(self.nullbytes) # works + def test_execfile_args(self): + raises(TypeError, execfile, self.nonexistent, {}, ()) + def test_compile_null_bytes_flag(self): try: from _ast import PyCF_ACCEPT_NULL_BYTES From noreply at buildbot.pypy.org Sat Aug 23 18:20:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 18:20:57 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: caret impl detail Message-ID: <20140823162057.09EA41D3732@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r73006:796ba78d41fb Date: 2014-08-23 09:20 -0700 http://bitbucket.org/pypy/pypy/changeset/796ba78d41fb/ Log: caret impl detail diff --git 
a/lib-python/2.7/test/test_traceback.py b/lib-python/2.7/test/test_traceback.py --- a/lib-python/2.7/test/test_traceback.py +++ b/lib-python/2.7/test/test_traceback.py @@ -122,7 +122,10 @@ self.assertEqual(len(err), 4) self.assertEqual(err[1].strip(), "print(2)") self.assertIn("^", err[2]) - self.assertEqual(err[1].find("p"), err[2].find("^")) + if check_impl_detail(): + self.assertEqual(err[1].find("p"), err[2].find("^")) + if check_impl_detail(pypy=True): + self.assertEqual(err[1].find("2)") + 1, err[2].find("^")) def test_base_exception(self): # Test that exceptions derived from BaseException are formatted right From noreply at buildbot.pypy.org Sat Aug 23 18:22:37 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 18:22:37 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: oefmt Message-ID: <20140823162237.6C4991D3740@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r73007:5a5d46304dbd Date: 2014-08-23 09:22 -0700 http://bitbucket.org/pypy/pypy/changeset/5a5d46304dbd/ Log: oefmt diff --git a/pypy/module/_socket/interp_socket.py b/pypy/module/_socket/interp_socket.py --- a/pypy/module/_socket/interp_socket.py +++ b/pypy/module/_socket/interp_socket.py @@ -449,8 +449,8 @@ if nbytes == 0: nbytes = lgt elif nbytes > lgt: - raise OperationError(space.w_ValueError, space.wrap( - "nbytes is greater than the length of the buffer")) + raise oefmt(space.w_ValueError, + "nbytes is greater than the length of the buffer") try: readlgt, addr = self.sock.recvfrom_into(rwbuffer, nbytes, flags) if addr: From noreply at buildbot.pypy.org Sat Aug 23 19:01:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 19:01:19 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fixed syntax error Message-ID: <20140823170119.B4BB81C3566@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73008:9898ba3f3289 Date: 2014-08-23 10:00 -0700 
http://bitbucket.org/pypy/pypy/changeset/9898ba3f3289/ Log: fixed syntax error diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -921,7 +921,7 @@ if writer: if self.w_writer is None: raise oefmt(space.w_ValueError, - "I/O operation on uninitialized object" + "I/O operation on uninitialized object") w_meth = space.getattr(self.w_writer, space.wrap(method)) w_result = space.call_args(w_meth, __args__) if reader: From noreply at buildbot.pypy.org Sat Aug 23 19:02:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 23 Aug 2014 19:02:16 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix file.writelines handling of buffers Message-ID: <20140823170216.06E3B1C3566@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73009:41f8b9449f35 Date: 2014-08-23 13:01 -0400 http://bitbucket.org/pypy/pypy/changeset/41f8b9449f35/ Log: fix file.writelines handling of buffers diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -466,7 +466,10 @@ for i, w_line in enumerate(lines): if not space.isinstance_w(w_line, space.w_str): try: - line = w_line.charbuf_w(space) + if self.binary: + line = w_line.readbuf_w(space).as_str() + else: + line = w_line.charbuf_w(space) except TypeError: raise OperationError(space.w_TypeError, space.wrap( "writelines() argument must be a sequence of strings")) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -402,11 +402,12 @@ with file(fn, 'wb') as f: f.writelines(['abc']) f.writelines([u'def']) - exc = raises(TypeError, f.writelines, [array.array('c', 'ghi')]) - assert str(exc.value) == "writelines() argument must be a sequence of 
strings" + f.writelines([array.array('c', 'ghi')]) exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" - assert open(fn, 'rb').readlines() == ['abcdef'] + out = open(fn, 'rb').readlines()[0] + assert out[0:5] == 'abcd\x00' + assert out[-3:] == 'ghi' with file(fn, 'wb') as f: exc = raises(TypeError, f.writelines, ['abc', memoryview('def')]) From noreply at buildbot.pypy.org Sat Aug 23 19:36:57 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 23 Aug 2014 19:36:57 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: use 'raw_malloc_usage' for calculating addresses Message-ID: <20140823173657.B5F891D2809@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r73010:09b914913112 Date: 2014-08-22 17:23 +0200 http://bitbucket.org/pypy/pypy/changeset/09b914913112/ Log: use 'raw_malloc_usage' for calculating addresses diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -614,7 +614,7 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. - result = self.collect_and_reserve(totalsize) + result = self.collect_and_reserve(rawtotalsize) # # Build the object. llarena.arena_reserve(result, totalsize) @@ -670,7 +670,7 @@ # # Get the memory from the nursery. If there is not enough space # there, do a collect first. - result = self.collect_and_reserve(totalsize) + result = self.collect_and_reserve(raw_malloc_usage(totalsize)) # # Build the object. llarena.arena_reserve(result, totalsize) From noreply at buildbot.pypy.org Sat Aug 23 19:36:58 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 23 Aug 2014 19:36:58 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: tweak odds for unpinning and removing from stack. 
additionally randomize how many gc steps are done Message-ID: <20140823173658.E5E6C1D2809@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r73011:c4573c1c9d31 Date: 2014-08-23 19:36 +0200 http://bitbucket.org/pypy/pypy/changeset/c4573c1c9d31/ Log: tweak odds for unpinning and removing from stack. additionally randomize how many gc steps are done diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -69,14 +69,14 @@ if random.random() < 0.5: self.gc.pin(llmemory.cast_ptr_to_adr(obj)) print("+pin") - self.gc.debug_gc_step() + self.gc.debug_gc_step(random.randint(1, 4)) for o in self.stackroots[:]: assert o.someInt == 100 o_adr = llmemory.cast_ptr_to_adr(o) - if random.random() < 0.5 and self.gc._is_pinned(o_adr): + if random.random() < 0.1 and self.gc._is_pinned(o_adr): print("-pin") self.gc.unpin(o_adr) - if random.random() < 0.5: + if random.random() < 0.1: print("-stack") self.stackroots.remove(o) From noreply at buildbot.pypy.org Sat Aug 23 20:05:42 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 23 Aug 2014 20:05:42 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: complain if the codec doesn't return unicode Message-ID: <20140823180542.D79631D2A89@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: stdlib-2.7.8 Changeset: r73012:deedb883c0e5 Date: 2014-08-23 11:04 -0700 http://bitbucket.org/pypy/pypy/changeset/deedb883c0e5/ Log: complain if the codec doesn't return unicode diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ b/pypy/interpreter/pyparser/pyparse.py @@ -1,17 +1,15 @@ -from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.pyparser import future, parser, pytokenizer, pygram, error from 
pypy.interpreter.astcompiler import consts +def recode_to_utf8(space, bytes, encoding): + w_text = space.call_method(space.wrap(bytes), "decode", + space.wrap(encoding)) + if not space.isinstance_w(w_text, space.w_unicode): + raise error.SyntaxError("codec did not return a unicode object") + w_recoded = space.call_method(w_text, "encode", space.wrap("utf-8")) + return space.str_w(w_recoded) -_recode_to_utf8 = gateway.applevel(r''' - def _recode_to_utf8(text, encoding): - return unicode(text, encoding).encode("utf-8") -''').interphook('_recode_to_utf8') - -def recode_to_utf8(space, text, encoding): - return space.str_w(_recode_to_utf8(space, space.wrap(text), - space.wrap(encoding))) def _normalize_encoding(encoding): """returns normalized name for diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -64,6 +64,13 @@ assert exc.msg == ("'ascii' codec can't decode byte 0xc3 " "in position 16: ordinal not in range(128)") + def test_non_unicode_codec(self): + exc = py.test.raises(SyntaxError, self.parse, """\ +# coding: string-escape +\x70\x72\x69\x6e\x74\x20\x32\x2b\x32\x0a +""").value + assert exc.msg == "codec did not return a unicode object" + def test_syntax_error(self): parse = self.parse exc = py.test.raises(SyntaxError, parse, "name another for").value From noreply at buildbot.pypy.org Sat Aug 23 21:02:13 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 21:02:13 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Can now use == and != to compare set objects to collections.Set instances Message-ID: <20140823190213.838841C3566@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73013:f3440715c780 Date: 2014-08-23 12:01 -0700 http://bitbucket.org/pypy/pypy/changeset/f3440715c780/ Log: Can now use == and != to compare set objects to 
collections.Set instances diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -178,7 +178,7 @@ return space.wrap(self.equals(w_other)) if not space.isinstance_w(w_other, space.w_set): - return space.w_False + return space.w_NotImplemented # tested in test_builtinshortcut.py # XXX do not make new setobject here @@ -190,7 +190,7 @@ return space.wrap(not self.equals(w_other)) if not space.isinstance_w(w_other, space.w_set): - return space.w_True + return space.w_NotImplemented # XXX this is not tested w_other_as_set = self._newobj(space, w_other) From noreply at buildbot.pypy.org Sat Aug 23 21:42:11 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 23 Aug 2014 21:42:11 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: add failing test revealing a bug where a pointer in 'objects_to_trace' becomes dead Message-ID: <20140823194211.BEEAB1C033D@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r73014:259c0aa0d1f7 Date: 2014-08-23 21:39 +0200 http://bitbucket.org/pypy/pypy/changeset/259c0aa0d1f7/ Log: add failing test revealing a bug where a pointer in 'objects_to_trace' becomes dead diff --git a/rpython/memory/gc/test/test_object_pinning.py b/rpython/memory/gc/test/test_object_pinning.py --- a/rpython/memory/gc/test/test_object_pinning.py +++ b/rpython/memory/gc/test/test_object_pinning.py @@ -597,6 +597,22 @@ assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2)) + def test_objects_to_trace_bug(self): + # scenario: In a previous implementation there was a bug because of a + # dead pointer inside 'objects_to_trace'. This was caused by the first + # major collection step that added the pointer to the list and right + # after the collection step the object is unpinned and freed by the minor + # collection, leaving a dead pointer in the list. 
+ pinned_ptr = self.malloc(T) + pinned_ptr.someInt = 101 + self.stackroots.append(pinned_ptr) + pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr) + assert self.gc.pin(pinned_adr) + self.gc.debug_gc_step() + self.gc.unpin(pinned_adr) + self.gc.debug_gc_step() + + def pin_shadow_2(self, collect_func): ptr = self.malloc(T) adr = llmemory.cast_ptr_to_adr(ptr) From noreply at buildbot.pypy.org Sat Aug 23 22:18:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 23 Aug 2014 22:18:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for 'rpy_fastgil' on shadowstack: see comments Message-ID: <20140823201829.026F61D2313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73015:8357f0cf25ea Date: 2014-08-23 22:18 +0200 http://bitbucket.org/pypy/pypy/changeset/8357f0cf25ea/ Log: Fix for 'rpy_fastgil' on shadowstack: see comments diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1066,13 +1066,16 @@ cb = callbuilder.CallBuilder(self, fnloc, arglocs) cb.emit_no_collect() - def _reload_frame_if_necessary(self, mc, align_stack=False): + def _reload_frame_if_necessary(self, mc, align_stack=False, + shadowstack_reg=None): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: - rst = gcrootmap.get_root_stack_top_addr() - mc.MOV(ecx, heap(rst)) - mc.MOV(ebp, mem(ecx, -WORD)) + if shadowstack_reg is None: + rst = gcrootmap.get_root_stack_top_addr() + mc.MOV(ecx, heap(rst)) + shadowstack_reg = ecx + mc.MOV(ebp, mem(shadowstack_reg, -WORD)) wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -1,6 +1,7 @@ import sys from 
rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) @@ -21,6 +22,8 @@ def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) +NO_ARG_FUNC_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) + class CallBuilderX86(AbstractCallBuilder): @@ -87,13 +90,50 @@ self.asm.push_gcmap(self.mc, gcmap, store=True) def pop_gcmap(self): - self.asm._reload_frame_if_necessary(self.mc) + ssreg = None + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack and self.is_call_release_gil: + from rpython.jit.backend.x86.assembler import heap + from rpython.jit.backend.x86 import rx86 + from rpython.rtyper.lltypesystem.lloperation import llop + # + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in 'ebx'), and if not, we call 'thread_run'. 
+ rst = gcrootmap.get_root_stack_top_addr() + mc = self.mc + mc.CMP(ebx, heap(rst)) + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() + # call 'thread_run' + t_run = llop.gc_thread_run_ptr(NO_ARG_FUNC_PTR) + mc.CALL(imm(rffi.cast(lltype.Signed, t_run))) + # patch the JE above + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) + ssreg = ebx + # + self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) if self.change_extra_stack_depth: self.asm.set_extra_stack_depth(self.mc, 0) self.asm.pop_gcmap(self.mc) def call_releasegil_addr_and_move_real_arguments(self, fastgil): from rpython.jit.backend.x86.assembler import heap + assert self.is_call_release_gil + # + # Save this thread's shadowstack pointer into 'ebx', + # for later comparison + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack: + rst = gcrootmap.get_root_stack_top_addr() + self.mc.MOV(ebx, heap(rst)) # if not self.asm._is_asmgcc(): # shadowstack: change 'rpy_fastgil' to 0 (it should be diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -977,6 +977,12 @@ hop.genop("direct_call", [self.root_walker.thread_run_ptr]) self.pop_roots(hop, livevars) + def gct_gc_thread_run_ptr(self, hop): + assert self.translator.config.translation.thread + assert hasattr(self.root_walker, 'thread_run_ptr') + hop.genop("same_as", [self.root_walker.thread_run_ptr], + resultvar=hop.spaceop.result) + def gct_gc_thread_start(self, hop): assert self.translator.config.translation.thread if hasattr(self.root_walker, 'thread_start_ptr'): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -473,6 +473,7 @@ 
'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), 'gc_thread_run' : LLOp(), + 'gc_thread_run_ptr' : LLOp(sideeffects=False), 'gc_thread_start' : LLOp(), 'gc_thread_die' : LLOp(), 'gc_thread_before_fork':LLOp(), # returns an opaque address From noreply at buildbot.pypy.org Sat Aug 23 22:31:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 23 Aug 2014 22:31:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Add comments about memory barriers Message-ID: <20140823203149.CAC911D2313@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73016:bf3e8fa831fd Date: 2014-08-23 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/bf3e8fa831fd/ Log: Add comments about memory barriers diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -164,6 +164,7 @@ self.asm.set_extra_stack_depth(self.mc, -delta * WORD) css_value = eax # + # <--here--> would come a memory fence, if the CPU needed one. self.mc.MOV(heap(fastgil), css_value) # if not we_are_translated(): # for testing: we should not access @@ -196,6 +197,8 @@ old_value = esi mc.LEA_rs(css_value.value, css) # + # Use XCHG as an atomic test-and-set-lock. It also implicitly + # does a memory barrier. 
mc.MOV(old_value, imm(1)) if rx86.fits_in_32bits(fastgil): mc.XCHG_rj(old_value.value, fastgil) From noreply at buildbot.pypy.org Sat Aug 23 22:46:57 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sat, 23 Aug 2014 22:46:57 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Bump version in docs Message-ID: <20140823204657.5C5AF1D2313@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73017:a12f1078d9af Date: 2014-08-23 13:46 -0700 http://bitbucket.org/pypy/pypy/changeset/a12f1078d9af/ Log: Bump version in docs diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst --- a/pypy/doc/coding-guide.rst +++ b/pypy/doc/coding-guide.rst @@ -592,7 +592,7 @@ Modules visible from application programs are imported from interpreter or application level files. PyPy reuses almost all python -modules of CPython's standard library, currently from version 2.7.6. We +modules of CPython's standard library, currently from version 2.7.8. We sometimes need to `modify modules`_ and - more often - regression tests because they rely on implementation details of CPython. From noreply at buildbot.pypy.org Sat Aug 23 23:03:15 2014 From: noreply at buildbot.pypy.org (groggi) Date: Sat, 23 Aug 2014 23:03:15 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: (temporary?) fix for 'test_objects_to_trace_bug'. Needs another thought to be sure. Message-ID: <20140823210315.B6A8F1D2313@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r73018:981aab6596c2 Date: 2014-08-23 23:02 +0200 http://bitbucket.org/pypy/pypy/changeset/981aab6596c2/ Log: (temporary?) fix for 'test_objects_to_trace_bug'. Needs another thought to be sure. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2286,7 +2286,13 @@ def _collect_ref_stk(self, root): obj = root.address[0] llop.debug_nonnull_pointer(lltype.Void, obj) - self.objects_to_trace.append(obj) + if not self._is_pinned(obj): + # XXX: check if this is the right way (groggi). + # A pinned object can be on the stack. Such an object is handled + # by minor collections and shouldn't be specially handled by + # major collections. Therefore we only add not pinned objects to the + # list below. + self.objects_to_trace.append(obj) def _collect_ref_rec(self, root, ignored): self.objects_to_trace.append(root.address[0]) From noreply at buildbot.pypy.org Sun Aug 24 03:04:07 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 03:04:07 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fixed sqlite3 tests Message-ID: <20140824010407.05DB01D257A@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73019:076c750b2635 Date: 2014-08-23 18:03 -0700 http://bitbucket.org/pypy/pypy/changeset/076c750b2635/ Log: Fixed sqlite3 tests diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1369,15 +1369,18 @@ self.description = cursor.description self.values = values + def __len__(self): + return len(self.values) + def __getitem__(self, item): - if type(item) is int: + if isinstance(item, (int, long)): return self.values[item] else: item = item.lower() for idx, desc in enumerate(self.description): if desc[0].lower() == item: return self.values[idx] - raise KeyError + raise IndexError("No item with that key") def keys(self): return [desc[0] for desc in self.description] From noreply at buildbot.pypy.org Sun Aug 24 03:29:53 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 03:29:53 +0200 (CEST) Subject: [pypy-commit] pypy 
stdlib-2.7.8: Some missing methods on tkapp Message-ID: <20140824012953.72BC81C033D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73020:9cfd18fbf13f Date: 2014-08-23 18:29 -0700 http://bitbucket.org/pypy/pypy/changeset/9cfd18fbf13f/ Log: Some missing methods on tkapp diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -438,10 +438,41 @@ def getboolean(self, s): if isinstance(s, int): return s + if isinstance(s, unicode): + s = str(s) + if '\x00' in s: + raise TypeError v = tkffi.new("int*") res = tklib.Tcl_GetBoolean(self.interp, s, v) if res == tklib.TCL_ERROR: self.raiseTclError() + return bool(v[0]) + + def getint(self, s): + if isinstance(s, int): + return s + if isinstance(s, unicode): + s = str(s) + if '\x00' in s: + raise TypeError + v = tkffi.new("int*") + res = tklib.Tcl_GetInt(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return v[0] + + def getdouble(self, s): + if isinstance(s, float): + return s + if isinstance(s, unicode): + s = str(s) + if '\x00' in s: + raise TypeError + v = tkffi.new("double*") + res = tklib.Tcl_GetDouble(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return v[0] def mainloop(self, threshold): self._check_tcl_appartment() diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -70,6 +70,8 @@ void Tcl_DecrRefCount(Tcl_Obj* objPtr); int Tcl_GetBoolean(Tcl_Interp* interp, const char* src, int* boolPtr); +int Tcl_GetInt(Tcl_Interp* interp, const char* src, int* intPtr); +int Tcl_GetDouble(Tcl_Interp* interp, const char* src, double* doublePtr); char *Tcl_GetString(Tcl_Obj* objPtr); char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); From noreply at buildbot.pypy.org Sun Aug 24 09:59:39 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Sun, 24 Aug 2014 09:59:39 +0200 (CEST) Subject: [pypy-commit] pypy default: Improve the fix for shadowstack. The previous fix was sometimes Message-ID: <20140824075939.149DF1C3340@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73021:632c832bd32f Date: 2014-08-24 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/632c832bd32f/ Log: Improve the fix for shadowstack. The previous fix was sometimes overwriting the return value from the call_release_gil. diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -1,7 +1,6 @@ import sys from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.objectmodel import we_are_translated -from rpython.rtyper.lltypesystem import lltype, rffi from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) @@ -22,8 +21,6 @@ def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) -NO_ARG_FUNC_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) - class CallBuilderX86(AbstractCallBuilder): @@ -94,30 +91,9 @@ gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack and self.is_call_release_gil: - from rpython.jit.backend.x86.assembler import heap - from rpython.jit.backend.x86 import rx86 - from rpython.rtyper.lltypesystem.lloperation import llop - # - # When doing a call_release_gil with shadowstack, there - # is the risk that the 'rpy_fastgil' was free but the - # current shadowstack can be the one of a different - # thread. So here we check if the shadowstack pointer - # is still the same as before we released the GIL (saved - # in 'ebx'), and if not, we call 'thread_run'. 
- rst = gcrootmap.get_root_stack_top_addr() - mc = self.mc - mc.CMP(ebx, heap(rst)) - mc.J_il8(rx86.Conditions['E'], 0) - je_location = mc.get_relative_pos() - # call 'thread_run' - t_run = llop.gc_thread_run_ptr(NO_ARG_FUNC_PTR) - mc.CALL(imm(rffi.cast(lltype.Signed, t_run))) - # patch the JE above - offset = mc.get_relative_pos() - je_location - assert 0 < offset <= 127 - mc.overwrite(je_location-1, chr(offset)) + # in this mode, 'ebx' happens to contain the shadowstack + # top at this point, so reuse it instead of loading it again ssreg = ebx - # self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) if self.change_extra_stack_depth: self.asm.set_extra_stack_depth(self.mc, 0) @@ -206,8 +182,35 @@ mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) mc.CMP(old_value, css_value) - mc.J_il8(rx86.Conditions['E'], 0) - je_location = mc.get_relative_pos() + # + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if bool(gcrootmap) and gcrootmap.is_shadow_stack: + from rpython.jit.backend.x86.assembler import heap + # + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in 'ebx'), and if not, we fall back to 'reacqgil_addr'. + mc.J_il8(rx86.Conditions['NE'], 0) + jne_location = mc.get_relative_pos() + # here, ecx is zero (so rpy_fastgil was not acquired) + rst = gcrootmap.get_root_stack_top_addr() + mc = self.mc + mc.CMP(ebx, heap(rst)) + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() + # revert the rpy_fastgil acquired above, so that the + # general 'reacqgil_addr' below can acquire it again... 
+ mc.MOV(heap(fastgil), ecx) + # patch the JNE above + offset = mc.get_relative_pos() - jne_location + assert 0 < offset <= 127 + mc.overwrite(jne_location-1, chr(offset)) + else: + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() # # Yes, we need to call the reacqgil() function self.save_result_value_reacq() diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -977,12 +977,6 @@ hop.genop("direct_call", [self.root_walker.thread_run_ptr]) self.pop_roots(hop, livevars) - def gct_gc_thread_run_ptr(self, hop): - assert self.translator.config.translation.thread - assert hasattr(self.root_walker, 'thread_run_ptr') - hop.genop("same_as", [self.root_walker.thread_run_ptr], - resultvar=hop.spaceop.result) - def gct_gc_thread_start(self, hop): assert self.translator.config.translation.thread if hasattr(self.root_walker, 'thread_start_ptr'): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -473,7 +473,6 @@ 'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), 'gc_thread_run' : LLOp(), - 'gc_thread_run_ptr' : LLOp(sideeffects=False), 'gc_thread_start' : LLOp(), 'gc_thread_die' : LLOp(), 'gc_thread_before_fork':LLOp(), # returns an opaque address From noreply at buildbot.pypy.org Sun Aug 24 16:56:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 16:56:44 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: have islice release reference to source iterator when exhausted Message-ID: <20140824145644.7EC061C1482@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73022:5a56be10c6e7 Date: 2014-08-24 10:55 -0400 http://bitbucket.org/pypy/pypy/changeset/5a56be10c6e7/ Log: have islice release 
reference to source iterator when exhausted diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -391,16 +391,31 @@ # has no effect any more if stop > 0: self._ignore_items(stop) + self.iterable = None raise OperationError(self.space.w_StopIteration, self.space.w_None) self.stop = stop - (ignore + 1) if ignore > 0: self._ignore_items(ignore) - return self.space.next(self.iterable) + if self.iterable is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) + try: + return self.space.next(self.iterable) + except OperationError as e: + if e.match(self.space, self.space.w_StopIteration): + self.iterable = None + raise def _ignore_items(self, num): + if self.iterable is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) while True: - self.space.next(self.iterable) + try: + self.space.next(self.iterable) + except OperationError as e: + if e.match(self.space, self.space.w_StopIteration): + self.iterable = None + raise num -= 1 if num <= 0: break diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -237,6 +237,18 @@ assert list(itertools.islice(xrange(10), None,None)) == range(10) assert list(itertools.islice(xrange(10), None,None,None)) == range(10) + # check source iterator is not referenced from islice() + # after the latter has been exhausted + import weakref + for args in [(1,), (None,), (0, None, 2)]: + it = (x for x in (1, 2, 3)) + wr = weakref.ref(it) + it = itertools.islice(it, *args) + assert wr() is not None + list(it) # exhaust the iterator + assert wr() is None + raises(StopIteration, next, it) + def test_islice_dropitems_exact(self): import itertools From noreply at buildbot.pypy.org Sun Aug 24 17:37:20 2014 From: 
noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 17:37:20 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: cleanup operator mapping/sequence tests Message-ID: <20140824153720.671CD1C1482@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73023:e58e1cf2c0f2 Date: 2014-08-24 11:35 -0400 http://bitbucket.org/pypy/pypy/changeset/e58e1cf2c0f2/ Log: cleanup operator mapping/sequence tests diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -5,8 +5,10 @@ equivalent to x+y. ''' from __pypy__ import builtinify +import types -def countOf(a,b): + +def countOf(a,b): 'countOf(a, b) -- Return the number of times b occurs in a.' count = 0 for x in a: @@ -37,11 +39,11 @@ index += 1 raise ValueError, 'sequence.index(x): x not in sequence' -# XXX the following is approximative def isMappingType(obj,): 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - # XXX this is fragile and approximative anyway - return hasattr(obj, '__getitem__') and hasattr(obj, 'keys') + if isinstance(obj, types.InstanceType): + return hasattr(obj, '__getitem__') + return hasattr(obj, '__getitem__') and not hasattr(obj, '__getslice__') def isNumberType(obj,): 'isNumberType(a) -- Return True if a has a numeric type, False otherwise.' @@ -49,7 +51,9 @@ def isSequenceType(obj,): 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' - return hasattr(obj, '__getitem__') and not hasattr(obj, 'keys') + if isinstance(obj, dict): + return False + return hasattr(obj, '__getitem__') def repeat(obj, num): 'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.' 
diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -140,6 +140,35 @@ assert operator.repeat(a, 0) == [] raises(TypeError, operator.repeat, 6, 7) + def test_isMappingType(self): + import operator + assert not operator.isMappingType([]) + assert operator.isMappingType(dict()) + class M: + def __getitem__(self, key): + return 42 + assert operator.isMappingType(M()) + del M.__getitem__ + assert not operator.isMappingType(M()) + class M(object): + def __getitem__(self, key): + return 42 + assert operator.isMappingType(M()) + del M.__getitem__ + assert not operator.isMappingType(M()) + class M: + def __getitem__(self, key): + return 42 + def __getslice__(self, key): + return 42 + assert operator.isMappingType(M()) + class M(object): + def __getitem__(self, key): + return 42 + def __getslice__(self, key): + return 42 + assert not operator.isMappingType(M()) + def test_isSequenceType(self): import operator From noreply at buildbot.pypy.org Sun Aug 24 18:00:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 18:00:42 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: apply changes to dtoa.c from upstream Message-ID: <20140824160042.B01031C0306@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73024:d0a9be02345c Date: 2014-08-24 12:00 -0400 http://bitbucket.org/pypy/pypy/changeset/d0a9be02345c/ Log: apply changes to dtoa.c from upstream diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -122,6 +122,7 @@ #define PY_UINT32_T unsigned int #define PY_INT32_T int #define PY_UINT64_T unsigned long long +#include #include #include #include @@ -221,7 +222,24 @@ MAX_ABS_EXP in absolute value get truncated to +-MAX_ABS_EXP. MAX_ABS_EXP should fit into an int. 
*/ #ifndef MAX_ABS_EXP -#define MAX_ABS_EXP 19999U +#define MAX_ABS_EXP 1100000000U +#endif +/* Bound on length of pieces of input strings in _Py_dg_strtod; specifically, + this is used to bound the total number of digits ignoring leading zeros and + the number of digits that follow the decimal point. Ideally, MAX_DIGITS + should satisfy MAX_DIGITS + 400 < MAX_ABS_EXP; that ensures that the + exponent clipping in _Py_dg_strtod can't affect the value of the output. */ +#ifndef MAX_DIGITS +#define MAX_DIGITS 1000000000U +#endif + +/* Guard against trying to use the above values on unusual platforms with ints + * of width less than 32 bits. */ +#if MAX_ABS_EXP > INT_MAX +#error "MAX_ABS_EXP should fit in an int" +#endif +#if MAX_DIGITS > INT_MAX +#error "MAX_DIGITS should fit in an int" #endif /* The following definition of Storeinc is appropriate for MIPS processors. @@ -1515,6 +1533,7 @@ Long L; BCinfo bc; Bigint *bb, *bb1, *bd, *bd0, *bs, *delta; + size_t ndigits, fraclen; dval(&rv) = 0.; @@ -1537,40 +1556,53 @@ c = *++s; lz = s != s1; - /* Point s0 at the first nonzero digit (if any). nd0 will be the position - of the point relative to s0. nd will be the total number of digits - ignoring leading zeros. */ + /* Point s0 at the first nonzero digit (if any). fraclen will be the + number of digits between the decimal point and the end of the + digit string. ndigits will be the total number of digits ignoring + leading zeros. */ s0 = s1 = s; while ('0' <= c && c <= '9') c = *++s; - nd0 = nd = s - s1; + ndigits = s - s1; + fraclen = 0; /* Parse decimal point and following digits. 
*/ if (c == '.') { c = *++s; - if (!nd) { + if (!ndigits) { s1 = s; while (c == '0') c = *++s; lz = lz || s != s1; - nd0 -= s - s1; + fraclen += (s - s1); s0 = s; } s1 = s; while ('0' <= c && c <= '9') c = *++s; - nd += s - s1; + ndigits += s - s1; + fraclen += s - s1; } - /* Now lz is true if and only if there were leading zero digits, and nd - gives the total number of digits ignoring leading zeros. A valid input - must have at least one digit. */ - if (!nd && !lz) { + /* Now lz is true if and only if there were leading zero digits, and + ndigits gives the total number of digits ignoring leading zeros. A + valid input must have at least one digit. */ + if (!ndigits && !lz) { if (se) *se = (char *)s00; goto parse_error; } + /* Range check ndigits and fraclen to make sure that they, and values + computed with them, can safely fit in an int. */ + if (ndigits > MAX_DIGITS || fraclen > MAX_DIGITS) { + if (se) + *se = (char *)s00; + goto parse_error; + } + nd = (int)ndigits; + nd0 = (int)ndigits - (int)fraclen; + /* Parse exponent. */ e = 0; if (c == 'e' || c == 'E') { @@ -1903,20 +1935,20 @@ bd2++; /* At this stage bd5 - bb5 == e == bd2 - bb2 + bbe, bb2 - bs2 == 1, - and bs == 1, so: + and bs == 1, so: tdv == bd * 10**e = bd * 2**(bbe - bb2 + bd2) * 5**(bd5 - bb5) srv == bb * 2**bbe = bb * 2**(bbe - bb2 + bb2) - 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2) + 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2) - It follows that: + It follows that: M * tdv = bd * 2**bd2 * 5**bd5 M * srv = bb * 2**bb2 * 5**bb5 M * 0.5 ulp(srv) = bs * 2**bs2 * 5**bb5 - for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but - this fact is not needed below.) + for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but + this fact is not needed below.) */ /* Remove factor of 2**i, where i = min(bb2, bd2, bs2). 
*/ From noreply at buildbot.pypy.org Sun Aug 24 19:23:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 19:23:19 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: (alex, arigo) Move the check for cmp(set(), set()) being illegal to cmp() itself. Message-ID: <20140824172319.633511C3340@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73025:33fb5b7c1051 Date: 2014-08-24 10:22 -0700 http://bitbucket.org/pypy/pypy/changeset/33fb5b7c1051/ Log: (alex, arigo) Move the check for cmp(set(), set()) being illegal to cmp() itself. The problem is that CPython only calls __cmp__ which are written in C (tp_compare) when the types are exactly the same, but PyPy treats __cmp__ written in RPython the same as one in python. diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -452,6 +452,11 @@ # The real comparison if space.is_w(space.type(w_v), space.type(w_w)): + if space.is_w(space.type(w_v), space.w_set): + raise OperationError( + space.w_TypeError, + space.wrap("cannot compare sets using cmp()") + ) # for object of the same type, prefer __cmp__ over rich comparison. 
w_cmp = space.lookup(w_v, '__cmp__') w_res = _invoke_binop(space, w_cmp, w_v, w_w) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -168,11 +168,6 @@ w_currently_in_repr = ec._py_repr = space.newdict() return setrepr(space, w_currently_in_repr, self) - def descr_cmp(self, space, w_other): - # hack hack until we get the expected result - raise OperationError(space.w_TypeError, - space.wrap('cannot compare sets using cmp()')) - def descr_eq(self, space, w_other): if isinstance(w_other, W_BaseSetObject): return space.wrap(self.equals(w_other)) @@ -519,7 +514,6 @@ __init__ = gateway.interp2app(W_BaseSetObject.descr_init), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = None, - __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), @@ -619,7 +613,6 @@ __new__ = gateway.interp2app(W_FrozensetObject.descr_new2), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = gateway.interp2app(W_FrozensetObject.descr_hash), - __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -340,7 +340,7 @@ def test_compare(self): raises(TypeError, cmp, set('abc'), set('abd')) assert set('abc') != 'abc' - raises(TypeError, "set('abc') < 42") + assert not set('abc') < 42 assert not (set('abc') < set('def')) assert not (set('abc') <= frozenset('abd')) assert not (set('abc') < frozenset('abd')) From noreply at buildbot.pypy.org Sun Aug 24 19:23:20 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 19:23:20 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merged upstream 
Message-ID: <20140824172320.CC5351C3340@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73026:0daa85d1846a Date: 2014-08-24 10:22 -0700 http://bitbucket.org/pypy/pypy/changeset/0daa85d1846a/ Log: merged upstream diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -391,16 +391,31 @@ # has no effect any more if stop > 0: self._ignore_items(stop) + self.iterable = None raise OperationError(self.space.w_StopIteration, self.space.w_None) self.stop = stop - (ignore + 1) if ignore > 0: self._ignore_items(ignore) - return self.space.next(self.iterable) + if self.iterable is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) + try: + return self.space.next(self.iterable) + except OperationError as e: + if e.match(self.space, self.space.w_StopIteration): + self.iterable = None + raise def _ignore_items(self, num): + if self.iterable is None: + raise OperationError(self.space.w_StopIteration, self.space.w_None) while True: - self.space.next(self.iterable) + try: + self.space.next(self.iterable) + except OperationError as e: + if e.match(self.space, self.space.w_StopIteration): + self.iterable = None + raise num -= 1 if num <= 0: break diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -237,6 +237,18 @@ assert list(itertools.islice(xrange(10), None,None)) == range(10) assert list(itertools.islice(xrange(10), None,None,None)) == range(10) + # check source iterator is not referenced from islice() + # after the latter has been exhausted + import weakref + for args in [(1,), (None,), (0, None, 2)]: + it = (x for x in (1, 2, 3)) + wr = weakref.ref(it) + it = itertools.islice(it, *args) + assert wr() is not None + list(it) # 
exhaust the iterator + assert wr() is None + raises(StopIteration, next, it) + def test_islice_dropitems_exact(self): import itertools diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py --- a/pypy/module/operator/app_operator.py +++ b/pypy/module/operator/app_operator.py @@ -5,8 +5,10 @@ equivalent to x+y. ''' from __pypy__ import builtinify +import types -def countOf(a,b): + +def countOf(a,b): 'countOf(a, b) -- Return the number of times b occurs in a.' count = 0 for x in a: @@ -37,11 +39,11 @@ index += 1 raise ValueError, 'sequence.index(x): x not in sequence' -# XXX the following is approximative def isMappingType(obj,): 'isMappingType(a) -- Return True if a has a mapping type, False otherwise.' - # XXX this is fragile and approximative anyway - return hasattr(obj, '__getitem__') and hasattr(obj, 'keys') + if isinstance(obj, types.InstanceType): + return hasattr(obj, '__getitem__') + return hasattr(obj, '__getitem__') and not hasattr(obj, '__getslice__') def isNumberType(obj,): 'isNumberType(a) -- Return True if a has a numeric type, False otherwise.' @@ -49,7 +51,9 @@ def isSequenceType(obj,): 'isSequenceType(a) -- Return True if a has a sequence type, False otherwise.' - return hasattr(obj, '__getitem__') and not hasattr(obj, 'keys') + if isinstance(obj, dict): + return False + return hasattr(obj, '__getitem__') def repeat(obj, num): 'repeat(a, b) -- Return a * b, where a is a sequence, and b is an integer.' 
diff --git a/pypy/module/operator/test/test_operator.py b/pypy/module/operator/test/test_operator.py --- a/pypy/module/operator/test/test_operator.py +++ b/pypy/module/operator/test/test_operator.py @@ -140,6 +140,35 @@ assert operator.repeat(a, 0) == [] raises(TypeError, operator.repeat, 6, 7) + def test_isMappingType(self): + import operator + assert not operator.isMappingType([]) + assert operator.isMappingType(dict()) + class M: + def __getitem__(self, key): + return 42 + assert operator.isMappingType(M()) + del M.__getitem__ + assert not operator.isMappingType(M()) + class M(object): + def __getitem__(self, key): + return 42 + assert operator.isMappingType(M()) + del M.__getitem__ + assert not operator.isMappingType(M()) + class M: + def __getitem__(self, key): + return 42 + def __getslice__(self, key): + return 42 + assert operator.isMappingType(M()) + class M(object): + def __getitem__(self, key): + return 42 + def __getslice__(self, key): + return 42 + assert not operator.isMappingType(M()) + def test_isSequenceType(self): import operator diff --git a/rpython/translator/c/src/dtoa.c b/rpython/translator/c/src/dtoa.c --- a/rpython/translator/c/src/dtoa.c +++ b/rpython/translator/c/src/dtoa.c @@ -122,6 +122,7 @@ #define PY_UINT32_T unsigned int #define PY_INT32_T int #define PY_UINT64_T unsigned long long +#include #include #include #include @@ -221,7 +222,24 @@ MAX_ABS_EXP in absolute value get truncated to +-MAX_ABS_EXP. MAX_ABS_EXP should fit into an int. */ #ifndef MAX_ABS_EXP -#define MAX_ABS_EXP 19999U +#define MAX_ABS_EXP 1100000000U +#endif +/* Bound on length of pieces of input strings in _Py_dg_strtod; specifically, + this is used to bound the total number of digits ignoring leading zeros and + the number of digits that follow the decimal point. Ideally, MAX_DIGITS + should satisfy MAX_DIGITS + 400 < MAX_ABS_EXP; that ensures that the + exponent clipping in _Py_dg_strtod can't affect the value of the output. 
*/ +#ifndef MAX_DIGITS +#define MAX_DIGITS 1000000000U +#endif + +/* Guard against trying to use the above values on unusual platforms with ints + * of width less than 32 bits. */ +#if MAX_ABS_EXP > INT_MAX +#error "MAX_ABS_EXP should fit in an int" +#endif +#if MAX_DIGITS > INT_MAX +#error "MAX_DIGITS should fit in an int" #endif /* The following definition of Storeinc is appropriate for MIPS processors. @@ -1515,6 +1533,7 @@ Long L; BCinfo bc; Bigint *bb, *bb1, *bd, *bd0, *bs, *delta; + size_t ndigits, fraclen; dval(&rv) = 0.; @@ -1537,40 +1556,53 @@ c = *++s; lz = s != s1; - /* Point s0 at the first nonzero digit (if any). nd0 will be the position - of the point relative to s0. nd will be the total number of digits - ignoring leading zeros. */ + /* Point s0 at the first nonzero digit (if any). fraclen will be the + number of digits between the decimal point and the end of the + digit string. ndigits will be the total number of digits ignoring + leading zeros. */ s0 = s1 = s; while ('0' <= c && c <= '9') c = *++s; - nd0 = nd = s - s1; + ndigits = s - s1; + fraclen = 0; /* Parse decimal point and following digits. */ if (c == '.') { c = *++s; - if (!nd) { + if (!ndigits) { s1 = s; while (c == '0') c = *++s; lz = lz || s != s1; - nd0 -= s - s1; + fraclen += (s - s1); s0 = s; } s1 = s; while ('0' <= c && c <= '9') c = *++s; - nd += s - s1; + ndigits += s - s1; + fraclen += s - s1; } - /* Now lz is true if and only if there were leading zero digits, and nd - gives the total number of digits ignoring leading zeros. A valid input - must have at least one digit. */ - if (!nd && !lz) { + /* Now lz is true if and only if there were leading zero digits, and + ndigits gives the total number of digits ignoring leading zeros. A + valid input must have at least one digit. */ + if (!ndigits && !lz) { if (se) *se = (char *)s00; goto parse_error; } + /* Range check ndigits and fraclen to make sure that they, and values + computed with them, can safely fit in an int. 
*/ + if (ndigits > MAX_DIGITS || fraclen > MAX_DIGITS) { + if (se) + *se = (char *)s00; + goto parse_error; + } + nd = (int)ndigits; + nd0 = (int)ndigits - (int)fraclen; + /* Parse exponent. */ e = 0; if (c == 'e' || c == 'E') { @@ -1903,20 +1935,20 @@ bd2++; /* At this stage bd5 - bb5 == e == bd2 - bb2 + bbe, bb2 - bs2 == 1, - and bs == 1, so: + and bs == 1, so: tdv == bd * 10**e = bd * 2**(bbe - bb2 + bd2) * 5**(bd5 - bb5) srv == bb * 2**bbe = bb * 2**(bbe - bb2 + bb2) - 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2) + 0.5 ulp(srv) == 2**(bbe-1) = bs * 2**(bbe - bb2 + bs2) - It follows that: + It follows that: M * tdv = bd * 2**bd2 * 5**bd5 M * srv = bb * 2**bb2 * 5**bb5 M * 0.5 ulp(srv) = bs * 2**bs2 * 5**bb5 - for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but - this fact is not needed below.) + for some constant M. (Actually, M == 2**(bb2 - bbe) * 5**bb5, but + this fact is not needed below.) */ /* Remove factor of 2**i, where i = min(bb2, bd2, bs2). */ From noreply at buildbot.pypy.org Sun Aug 24 19:27:48 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 19:27:48 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fix for set subclasses Message-ID: <20140824172748.711A91C3340@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73027:fc100e8b8ec1 Date: 2014-08-24 10:27 -0700 http://bitbucket.org/pypy/pypy/changeset/fc100e8b8ec1/ Log: Fix for set subclasses diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -452,7 +452,7 @@ # The real comparison if space.is_w(space.type(w_v), space.type(w_w)): - if space.is_w(space.type(w_v), space.w_set): + if space.isinstance_w(w_v, space.w_set): raise OperationError( space.w_TypeError, space.wrap("cannot compare sets using cmp()") diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- 
a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -375,6 +375,11 @@ assert set() != set('abc') assert set('abc') != set('abd') + class X(set): + pass + + raises(TypeError, cmp, X(), X()) + def test_libpython_equality(self): for thetype in [frozenset, set]: word = "aaaaaaaaawfpasrtarspawparst" From noreply at buildbot.pypy.org Sun Aug 24 19:36:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 19:36:59 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: get/setslice for mmap Message-ID: <20140824173659.26B9A1C33A1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73028:6749e168ae74 Date: 2014-08-24 13:33 -0400 http://bitbucket.org/pypy/pypy/changeset/6749e168ae74/ Log: get/setslice for mmap diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -200,6 +200,46 @@ self.mmap.setitem(start, value[i]) start += step + def descr_getslice(self, space, w_ilow, w_ihigh): + self.check_valid() + i = space.getindex_w(w_ilow, None) + j = space.getindex_w(w_ihigh, None) + if i < 0: + i = 0 + elif i > self.mmap.size: + i = self.mmap.size + if j < 0: + j = 0 + if j < i: + j = i + elif j > self.mmap.size: + j = self.mmap.size + return space.wrap(self.mmap.getslice(i, (j - i))) + + def descr_setslice(self, space, w_ilow, w_ihigh, w_item): + self.check_valid() + i = space.getindex_w(w_ilow, None) + j = space.getindex_w(w_ihigh, None) + if i < 0: + i = 0 + elif i > self.mmap.size: + i = self.mmap.size + if j < 0: + j = 0 + if j < i: + j = i + elif j > self.mmap.size: + j = self.mmap.size + if not space.isinstance_w(w_item, space.w_str): + raise OperationError(space.w_IndexError, space.wrap( + "mmap slice assignment must be a string")) + value = space.realstr_w(w_item) + if len(value) != (j - i): + raise OperationError(space.w_IndexError, space.wrap( + "mmap slice assignment is wrong 
size")) + self.check_writeable() + self.mmap.setslice(i, value) + if rmmap._POSIX: @unwrap_spec(fileno=int, length=int, flags=int, @@ -255,6 +295,8 @@ __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), __setitem__ = interp2app(W_MMap.descr_setitem), + __getslice__ = interp2app(W_MMap.descr_getslice), + __setslice__ = interp2app(W_MMap.descr_setslice), ) constants = rmmap.constants diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ b/pypy/module/mmap/test/test_mmap.py @@ -524,6 +524,11 @@ f.seek(0) m = mmap(f.fileno(), 6) assert m[-3:7] == "bar" + assert m.__getslice__(-3, 7) == "foobar" + m.__setslice__(2, 4, "zz") + assert m.__getslice__(-3, 7) == "fozzar" + raises(TypeError, m.__getslice__, "abc", 2) + raises(IndexError, m.__setslice__, 2, 4, None) assert m[1:0:1] == "" From noreply at buildbot.pypy.org Sun Aug 24 19:37:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 19:37:00 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge heads Message-ID: <20140824173700.6887A1C33A1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73029:792f248bd1a4 Date: 2014-08-24 13:36 -0400 http://bitbucket.org/pypy/pypy/changeset/792f248bd1a4/ Log: merge heads diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -452,6 +452,11 @@ # The real comparison if space.is_w(space.type(w_v), space.type(w_w)): + if space.isinstance_w(w_v, space.w_set): + raise OperationError( + space.w_TypeError, + space.wrap("cannot compare sets using cmp()") + ) # for object of the same type, prefer __cmp__ over rich comparison. 
w_cmp = space.lookup(w_v, '__cmp__') w_res = _invoke_binop(space, w_cmp, w_v, w_w) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -168,11 +168,6 @@ w_currently_in_repr = ec._py_repr = space.newdict() return setrepr(space, w_currently_in_repr, self) - def descr_cmp(self, space, w_other): - # hack hack until we get the expected result - raise OperationError(space.w_TypeError, - space.wrap('cannot compare sets using cmp()')) - def descr_eq(self, space, w_other): if isinstance(w_other, W_BaseSetObject): return space.wrap(self.equals(w_other)) @@ -519,7 +514,6 @@ __init__ = gateway.interp2app(W_BaseSetObject.descr_init), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = None, - __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), @@ -619,7 +613,6 @@ __new__ = gateway.interp2app(W_FrozensetObject.descr_new2), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = gateway.interp2app(W_FrozensetObject.descr_hash), - __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -340,7 +340,7 @@ def test_compare(self): raises(TypeError, cmp, set('abc'), set('abd')) assert set('abc') != 'abc' - raises(TypeError, "set('abc') < 42") + assert not set('abc') < 42 assert not (set('abc') < set('def')) assert not (set('abc') <= frozenset('abd')) assert not (set('abc') < frozenset('abd')) @@ -375,6 +375,11 @@ assert set() != set('abc') assert set('abc') != set('abd') + class X(set): + pass + + raises(TypeError, cmp, X(), X()) + def test_libpython_equality(self): for thetype in [frozenset, set]: word = 
"aaaaaaaaawfpasrtarspawparst" From noreply at buildbot.pypy.org Sun Aug 24 19:37:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 19:37:43 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: some failing file tests to fix Message-ID: <20140824173743.E946B1C33A1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73030:a8d35e0814b0 Date: 2014-08-24 13:37 -0400 http://bitbucket.org/pypy/pypy/changeset/a8d35e0814b0/ Log: some failing file tests to fix diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -261,6 +261,17 @@ with self.file(self.temppath, 'r') as f: raises(IOError, f.truncate, 100) + def test_write_full(self): + try: + f = self.file('/dev/full', 'w', 1) + except IOError: + skip("requires '/dev/full'") + try: + f.write('hello') + raises(IOError, f.write, '\n') + finally: + f.close() + class AppTestNonblocking(object): def setup_class(cls): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -316,6 +316,13 @@ exc = raises(IOError, os.fdopen, fd, 'r') assert exc.value.errno == errno.EISDIR + def test_fdopen_keeps_fd_open_on_errors(self): + path = self.path + posix = self.posix + fd = posix.open(path, posix.O_RDONLY) + raises(OSError, posix.fdopen, fd, 'w') + posix.close(fd) # fd should not be closed + def test_getcwd(self): assert isinstance(self.posix.getcwd(), str) assert isinstance(self.posix.getcwdu(), unicode) From noreply at buildbot.pypy.org Sun Aug 24 19:38:39 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 19:38:39 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: frozenset also has this behavior, but if you check with cmp(frozenset(), frozenset()) you won't see it... 
Message-ID: <20140824173839.3EB2E1C33A1@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73031:8a0c7a93dc5d Date: 2014-08-24 10:37 -0700 http://bitbucket.org/pypy/pypy/changeset/8a0c7a93dc5d/ Log: frozenset also has this behavior, but if you check with cmp(frozenset(), frozenset()) you won't see it... diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -452,7 +452,8 @@ # The real comparison if space.is_w(space.type(w_v), space.type(w_w)): - if space.isinstance_w(w_v, space.w_set): + if (space.isinstance_w(w_v, space.w_set) or + space.isinstance_w(w_v, space.w_frozenset)): raise OperationError( space.w_TypeError, space.wrap("cannot compare sets using cmp()") diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py --- a/pypy/objspace/std/test/test_setobject.py +++ b/pypy/objspace/std/test/test_setobject.py @@ -339,6 +339,7 @@ def test_compare(self): raises(TypeError, cmp, set('abc'), set('abd')) + raises(TypeError, cmp, frozenset('abc'), frozenset('abd')) assert set('abc') != 'abc' assert not set('abc') < 42 assert not (set('abc') < set('def')) From noreply at buildbot.pypy.org Sun Aug 24 19:38:40 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 24 Aug 2014 19:38:40 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merged upstream Message-ID: <20140824173840.8B4471C33A1@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73032:b4661fbba593 Date: 2014-08-24 10:38 -0700 http://bitbucket.org/pypy/pypy/changeset/b4661fbba593/ Log: merged upstream diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -261,6 +261,17 @@ with self.file(self.temppath, 'r') as f: raises(IOError, f.truncate, 100) + def test_write_full(self): + try: + 
f = self.file('/dev/full', 'w', 1) + except IOError: + skip("requires '/dev/full'") + try: + f.write('hello') + raises(IOError, f.write, '\n') + finally: + f.close() + class AppTestNonblocking(object): def setup_class(cls): diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -200,6 +200,46 @@ self.mmap.setitem(start, value[i]) start += step + def descr_getslice(self, space, w_ilow, w_ihigh): + self.check_valid() + i = space.getindex_w(w_ilow, None) + j = space.getindex_w(w_ihigh, None) + if i < 0: + i = 0 + elif i > self.mmap.size: + i = self.mmap.size + if j < 0: + j = 0 + if j < i: + j = i + elif j > self.mmap.size: + j = self.mmap.size + return space.wrap(self.mmap.getslice(i, (j - i))) + + def descr_setslice(self, space, w_ilow, w_ihigh, w_item): + self.check_valid() + i = space.getindex_w(w_ilow, None) + j = space.getindex_w(w_ihigh, None) + if i < 0: + i = 0 + elif i > self.mmap.size: + i = self.mmap.size + if j < 0: + j = 0 + if j < i: + j = i + elif j > self.mmap.size: + j = self.mmap.size + if not space.isinstance_w(w_item, space.w_str): + raise OperationError(space.w_IndexError, space.wrap( + "mmap slice assignment must be a string")) + value = space.realstr_w(w_item) + if len(value) != (j - i): + raise OperationError(space.w_IndexError, space.wrap( + "mmap slice assignment is wrong size")) + self.check_writeable() + self.mmap.setslice(i, value) + if rmmap._POSIX: @unwrap_spec(fileno=int, length=int, flags=int, @@ -255,6 +295,8 @@ __len__ = interp2app(W_MMap.__len__), __getitem__ = interp2app(W_MMap.descr_getitem), __setitem__ = interp2app(W_MMap.descr_setitem), + __getslice__ = interp2app(W_MMap.descr_getslice), + __setslice__ = interp2app(W_MMap.descr_setslice), ) constants = rmmap.constants diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py --- a/pypy/module/mmap/test/test_mmap.py +++ 
b/pypy/module/mmap/test/test_mmap.py @@ -524,6 +524,11 @@ f.seek(0) m = mmap(f.fileno(), 6) assert m[-3:7] == "bar" + assert m.__getslice__(-3, 7) == "foobar" + m.__setslice__(2, 4, "zz") + assert m.__getslice__(-3, 7) == "fozzar" + raises(TypeError, m.__getslice__, "abc", 2) + raises(IndexError, m.__setslice__, 2, 4, None) assert m[1:0:1] == "" diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -316,6 +316,13 @@ exc = raises(IOError, os.fdopen, fd, 'r') assert exc.value.errno == errno.EISDIR + def test_fdopen_keeps_fd_open_on_errors(self): + path = self.path + posix = self.posix + fd = posix.open(path, posix.O_RDONLY) + raises(OSError, posix.fdopen, fd, 'w') + posix.close(fd) # fd should not be closed + def test_getcwd(self): assert isinstance(self.posix.getcwd(), str) assert isinstance(self.posix.getcwdu(), unicode) From noreply at buildbot.pypy.org Sun Aug 24 19:53:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Aug 2014 19:53:02 +0200 (CEST) Subject: [pypy-commit] cffi default: Merged in vbernat/cffi/fix/unaligned-access (pull request #45) Message-ID: <20140824175302.70ABE1C3339@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1559:c8399b61c551 Date: 2014-08-24 19:53 +0200 http://bitbucket.org/cffi/cffi/changeset/c8399b61c551/ Log: Merged in vbernat/cffi/fix/unaligned-access (pull request #45) Use memcpy() instead of unaligned accesses. The fix is relevant on some platforms which would complain in this case. 
diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -740,95 +740,90 @@ return (unsigned PY_LONG_LONG)-1; } +#define _read_raw_data(type) \ + do { \ + if (size == sizeof(type)) { \ + type r; \ + memcpy(&r, target, sizeof(type)); \ + return r; \ + } \ + } while(0) + static PY_LONG_LONG read_raw_signed_data(char *target, int size) { - if (size == sizeof(signed char)) - return *((signed char*)target); - else if (size == sizeof(short)) - return *((short*)target); - else if (size == sizeof(int)) - return *((int*)target); - else if (size == sizeof(long)) - return *((long*)target); - else if (size == sizeof(PY_LONG_LONG)) - return *((PY_LONG_LONG*)target); - else { - Py_FatalError("read_raw_signed_data: bad integer size"); - return 0; - } + _read_raw_data(signed char); + _read_raw_data(short); + _read_raw_data(int); + _read_raw_data(long); + _read_raw_data(PY_LONG_LONG); + Py_FatalError("read_raw_signed_data: bad integer size"); + return 0; } static unsigned PY_LONG_LONG read_raw_unsigned_data(char *target, int size) { - if (size == sizeof(unsigned char)) - return *((unsigned char*)target); - else if (size == sizeof(unsigned short)) - return *((unsigned short*)target); - else if (size == sizeof(unsigned int)) - return *((unsigned int*)target); - else if (size == sizeof(unsigned long)) - return *((unsigned long*)target); - else if (size == sizeof(unsigned PY_LONG_LONG)) - return *((unsigned PY_LONG_LONG*)target); - else { - Py_FatalError("read_raw_unsigned_data: bad integer size"); - return 0; - } + _read_raw_data(unsigned char); + _read_raw_data(unsigned short); + _read_raw_data(unsigned int); + _read_raw_data(unsigned long); + _read_raw_data(unsigned PY_LONG_LONG); + Py_FatalError("read_raw_unsigned_data: bad integer size"); + return 0; } +#define _write_raw_data(type) \ + do { \ + if (size == sizeof(type)) { \ + type r = (type)source; \ + memcpy(target, &r, sizeof(type)); \ + return; \ + } \ + } while(0) + static void 
write_raw_integer_data(char *target, unsigned PY_LONG_LONG source, int size) { - if (size == sizeof(unsigned char)) - *((unsigned char*)target) = (unsigned char)source; - else if (size == sizeof(unsigned short)) - *((unsigned short*)target) = (unsigned short)source; - else if (size == sizeof(unsigned int)) - *((unsigned int*)target) = (unsigned int)source; - else if (size == sizeof(unsigned long)) - *((unsigned long*)target) = (unsigned long)source; - else if (size == sizeof(unsigned PY_LONG_LONG)) - *((unsigned PY_LONG_LONG*)target) = source; - else - Py_FatalError("write_raw_integer_data: bad integer size"); + _write_raw_data(unsigned char); + _write_raw_data(unsigned short); + _write_raw_data(unsigned int); + _write_raw_data(unsigned long); + _write_raw_data(unsigned PY_LONG_LONG); + Py_FatalError("write_raw_integer_data: bad integer size"); } static double read_raw_float_data(char *target, int size) { - if (size == sizeof(float)) - return *((float*)target); - else if (size == sizeof(double)) - return *((double*)target); - else { - Py_FatalError("read_raw_float_data: bad float size"); - return 0; - } + _read_raw_data(float); + _read_raw_data(double); + Py_FatalError("read_raw_float_data: bad float size"); + return 0; } static long double read_raw_longdouble_data(char *target) { - return *((long double*)target); + int size = sizeof(long double); + _read_raw_data(long double); + Py_FatalError("read_raw_longdouble_data: bad long double size"); + return 0; } static void write_raw_float_data(char *target, double source, int size) { - if (size == sizeof(float)) - *((float*)target) = (float)source; - else if (size == sizeof(double)) - *((double*)target) = source; - else - Py_FatalError("write_raw_float_data: bad float size"); + _write_raw_data(float); + _write_raw_data(double); + Py_FatalError("write_raw_float_data: bad float size"); } static void write_raw_longdouble_data(char *target, long double source) { - *((long double*)target) = source; + int size = sizeof(long 
double); + _write_raw_data(long double); } static PyObject * From noreply at buildbot.pypy.org Sun Aug 24 19:53:04 2014 From: noreply at buildbot.pypy.org (vbernat) Date: Sun, 24 Aug 2014 19:53:04 +0200 (CEST) Subject: [pypy-commit] cffi fix/unaligned-access: Use memcpy() instead of unaligned accesses. Message-ID: <20140824175304.A29CF1C3339@cobra.cs.uni-duesseldorf.de> Author: Vincent Bernat Branch: fix/unaligned-access Changeset: r1557:4832fbdcf7aa Date: 2014-08-23 22:43 +0200 http://bitbucket.org/cffi/cffi/changeset/4832fbdcf7aa/ Log: Use memcpy() instead of unaligned accesses. When a structure is packed, some members may be unaligned. Some architectures like Sparc are unable to handle those and will be terminated with a SIGBUS. This changeset use memcpy() to avoid any unaligned accesses. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -740,95 +740,90 @@ return (unsigned PY_LONG_LONG)-1; } +#define _read_raw_data(type) \ + do { \ + if (size == sizeof(type)) { \ + type r; \ + memcpy(&r, target, sizeof(type)); \ + return r; \ + } \ + } while(0) + static PY_LONG_LONG read_raw_signed_data(char *target, int size) { - if (size == sizeof(signed char)) - return *((signed char*)target); - else if (size == sizeof(short)) - return *((short*)target); - else if (size == sizeof(int)) - return *((int*)target); - else if (size == sizeof(long)) - return *((long*)target); - else if (size == sizeof(PY_LONG_LONG)) - return *((PY_LONG_LONG*)target); - else { - Py_FatalError("read_raw_signed_data: bad integer size"); - return 0; - } + _read_raw_data(signed char); + _read_raw_data(short); + _read_raw_data(int); + _read_raw_data(long); + _read_raw_data(PY_LONG_LONG); + Py_FatalError("read_raw_signed_data: bad integer size"); + return 0; } static unsigned PY_LONG_LONG read_raw_unsigned_data(char *target, int size) { - if (size == sizeof(unsigned char)) - return *((unsigned char*)target); - else if (size == sizeof(unsigned short)) - 
return *((unsigned short*)target); - else if (size == sizeof(unsigned int)) - return *((unsigned int*)target); - else if (size == sizeof(unsigned long)) - return *((unsigned long*)target); - else if (size == sizeof(unsigned PY_LONG_LONG)) - return *((unsigned PY_LONG_LONG*)target); - else { - Py_FatalError("read_raw_unsigned_data: bad integer size"); - return 0; - } + _read_raw_data(unsigned char); + _read_raw_data(unsigned short); + _read_raw_data(unsigned int); + _read_raw_data(unsigned long); + _read_raw_data(unsigned PY_LONG_LONG); + Py_FatalError("read_raw_unsigned_data: bad integer size"); + return 0; } +#define _write_raw_data(type) \ + do { \ + if (size == sizeof(type)) { \ + type r = source; \ + memcpy(target, &r, sizeof(type)); \ + return; \ + } \ + } while(0) + static void write_raw_integer_data(char *target, unsigned PY_LONG_LONG source, int size) { - if (size == sizeof(unsigned char)) - *((unsigned char*)target) = (unsigned char)source; - else if (size == sizeof(unsigned short)) - *((unsigned short*)target) = (unsigned short)source; - else if (size == sizeof(unsigned int)) - *((unsigned int*)target) = (unsigned int)source; - else if (size == sizeof(unsigned long)) - *((unsigned long*)target) = (unsigned long)source; - else if (size == sizeof(unsigned PY_LONG_LONG)) - *((unsigned PY_LONG_LONG*)target) = source; - else - Py_FatalError("write_raw_integer_data: bad integer size"); + _write_raw_data(unsigned char); + _write_raw_data(unsigned short); + _write_raw_data(unsigned int); + _write_raw_data(unsigned long); + _write_raw_data(unsigned PY_LONG_LONG); + Py_FatalError("write_raw_integer_data: bad integer size"); } static double read_raw_float_data(char *target, int size) { - if (size == sizeof(float)) - return *((float*)target); - else if (size == sizeof(double)) - return *((double*)target); - else { - Py_FatalError("read_raw_float_data: bad float size"); - return 0; - } + _read_raw_data(float); + _read_raw_data(double); + 
Py_FatalError("read_raw_float_data: bad float size"); + return 0; } static long double read_raw_longdouble_data(char *target) { - return *((long double*)target); + int size = sizeof(long double); + _read_raw_data(long double); + Py_FatalError("read_raw_longdouble_data: bad long double size"); + return 0; } static void write_raw_float_data(char *target, double source, int size) { - if (size == sizeof(float)) - *((float*)target) = (float)source; - else if (size == sizeof(double)) - *((double*)target) = source; - else - Py_FatalError("write_raw_float_data: bad float size"); + _write_raw_data(float); + _write_raw_data(double); + Py_FatalError("write_raw_float_data: bad float size"); } static void write_raw_longdouble_data(char *target, long double source) { - *((long double*)target) = source; + int size = sizeof(long double); + _write_raw_data(long double); } static PyObject * From noreply at buildbot.pypy.org Sun Aug 24 19:53:05 2014 From: noreply at buildbot.pypy.org (vbernat) Date: Sun, 24 Aug 2014 19:53:05 +0200 (CEST) Subject: [pypy-commit] cffi fix/unaligned-access: Fix precision loss warning when casting integer types. Message-ID: <20140824175305.C65061C3339@cobra.cs.uni-duesseldorf.de> Author: Vincent Bernat Branch: fix/unaligned-access Changeset: r1558:5acc6f8d77a1 Date: 2014-08-24 10:30 +0200 http://bitbucket.org/cffi/cffi/changeset/5acc6f8d77a1/ Log: Fix precision loss warning when casting integer types. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -776,7 +776,7 @@ #define _write_raw_data(type) \ do { \ if (size == sizeof(type)) { \ - type r = source; \ + type r = (type)source; \ memcpy(target, &r, sizeof(type)); \ return; \ } \ From noreply at buildbot.pypy.org Sun Aug 24 20:11:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 24 Aug 2014 20:11:47 +0200 (CEST) Subject: [pypy-commit] cffi default: gcc 4.9 seems to like to inline this huge function. 
That's a bad idea Message-ID: <20140824181147.CB58F1C1482@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1560:69b1bfc9617a Date: 2014-08-24 20:11 +0200 http://bitbucket.org/cffi/cffi/changeset/69b1bfc9617a/ Log: gcc 4.9 seems to like to inline this huge function. That's a bad idea in my honest opinion, and triggers warnings too. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -1243,6 +1243,16 @@ return _convert_error(init, ct->ct_name, expected); } +#ifdef __GNUC__ +# if __GNUC__ >= 4 +/* Don't go inlining this huge function. Needed because occasionally + it gets inlined in places where is causes a warning: call to + __builtin___memcpy_chk will always overflow destination buffer + (which is places where the 'ct' should never represent such a large + primitive type anyway). */ +__attribute__((noinline)) +# endif +#endif static int convert_from_object(char *data, CTypeDescrObject *ct, PyObject *init) { From noreply at buildbot.pypy.org Sun Aug 24 20:12:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 20:12:35 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: call gc.collect before testing weakref Message-ID: <20140824181235.B7B091C1482@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73033:e9d8dfeb3c6a Date: 2014-08-24 14:11 -0400 http://bitbucket.org/pypy/pypy/changeset/e9d8dfeb3c6a/ Log: call gc.collect before testing weakref diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py --- a/lib-python/2.7/test/test_itertools.py +++ b/lib-python/2.7/test/test_itertools.py @@ -808,6 +808,7 @@ it = islice(it, 1) self.assertIsNotNone(wr()) list(it) # exhaust the iterator + test_support.gc_collect() self.assertIsNone(wr()) def test_takewhile(self): diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- 
a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -246,6 +246,7 @@ it = itertools.islice(it, *args) assert wr() is not None list(it) # exhaust the iterator + import gc; gc.collect() assert wr() is None raises(StopIteration, next, it) From noreply at buildbot.pypy.org Sun Aug 24 22:03:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 22:03:51 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: ensure bz2 file sets name attribute before opening Message-ID: <20140824200351.497AB1C3339@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73034:b8a0ce713c2e Date: 2014-08-24 16:03 -0400 http://bitbucket.org/pypy/pypy/changeset/b8a0ce713c2e/ Log: ensure bz2 file sets name attribute before opening diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -250,6 +250,7 @@ def direct_bz2__init__(self, w_name, mode='r', buffering=-1, compresslevel=9): self.direct_close() + self.w_name = w_name # the stream should always be opened in binary mode if "b" not in mode: mode = mode + "b" diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py --- a/pypy/module/bz2/test/test_bz2_file.py +++ b/pypy/module/bz2/test/test_bz2_file.py @@ -97,6 +97,9 @@ # a large buf size BZ2File(self.temppath, mode='w', buffering=4096) + exc = raises(IOError, BZ2File, 'xxx', 'r') + assert "'xxx'" in str(exc.value) + def test_close(self): from bz2 import BZ2File From noreply at buildbot.pypy.org Sun Aug 24 22:21:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 22:21:26 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix tarfile.gzopen so it properly closes Message-ID: <20140824202126.9B9741C3340@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73035:924647338b7e Date: 2014-08-24 16:21 -0400 
http://bitbucket.org/pypy/pypy/changeset/924647338b7e/ Log: fix tarfile.gzopen so it properly closes diff --git a/lib-python/2.7/tarfile.py b/lib-python/2.7/tarfile.py --- a/lib-python/2.7/tarfile.py +++ b/lib-python/2.7/tarfile.py @@ -1718,10 +1718,10 @@ except (ImportError, AttributeError): raise CompressionError("gzip module is not available") + fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj) + try: - t = cls.taropen(name, mode, - gzip.GzipFile(name, mode, compresslevel, fileobj), - **kwargs) + t = cls.taropen(name, mode, fileobj, **kwargs) except IOError: if mode == 'r': raise ReadError("not a gzip file") From noreply at buildbot.pypy.org Sun Aug 24 22:27:18 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:18 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Context.logical* methods Message-ID: <20140824202718.7CAAB1C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73036:766ef22b8834 Date: 2014-05-25 18:10 +0200 http://bitbucket.org/pypy/pypy/changeset/766ef22b8834/ Log: Add Context.logical* methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -258,6 +258,8 @@ return self.unary_method(space, rmpdec.mpd_qround_to_int, w_x) def sqrt_w(self, space, w_x): return self.unary_method(space, rmpdec.mpd_qsqrt, w_x) + def logical_invert_w(self, space, w_x): + return self.unary_method(space, rmpdec.mpd_qinvert, w_x) # Binary arithmetic functions def binary_method(self, space, mpd_func, w_x, w_y): @@ -301,6 +303,12 @@ return self.binary_method(space, rmpdec.mpd_qrem, w_x, w_y) def remainder_near_w(self, space, w_x, w_y): return self.binary_method(space, rmpdec.mpd_qrem_near, w_x, w_y) + def logical_and_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qand, w_x, w_y) + def logical_or_w(self, space, w_x, w_y): 
+ return self.binary_method(space, rmpdec.mpd_qor, w_x, w_y) + def logical_xor_w(self, space, w_x, w_y): + return self.binary_method(space, rmpdec.mpd_qxor, w_x, w_y) # Ternary operations def power_w(self, space, w_a, w_b, w_modulo=None): @@ -369,6 +377,7 @@ to_integral_exact=interp2app(W_Context.to_integral_exact_w), to_integral_value=interp2app(W_Context.to_integral_value_w), sqrt=interp2app(W_Context.sqrt_w), + logical_invert=interp2app(W_Context.logical_invert_w), # Binary Operations add=interp2app(W_Context.add_w), subtract=interp2app(W_Context.subtract_w), @@ -386,6 +395,9 @@ quantize=interp2app(W_Context.quantize_w), remainder=interp2app(W_Context.remainder_w), remainder_near=interp2app(W_Context.remainder_near_w), + logical_and=interp2app(W_Context.logical_and_w), + logical_or=interp2app(W_Context.logical_or_w), + logical_xor=interp2app(W_Context.logical_xor_w), # Ternary operations power=interp2app(W_Context.power_w), fma=interp2app(W_Context.fma_w), diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -14,6 +14,16 @@ return space.wrap(f) cls.w_random_float = space.wrap(gateway.interp2app(random_float)) + # a few functions from unittest library + def assertTrue(space, w_x): + assert space.is_true(w_x) + cls.w_assertTrue = space.wrap(gateway.interp2app(assertTrue)) + def assertEqual(space, w_x, w_y): + assert space.eq_w(w_x, w_y) + cls.w_assertEqual = space.wrap(gateway.interp2app(assertEqual)) + + cls.w_assertRaises = space.appexec([], """(): return raises""") + def test_context_repr(self): c = self.decimal.DefaultContext.copy() @@ -36,30 +46,30 @@ t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \ "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \ "flags=[], traps=[])" - assert s == t + self.assertEqual(s, t) def test_explicit_context_create_from_float(self): Decimal = self.decimal.Decimal nc = 
self.decimal.Context() r = nc.create_decimal(0.1) - assert type(r) is Decimal - assert str(r) == '0.1000000000000000055511151231' - assert nc.create_decimal(float('nan')).is_qnan() - assert nc.create_decimal(float('inf')).is_infinite() - assert nc.create_decimal(float('-inf')).is_infinite() - assert (str(nc.create_decimal(float('nan'))) == - str(nc.create_decimal('NaN'))) - assert (str(nc.create_decimal(float('inf'))) == - str(nc.create_decimal('Infinity'))) - assert (str(nc.create_decimal(float('-inf'))) == - str(nc.create_decimal('-Infinity'))) - assert (str(nc.create_decimal(float('-0.0'))) == - str(nc.create_decimal('-0'))) + self.assertEqual(type(r), Decimal) + self.assertEqual(str(r), '0.1000000000000000055511151231') + self.assertTrue(nc.create_decimal(float('nan')).is_qnan()) + self.assertTrue(nc.create_decimal(float('inf')).is_infinite()) + self.assertTrue(nc.create_decimal(float('-inf')).is_infinite()) + self.assertEqual(str(nc.create_decimal(float('nan'))), + str(nc.create_decimal('NaN'))) + self.assertEqual(str(nc.create_decimal(float('inf'))), + str(nc.create_decimal('Infinity'))) + self.assertEqual(str(nc.create_decimal(float('-inf'))), + str(nc.create_decimal('-Infinity'))) + self.assertEqual(str(nc.create_decimal(float('-0.0'))), + str(nc.create_decimal('-0'))) nc.prec = 100 for i in range(200): x = self.random_float() - assert x == float(nc.create_decimal(x)) # roundtrip + self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip def test_add(self): Decimal = self.decimal.Decimal @@ -67,11 +77,11 @@ c = Context() d = c.add(Decimal(1), Decimal(1)) - assert c.add(1, 1) == d - assert c.add(Decimal(1), 1) == d - assert c.add(1, Decimal(1)) == d - raises(TypeError, c.add, '1', 1) - raises(TypeError, c.add, 1, '1') + self.assertEqual(c.add(1, 1), d) + self.assertEqual(c.add(Decimal(1), 1), d) + self.assertEqual(c.add(1, Decimal(1)), d) + self.assertRaises(TypeError, c.add, '1', 1) + self.assertRaises(TypeError, c.add, 1, '1') def test_subtract(self): 
Decimal = self.decimal.Decimal @@ -79,11 +89,11 @@ c = Context() d = c.subtract(Decimal(1), Decimal(2)) - assert c.subtract(1, 2) == d - assert c.subtract(Decimal(1), 2) == d - assert c.subtract(1, Decimal(2)) == d - raises(TypeError, c.subtract, '1', 2) - raises(TypeError, c.subtract, 1, '2') + self.assertEqual(c.subtract(1, 2), d) + self.assertEqual(c.subtract(Decimal(1), 2), d) + self.assertEqual(c.subtract(1, Decimal(2)), d) + self.assertRaises(TypeError, c.subtract, '1', 2) + self.assertRaises(TypeError, c.subtract, 1, '2') def test_multiply(self): Decimal = self.decimal.Decimal @@ -91,11 +101,11 @@ c = Context() d = c.multiply(Decimal(1), Decimal(2)) - assert c.multiply(1, 2)== d - assert c.multiply(Decimal(1), 2)== d - assert c.multiply(1, Decimal(2))== d - raises(TypeError, c.multiply, '1', 2) - raises(TypeError, c.multiply, 1, '2') + self.assertEqual(c.multiply(1, 2), d) + self.assertEqual(c.multiply(Decimal(1), 2), d) + self.assertEqual(c.multiply(1, Decimal(2)), d) + self.assertRaises(TypeError, c.multiply, '1', 2) + self.assertRaises(TypeError, c.multiply, 1, '2') def test_divide(self): Decimal = self.decimal.Decimal @@ -103,8 +113,54 @@ c = Context() d = c.divide(Decimal(1), Decimal(2)) - assert c.divide(1, 2)== d - assert c.divide(Decimal(1), 2)== d - assert c.divide(1, Decimal(2))== d - raises(TypeError, c.divide, '1', 2) - raises(TypeError, c.divide, 1, '2') + self.assertEqual(c.divide(1, 2), d) + self.assertEqual(c.divide(Decimal(1), 2), d) + self.assertEqual(c.divide(1, Decimal(2)), d) + self.assertRaises(TypeError, c.divide, '1', 2) + self.assertRaises(TypeError, c.divide, 1, '2') + + def test_logical_and(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.logical_and(Decimal(1), Decimal(1)) + self.assertEqual(c.logical_and(1, 1), d) + self.assertEqual(c.logical_and(Decimal(1), 1), d) + self.assertEqual(c.logical_and(1, Decimal(1)), d) + self.assertRaises(TypeError, c.logical_and, '1', 1) + 
self.assertRaises(TypeError, c.logical_and, 1, '1') + + def test_logical_invert(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.logical_invert(Decimal(1000)) + self.assertEqual(c.logical_invert(1000), d) + self.assertRaises(TypeError, c.logical_invert, '1000') + + def test_logical_or(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.logical_or(Decimal(1), Decimal(1)) + self.assertEqual(c.logical_or(1, 1), d) + self.assertEqual(c.logical_or(Decimal(1), 1), d) + self.assertEqual(c.logical_or(1, Decimal(1)), d) + self.assertRaises(TypeError, c.logical_or, '1', 1) + self.assertRaises(TypeError, c.logical_or, 1, '1') + + def test_logical_xor(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.logical_xor(Decimal(1), Decimal(1)) + self.assertEqual(c.logical_xor(1, 1), d) + self.assertEqual(c.logical_xor(Decimal(1), 1), d) + self.assertEqual(c.logical_xor(1, Decimal(1)), d) + self.assertRaises(TypeError, c.logical_xor, '1', 1) + self.assertRaises(TypeError, c.logical_xor, 1, '1') + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -56,7 +56,8 @@ "mpd_qadd", "mpd_qsub", "mpd_qmul", "mpd_qdiv", "mpd_qdivint", "mpd_qrem", "mpd_qrem_near", "mpd_qdivmod", "mpd_qpow", "mpd_qpowmod", "mpd_qfma", - "mpd_qexp", "mpd_qln", "mpd_qlog10", "mpd_qsqrt", + "mpd_qexp", "mpd_qln", "mpd_qlog10", "mpd_qsqrt", "mpd_qinvert", + "mpd_qand", "mpd_qor", "mpd_qxor", "mpd_qcopy_sign", "mpd_qround_to_int", "mpd_qround_to_intx", ], @@ -313,6 +314,15 @@ mpd_qrem_near = external( 'mpd_qrem_near', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qand = external( + 'mpd_qand', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qor = external( + 'mpd_qor', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) 
+mpd_qxor = external( + 'mpd_qxor', + [MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qdivmod = external( 'mpd_qdivmod', [MPD_PTR, MPD_PTR, MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], @@ -341,6 +351,9 @@ mpd_qsqrt = external( 'mpd_qsqrt', [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) +mpd_qinvert = external( + 'mpd_qinvert', + [MPD_PTR, MPD_PTR, MPD_CONTEXT_PTR, rffi.UINTP], lltype.Void) mpd_qcopy_sign = external( 'mpd_qcopy_sign', From noreply at buildbot.pypy.org Sun Aug 24 22:27:19 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:19 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: The "zero" parameter of lltype.malloc() is checked to be constant. Message-ID: <20140824202719.C26951C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73037:bf6ff9de8a11 Date: 2014-05-25 18:40 +0200 http://bitbucket.org/pypy/pypy/changeset/bf6ff9de8a11/ Log: The "zero" parameter of lltype.malloc() is checked to be constant. This constraint has to be propagated up to scoped_alloc(), to generate specialized versions. diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -2002,9 +2002,9 @@ if track_allocation: leakfinder.remember_free(p._obj0) -def _make_scoped_allocator(T): +def _make_scoped_allocator(T, zero): class ScopedAlloc: - def __init__(self, n=None, zero=False): + def __init__(self, n=None): if n is None: self.buf = malloc(T, flavor='raw', zero=zero) else: @@ -2028,8 +2028,8 @@ ...use array... ...it's freed now. 
""" - return _make_scoped_allocator(T)(n=n, zero=zero) -scoped_alloc._annspecialcase_ = 'specialize:arg(0)' + return _make_scoped_allocator(T, zero)(n=n) +scoped_alloc._annspecialcase_ = 'specialize:arg(0, 2)' def functionptr(TYPE, name, **attrs): if not isinstance(TYPE, FuncType): From noreply at buildbot.pypy.org Sun Aug 24 22:27:20 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:20 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Context.is_* methods Message-ID: <20140824202720.EF91B1C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73038:1df130158ca5 Date: 2014-05-25 19:07 +0200 http://bitbucket.org/pypy/pypy/changeset/1df130158ca5/ Log: Add Context.is_* methods diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -344,6 +344,24 @@ W_Context.__init__(w_result, space) return w_result +def make_bool_function(mpd_func): + @unwrap_spec(w_context=W_Context) + def func_w(space, w_context, w_x): + from pypy.module._decimal import interp_decimal + w_x = interp_decimal.convert_op_raise(space, w_context, w_x) + res = getattr(rmpdec, mpd_func)(w_x.mpd, w_context.ctx) + return space.wrap(bool(res)) + return interp2app(func_w) + +def make_bool_function_noctx(mpd_func): + @unwrap_spec(w_context=W_Context) + def func_w(space, w_context, w_x): + from pypy.module._decimal import interp_decimal + w_x = interp_decimal.convert_op_raise(space, w_context, w_x) + res = getattr(rmpdec, mpd_func)(w_x.mpd) + return space.wrap(bool(res)) + return interp2app(func_w) + W_Context.typedef = TypeDef( 'Context', __new__ = interp2app(descr_new_context), @@ -401,6 +419,16 @@ # Ternary operations power=interp2app(W_Context.power_w), fma=interp2app(W_Context.fma_w), + # Boolean operations + is_signed=make_bool_function_noctx('mpd_issigned'), + 
is_zero=make_bool_function_noctx('mpd_iszero'), + is_normal=make_bool_function('mpd_isnormal'), + is_subnormal=make_bool_function('mpd_issubnormal'), + is_finite=make_bool_function_noctx('mpd_isfinite'), + is_infinite=make_bool_function_noctx('mpd_isinfinite'), + is_nan=make_bool_function_noctx('mpd_isnan'), + is_qnan=make_bool_function_noctx('mpd_isqnan'), + is_snan=make_bool_function_noctx('mpd_issnan'), ) diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -164,3 +164,84 @@ self.assertRaises(TypeError, c.logical_xor, '1', 1) self.assertRaises(TypeError, c.logical_xor, 1, '1') + def test_is_finite(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_finite(Decimal(10)) + self.assertEqual(c.is_finite(10), d) + self.assertRaises(TypeError, c.is_finite, '10') + + def test_is_infinite(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_infinite(Decimal(10)) + self.assertEqual(c.is_infinite(10), d) + self.assertRaises(TypeError, c.is_infinite, '10') + + def test_is_nan(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_nan(Decimal(10)) + self.assertEqual(c.is_nan(10), d) + self.assertRaises(TypeError, c.is_nan, '10') + + def test_is_normal(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_normal(Decimal(10)) + self.assertEqual(c.is_normal(10), d) + self.assertRaises(TypeError, c.is_normal, '10') + + def test_is_qnan(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_qnan(Decimal(10)) + self.assertEqual(c.is_qnan(10), d) + self.assertRaises(TypeError, c.is_qnan, '10') + + def test_is_signed(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = 
Context() + d = c.is_signed(Decimal(10)) + self.assertEqual(c.is_signed(10), d) + self.assertRaises(TypeError, c.is_signed, '10') + + def test_is_snan(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_snan(Decimal(10)) + self.assertEqual(c.is_snan(10), d) + self.assertRaises(TypeError, c.is_snan, '10') + + def test_is_subnormal(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_subnormal(Decimal(10)) + self.assertEqual(c.is_subnormal(10), d) + self.assertRaises(TypeError, c.is_subnormal, '10') + + def test_is_zero(self): + Decimal = self.decimal.Decimal + Context = self.decimal.Context + + c = Context() + d = c.is_zero(Decimal(10)) + self.assertEqual(c.is_zero(10), d) + self.assertRaises(TypeError, c.is_zero, '10') + diff --git a/rpython/rlib/rmpdec.py b/rpython/rlib/rmpdec.py --- a/rpython/rlib/rmpdec.py +++ b/rpython/rlib/rmpdec.py @@ -46,7 +46,9 @@ "mpd_maxcontext", "mpd_qnew", "mpd_del", "mpd_to_sci", "mpd_to_sci_size", - "mpd_iszero", "mpd_isnegative", "mpd_isinfinite", "mpd_isspecial", + "mpd_iszero", "mpd_isnegative", "mpd_issigned", + "mpd_isfinite", "mpd_isinfinite", + "mpd_isnormal", "mpd_issubnormal", "mpd_isspecial", "mpd_isnan", "mpd_issnan", "mpd_isqnan", "mpd_qcmp", "mpd_qcompare", "mpd_qcompare_signal", "mpd_qmin", "mpd_qmax", "mpd_qmin_mag", "mpd_qmax_mag", @@ -233,8 +235,16 @@ 'mpd_iszero', [MPD_PTR], rffi.INT) mpd_isnegative = external( 'mpd_isnegative', [MPD_PTR], rffi.INT) +mpd_issigned = external( + 'mpd_issigned', [MPD_PTR], rffi.INT) +mpd_isfinite = external( + 'mpd_isfinite', [MPD_PTR], rffi.INT) mpd_isinfinite = external( 'mpd_isinfinite', [MPD_PTR], rffi.INT) +mpd_isnormal = external( + 'mpd_isnormal', [MPD_PTR, MPD_CONTEXT_PTR], rffi.INT) +mpd_issubnormal = external( + 'mpd_issubnormal', [MPD_PTR, MPD_CONTEXT_PTR], rffi.INT) mpd_isspecial = external( 'mpd_isspecial', [MPD_PTR], rffi.INT) mpd_isnan = external( From noreply at 
buildbot.pypy.org Sun Aug 24 22:27:22 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:22 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Simplify, generate more code. Message-ID: <20140824202722.281F31C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73039:81ca8a34aaca Date: 2014-05-25 19:38 +0200 http://bitbucket.org/pypy/pypy/changeset/81ca8a34aaca/ Log: Simplify, generate more code. diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -223,92 +223,9 @@ self.capitals, rffi.cast(lltype.Signed, self.ctx.c_clamp), flags, traps)) - # Unary arithmetic functions - def unary_method(self, space, mpd_func, w_x): - from pypy.module._decimal import interp_decimal - w_a = interp_decimal.convert_op_raise(space, self, w_x) - w_result = interp_decimal.W_Decimal.allocate(space) - with self.catch_status(space) as (ctx, status_ptr): - mpd_func(w_result.mpd, w_a.mpd, ctx, status_ptr) - return w_result - - def abs_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qabs, w_x) - def exp_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qexp, w_x) - def ln_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qln, w_x) - def log10_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qlog10, w_x) - def minus_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qminus, w_x) - def next_minus_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qnext_minus, w_x) - def next_plus_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qnext_plus, w_x) - def normalize_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qreduce, w_x) - def plus_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qplus, w_x) - def to_integral_w(self, space, w_x): - 
return self.unary_method(space, rmpdec.mpd_qround_to_int, w_x) - def to_integral_exact_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qround_to_intx, w_x) - def to_integral_value_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qround_to_int, w_x) - def sqrt_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qsqrt, w_x) - def logical_invert_w(self, space, w_x): - return self.unary_method(space, rmpdec.mpd_qinvert, w_x) - - # Binary arithmetic functions - def binary_method(self, space, mpd_func, w_x, w_y): - from pypy.module._decimal import interp_decimal - w_a, w_b = interp_decimal.convert_binop_raise(space, self, w_x, w_y) - w_result = interp_decimal.W_Decimal.allocate(space) - with self.catch_status(space) as (ctx, status_ptr): - mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) - return w_result - - def add_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qadd, w_x, w_y) - def subtract_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qsub, w_x, w_y) - def multiply_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qmul, w_x, w_y) - def divide_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qdiv, w_x, w_y) - def compare_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qcompare, w_x, w_y) - def compare_signal_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qcompare_signal, w_x, w_y) - def divide_int_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qdivint, w_x, w_y) def divmod_w(self, space, w_x, w_y): from pypy.module._decimal import interp_decimal return interp_decimal.W_Decimal.divmod_impl(space, self, w_x, w_y) - def max_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qmax, w_x, w_y) - def max_mag_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qmax_mag, w_x, w_y) - def min_w(self, space, w_x, 
w_y): - return self.binary_method(space, rmpdec.mpd_qmin, w_x, w_y) - def min_mag_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qmin_mag, w_x, w_y) - def next_toward_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qnext_toward, w_x, w_y) - def quantize_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qquantize, w_x, w_y) - def remainder_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qrem, w_x, w_y) - def remainder_near_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qrem_near, w_x, w_y) - def logical_and_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qand, w_x, w_y) - def logical_or_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qor, w_x, w_y) - def logical_xor_w(self, space, w_x, w_y): - return self.binary_method(space, rmpdec.mpd_qxor, w_x, w_y) # Ternary operations def power_w(self, space, w_a, w_b, w_modulo=None): @@ -344,21 +261,48 @@ W_Context.__init__(w_result, space) return w_result -def make_bool_function(mpd_func): +def make_unary_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + @unwrap_spec(w_context=W_Context) + def func_w(space, w_context, w_x): + from pypy.module._decimal import interp_decimal + w_a = interp_decimal.convert_op_raise(space, w_context, w_x) + w_result = interp_decimal.W_Decimal.allocate(space) + with w_context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, w_a.mpd, ctx, status_ptr) + return w_result + return interp2app(func_w) + +def make_binary_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + @unwrap_spec(w_context=W_Context) + def func_w(space, w_context, w_x, w_y): + from pypy.module._decimal import interp_decimal + w_a, w_b = interp_decimal.convert_binop_raise( + space, w_context, w_x, w_y) + w_result = interp_decimal.W_Decimal.allocate(space) + with w_context.catch_status(space) as (ctx, status_ptr): + 
mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) + return w_result + return interp2app(func_w) + +def make_bool_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) @unwrap_spec(w_context=W_Context) def func_w(space, w_context, w_x): from pypy.module._decimal import interp_decimal w_x = interp_decimal.convert_op_raise(space, w_context, w_x) - res = getattr(rmpdec, mpd_func)(w_x.mpd, w_context.ctx) + res = mpd_func(w_x.mpd, w_context.ctx) return space.wrap(bool(res)) return interp2app(func_w) -def make_bool_function_noctx(mpd_func): +def make_bool_method_noctx(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) @unwrap_spec(w_context=W_Context) def func_w(space, w_context, w_x): from pypy.module._decimal import interp_decimal w_x = interp_decimal.convert_op_raise(space, w_context, w_x) - res = getattr(rmpdec, mpd_func)(w_x.mpd) + res = mpd_func(w_x.mpd) return space.wrap(bool(res)) return interp2app(func_w) @@ -382,53 +326,53 @@ clear_traps=interp2app(W_Context.clear_traps_w), create_decimal=interp2app(W_Context.create_decimal_w), # Unary Operations - abs=interp2app(W_Context.abs_w), - exp=interp2app(W_Context.exp_w), - ln=interp2app(W_Context.ln_w), - log10=interp2app(W_Context.log10_w), - minus=interp2app(W_Context.minus_w), - next_minus=interp2app(W_Context.next_minus_w), - next_plus=interp2app(W_Context.next_plus_w), - normalize=interp2app(W_Context.normalize_w), - plus=interp2app(W_Context.plus_w), - to_integral=interp2app(W_Context.to_integral_w), - to_integral_exact=interp2app(W_Context.to_integral_exact_w), - to_integral_value=interp2app(W_Context.to_integral_value_w), - sqrt=interp2app(W_Context.sqrt_w), - logical_invert=interp2app(W_Context.logical_invert_w), + abs=make_unary_method('mpd_qabs'), + exp=make_unary_method('mpd_qexp'), + ln=make_unary_method('mpd_qln'), + log10=make_unary_method('mpd_qlog10'), + minus=make_unary_method('mpd_qminus'), + next_minus=make_unary_method('mpd_qnext_minus'), + 
next_plus=make_unary_method('mpd_qnext_plus'), + normalize=make_unary_method('mpd_qreduce'), + plus=make_unary_method('mpd_qplus'), + to_integral=make_unary_method('mpd_qround_to_int'), + to_integral_exact=make_unary_method('mpd_qround_to_intx'), + to_integral_value=make_unary_method('mpd_qround_to_int'), + sqrt=make_unary_method('mpd_qsqrt'), + logical_invert=make_unary_method('mpd_qinvert'), # Binary Operations - add=interp2app(W_Context.add_w), - subtract=interp2app(W_Context.subtract_w), - multiply=interp2app(W_Context.multiply_w), - divide=interp2app(W_Context.divide_w), - compare=interp2app(W_Context.compare_w), - compare_signal=interp2app(W_Context.compare_signal_w), - divide_int=interp2app(W_Context.divide_int_w), + add=make_binary_method('mpd_qadd'), + subtract=make_binary_method('mpd_qsub'), + multiply=make_binary_method('mpd_qmul'), + divide=make_binary_method('mpd_qdiv'), + compare=make_binary_method('mpd_qcompare'), + compare_signal=make_binary_method('mpd_qcompare_signal'), + divide_int=make_binary_method('mpd_qdivint'), divmod=interp2app(W_Context.divmod_w), - max=interp2app(W_Context.max_w), - max_mag=interp2app(W_Context.max_mag_w), - min=interp2app(W_Context.min_w), - min_mag=interp2app(W_Context.min_mag_w), - next_toward=interp2app(W_Context.next_toward_w), - quantize=interp2app(W_Context.quantize_w), - remainder=interp2app(W_Context.remainder_w), - remainder_near=interp2app(W_Context.remainder_near_w), - logical_and=interp2app(W_Context.logical_and_w), - logical_or=interp2app(W_Context.logical_or_w), - logical_xor=interp2app(W_Context.logical_xor_w), + max=make_binary_method('mpd_qmax'), + max_mag=make_binary_method('mpd_qmax_mag'), + min=make_binary_method('mpd_qmin'), + min_mag=make_binary_method('mpd_qmin_mag'), + next_toward=make_binary_method('mpd_qnext_toward'), + quantize=make_binary_method('mpd_qquantize'), + remainder=make_binary_method('mpd_qrem'), + remainder_near=make_binary_method('mpd_qrem_near'), + 
logical_and=make_binary_method('mpd_qand'), + logical_or=make_binary_method('mpd_qor'), + logical_xor=make_binary_method('mpd_qxor'), # Ternary operations power=interp2app(W_Context.power_w), fma=interp2app(W_Context.fma_w), # Boolean operations - is_signed=make_bool_function_noctx('mpd_issigned'), - is_zero=make_bool_function_noctx('mpd_iszero'), - is_normal=make_bool_function('mpd_isnormal'), - is_subnormal=make_bool_function('mpd_issubnormal'), - is_finite=make_bool_function_noctx('mpd_isfinite'), - is_infinite=make_bool_function_noctx('mpd_isinfinite'), - is_nan=make_bool_function_noctx('mpd_isnan'), - is_qnan=make_bool_function_noctx('mpd_isqnan'), - is_snan=make_bool_function_noctx('mpd_issnan'), + is_signed=make_bool_method_noctx('mpd_issigned'), + is_zero=make_bool_method_noctx('mpd_iszero'), + is_normal=make_bool_method('mpd_isnormal'), + is_subnormal=make_bool_method('mpd_issubnormal'), + is_finite=make_bool_method_noctx('mpd_isfinite'), + is_infinite=make_bool_method_noctx('mpd_isinfinite'), + is_nan=make_bool_method_noctx('mpd_isnan'), + is_qnan=make_bool_method_noctx('mpd_isqnan'), + is_snan=make_bool_method_noctx('mpd_issnan'), ) From noreply at buildbot.pypy.org Sun Aug 24 22:27:23 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:23 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Context.__init__, and create_from_float Message-ID: <20140824202723.6E4901C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73040:189b465ed390 Date: 2014-05-25 21:16 +0200 http://bitbucket.org/pypy/pypy/changeset/189b465ed390/ Log: Add Context.__init__, and create_from_float diff --git a/pypy/module/_decimal/interp_context.py b/pypy/module/_decimal/interp_context.py --- a/pypy/module/_decimal/interp_context.py +++ b/pypy/module/_decimal/interp_context.py @@ -112,6 +112,28 @@ if self.ctx: lltype.free(self.ctx, flavor='raw', track_allocation=False) + def descr_init(self, space, + 
w_prec=None, w_rounding=None, w_Emin=None, w_Emax=None, + w_capitals=None, w_clamp=None, w_flags=None, w_traps=None): + if not space.is_none(w_prec): + self.set_prec(space, w_prec) + if not space.is_none(w_rounding): + self.set_rounding(space, w_rounding) + if not space.is_none(w_Emin): + self.set_emin(space, w_Emin) + if not space.is_none(w_Emax): + self.set_emax(space, w_Emax) + if not space.is_none(w_capitals): + self.set_capitals(space, w_capitals) + if not space.is_none(w_clamp): + self.set_clamp(space, w_clamp) + if not space.is_none(w_flags): + flags = interp_signals.list_as_flags(space, w_flags) + rffi.setintfield(self.ctx, 'c_status', flags) + if not space.is_none(w_traps): + flags = interp_signals.list_as_flags(space, w_traps) + rffi.setintfield(self.ctx, 'c_traps', flags) + def addstatus(self, space, status): "Add resulting status to context, and eventually raise an exception." new_status = (rffi.cast(lltype.Signed, status) | @@ -204,6 +226,11 @@ return interp_decimal.decimal_from_object( space, None, w_value, self, exact=False) + def create_decimal_from_float_w(self, space, w_value=None): + from pypy.module._decimal import interp_decimal + return interp_decimal.decimal_from_float( + space, None, w_value, self, exact=False) + def descr_repr(self, space): # Rounding string. 
rounding = rffi.cast(lltype.Signed, self.ctx.c_round) @@ -309,6 +336,7 @@ W_Context.typedef = TypeDef( 'Context', __new__ = interp2app(descr_new_context), + __init__ = interp2app(W_Context.descr_init), # Attributes flags=interp_attrproperty_w('w_flags', W_Context), traps=interp_attrproperty_w('w_traps', W_Context), @@ -325,6 +353,7 @@ clear_flags=interp2app(W_Context.clear_flags_w), clear_traps=interp2app(W_Context.clear_traps_w), create_decimal=interp2app(W_Context.create_decimal_w), + create_decimal_from_float=interp2app(W_Context.create_decimal_from_float_w), # Unary Operations abs=make_unary_method('mpd_qabs'), exp=make_unary_method('mpd_qexp'), diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -47,6 +47,12 @@ "invalid error flag") return OperationError(w_exc, space.w_None) +def list_as_flags(space, w_list): + flags = 0 + for w_item in space.unpackiterable(w_list): + flags |= exception_as_flag(space, w_item) + return flags + def exception_as_flag(space, w_exc): for name, flag in SIGNAL_MAP: if space.is_w(w_exc, getattr(get(space), 'w_' + name)): diff --git a/pypy/module/_decimal/test/test_context.py b/pypy/module/_decimal/test/test_context.py --- a/pypy/module/_decimal/test/test_context.py +++ b/pypy/module/_decimal/test/test_context.py @@ -71,6 +71,35 @@ x = self.random_float() self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip + def test_create_decimal_from_float(self): + import math + Decimal = self.decimal.Decimal + Context = self.decimal.Context + Inexact = self.decimal.Inexact + + context = Context(prec=5, rounding=self.decimal.ROUND_DOWN) + self.assertEqual( + context.create_decimal_from_float(math.pi), + Decimal('3.1415') + ) + context = Context(prec=5, rounding=self.decimal.ROUND_UP) + self.assertEqual( + context.create_decimal_from_float(math.pi), + Decimal('3.1416') + ) + context = Context(prec=5, 
traps=[Inexact]) + self.assertRaises( + Inexact, + context.create_decimal_from_float, + math.pi + ) + self.assertEqual(repr(context.create_decimal_from_float(-0.0)), + "Decimal('-0')") + self.assertEqual(repr(context.create_decimal_from_float(1.0)), + "Decimal('1')") + self.assertEqual(repr(context.create_decimal_from_float(10)), + "Decimal('10')") + def test_add(self): Decimal = self.decimal.Decimal Context = self.decimal.Context From noreply at buildbot.pypy.org Sun Aug 24 22:27:24 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:24 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add missing exceptions Message-ID: <20140824202724.9A6001C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73041:54777ea9fbaf Date: 2014-05-25 21:31 +0200 http://bitbucket.org/pypy/pypy/changeset/54777ea9fbaf/ Log: Add missing exceptions diff --git a/pypy/module/_decimal/__init__.py b/pypy/module/_decimal/__init__.py --- a/pypy/module/_decimal/__init__.py +++ b/pypy/module/_decimal/__init__.py @@ -29,4 +29,8 @@ interpleveldefs[name] = 'space.wrap(%r)' % name for name, flag in interp_signals.SIGNAL_MAP: interpleveldefs[name] = 'interp_signals.get(space).w_%s' % name + for name, flag in interp_signals.COND_MAP: + if name == 'InvalidOperation': + pass + interpleveldefs[name] = 'interp_signals.get(space).w_%s' % name diff --git a/pypy/module/_decimal/interp_signals.py b/pypy/module/_decimal/interp_signals.py --- a/pypy/module/_decimal/interp_signals.py +++ b/pypy/module/_decimal/interp_signals.py @@ -127,5 +127,22 @@ getattr(self, 'w_' + name) for name, flag in SIGNAL_MAP]) + # Add remaining exceptions, inherit from InvalidOperation + for name, flag in COND_MAP: + if name == 'InvalidOperation': + # Unfortunately, InvalidOperation is a signal that + # comprises several conditions, including + # InvalidOperation! Naming the signal + # IEEEInvalidOperation would prevent the confusion. 
+ continue + if name == 'DivisionUndefined': + w_bases = space.newtuple([self.w_InvalidOperation, + space.w_ZeroDivisionError]) + else: + w_bases = space.newtuple([self.w_InvalidOperation]) + setattr(self, 'w_' + name, + space.call_function( + space.w_type, space.wrap(name), w_bases, space.newdict())) + def get(space): return space.fromcache(SignalState) diff --git a/pypy/module/_decimal/test/test_module.py b/pypy/module/_decimal/test/test_module.py --- a/pypy/module/_decimal/test/test_module.py +++ b/pypy/module/_decimal/test/test_module.py @@ -51,6 +51,45 @@ assert issubclass(ex, _decimal.DecimalException) assert issubclass(ex, ArithmeticError) + def test_exception_hierarchy(self): + import _decimal as decimal + DecimalException = decimal.DecimalException + InvalidOperation = decimal.InvalidOperation + FloatOperation = decimal.FloatOperation + DivisionByZero = decimal.DivisionByZero + Overflow = decimal.Overflow + Underflow = decimal.Underflow + Subnormal = decimal.Subnormal + Inexact = decimal.Inexact + Rounded = decimal.Rounded + Clamped = decimal.Clamped + + assert issubclass(DecimalException, ArithmeticError) + + assert issubclass(InvalidOperation, DecimalException) + assert issubclass(FloatOperation, DecimalException) + assert issubclass(FloatOperation, TypeError) + assert issubclass(DivisionByZero, DecimalException) + assert issubclass(DivisionByZero, ZeroDivisionError) + assert issubclass(Overflow, Rounded) + assert issubclass(Overflow, Inexact) + assert issubclass(Overflow, DecimalException) + assert issubclass(Underflow, Inexact) + assert issubclass(Underflow, Rounded) + assert issubclass(Underflow, Subnormal) + assert issubclass(Underflow, DecimalException) + + assert issubclass(Subnormal, DecimalException) + assert issubclass(Inexact, DecimalException) + assert issubclass(Rounded, DecimalException) + assert issubclass(Clamped, DecimalException) + + assert issubclass(decimal.ConversionSyntax, InvalidOperation) + assert 
issubclass(decimal.DivisionImpossible, InvalidOperation) + assert issubclass(decimal.DivisionUndefined, InvalidOperation) + assert issubclass(decimal.DivisionUndefined, ZeroDivisionError) + assert issubclass(decimal.InvalidContext, InvalidOperation) + def test_threads(self): import _decimal assert (_decimal.HAVE_THREADS is False or From noreply at buildbot.pypy.org Sun Aug 24 22:27:25 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:25 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add Decimal.from_float() Message-ID: <20140824202725.C9DF91C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73042:cf37d2846705 Date: 2014-05-25 21:50 +0200 http://bitbucket.org/pypy/pypy/changeset/cf37d2846705/ Log: Add Decimal.from_float() diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -571,6 +571,10 @@ return interp_context.getcontext(space) return space.interp_w(interp_context.W_Context, w_context) +def decimal_from_float_w(space, w_cls, w_float): + context = interp_context.getcontext(space) + return decimal_from_float(space, w_cls, w_float, context, exact=True) + # Constructors def decimal_from_ssize(space, w_subtype, value, context, exact=True): w_result = W_Decimal.allocate(space, w_subtype) @@ -722,6 +726,10 @@ return w_value.apply(space, context) def decimal_from_float(space, w_subtype, w_value, context, exact=True): + if space.isinstance_w(w_value, space.w_int): + value = space.bigint_w(w_value) + return decimal_from_bigint(space, w_subtype, value, context, + exact=exact) value = space.float_w(w_value) sign = 0 if rfloat.copysign(1.0, value) == 1.0 else 1 @@ -849,4 +857,5 @@ is_infinite = interp2app(W_Decimal.is_infinite_w), # as_tuple = interp2app(W_Decimal.as_tuple_w), + from_float = interp2app(decimal_from_float_w, as_classmethod=True), ) diff 
--git a/pypy/module/_decimal/test/test_decimal.py b/pypy/module/_decimal/test/test_decimal.py --- a/pypy/module/_decimal/test/test_decimal.py +++ b/pypy/module/_decimal/test/test_decimal.py @@ -14,6 +14,15 @@ return space.wrap(f) cls.w_random_float = space.wrap(gateway.interp2app(random_float)) + # a few functions from unittest library + cls.w_assertTrue = space.appexec([], """(): + def assertTrue(x): assert x + return assertTrue""") + cls.w_assertEqual = space.appexec([], """(): + def assertEqual(x, y): assert x == y + return assertEqual""") + cls.w_assertRaises = space.appexec([], """(): return raises""") + def test_explicit_empty(self): Decimal = self.Decimal assert Decimal() == Decimal("0") @@ -206,6 +215,34 @@ x = self.random_float() assert x == float(Decimal(x)) # roundtrip + def test_from_float(self): + Decimal = self.decimal.Decimal + + class MyDecimal(Decimal): + pass + + self.assertTrue(issubclass(MyDecimal, Decimal)) + + r = MyDecimal.from_float(0.1) + self.assertEqual(type(r), MyDecimal) + self.assertEqual(str(r), + '0.1000000000000000055511151231257827021181583404541015625') + bigint = 12345678901234567890123456789 + self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint)) + self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan()) + self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite()) + self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite()) + self.assertEqual(str(MyDecimal.from_float(float('nan'))), + str(Decimal('NaN'))) + self.assertEqual(str(MyDecimal.from_float(float('inf'))), + str(Decimal('Infinity'))) + self.assertEqual(str(MyDecimal.from_float(float('-inf'))), + str(Decimal('-Infinity'))) + self.assertRaises(TypeError, MyDecimal.from_float, 'abc') + for i in range(200): + x = self.random_float() + self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip + def test_explicit_context_create_decimal(self): Decimal = self.decimal.Decimal InvalidOperation = self.decimal.InvalidOperation From noreply at 
buildbot.pypy.org Sun Aug 24 22:27:27 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:27 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Decimal.__trunc__ Message-ID: <20140824202727.028851C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73043:81d69df48cb2 Date: 2014-05-25 21:52 +0200 http://bitbucket.org/pypy/pypy/changeset/81d69df48cb2/ Log: Decimal.__trunc__ diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -221,6 +221,10 @@ context = interp_context.getcontext(space) return self.to_long(space, context, rmpdec.MPD_ROUND_DOWN) + def descr_trunc(self, space): + context = interp_context.getcontext(space) + return self.to_long(space, context, rmpdec.MPD_ROUND_DOWN) + def descr_floor(self, space): context = interp_context.getcontext(space) return self.to_long(space, context, rmpdec.MPD_ROUND_FLOOR) @@ -815,6 +819,7 @@ __bool__ = interp2app(W_Decimal.descr_bool), __float__ = interp2app(W_Decimal.descr_float), __int__ = interp2app(W_Decimal.descr_int), + __trunc__ = interp2app(W_Decimal.descr_trunc), __floor__ = interp2app(W_Decimal.descr_floor), __ceil__ = interp2app(W_Decimal.descr_ceil), __round__ = interp2app(W_Decimal.descr_round), From noreply at buildbot.pypy.org Sun Aug 24 22:27:28 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:28 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Less duplicated code in interp_decimal Message-ID: <20140824202728.2F4521C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73044:d45a453c7ac0 Date: 2014-05-26 14:10 +0200 http://bitbucket.org/pypy/pypy/changeset/d45a453c7ac0/ Log: Less duplicated code in interp_decimal diff --git a/pypy/module/_decimal/interp_decimal.py b/pypy/module/_decimal/interp_decimal.py --- 
a/pypy/module/_decimal/interp_decimal.py +++ b/pypy/module/_decimal/interp_decimal.py @@ -302,33 +302,6 @@ return self.compare(space, w_other, 'ge') # Binary operations - - def descr_add(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qadd, self, w_other) - def descr_sub(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qsub, self, w_other) - def descr_mul(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qmul, self, w_other) - def descr_truediv(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qdiv, self, w_other) - def descr_floordiv(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qdivint, self, w_other) - def descr_mod(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qrem, self, w_other) - - def descr_radd(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qadd, w_other, self) - def descr_rsub(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qsub, w_other, self) - def descr_rmul(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qmul, w_other, self) - def descr_rtruediv(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qdiv, w_other, self) - def descr_rfloordiv(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qdivint, w_other, self) - def descr_rmod(self, space, w_other): - return binary_number_method(space, rmpdec.mpd_qrem, w_other, self) - @staticmethod def divmod_impl(space, context, w_x, w_y): w_err, w_a, w_b = convert_binop(space, context, w_x, w_y) @@ -377,21 +350,6 @@ def descr_rpow(self, space, w_other): return W_Decimal.pow_impl(space, w_other, self, None) - # Unary operations - def unary_number_method(self, space, mpd_func): - context = interp_context.getcontext(space) - w_result = W_Decimal.allocate(space) - with context.catch_status(space) as (ctx, status_ptr): - mpd_func(w_result.mpd, self.mpd, ctx, status_ptr) - 
return w_result - - def descr_neg(self, space): - return self.unary_number_method(space, rmpdec.mpd_qminus) - def descr_pos(self, space): - return self.unary_number_method(space, rmpdec.mpd_qplus) - def descr_abs(self, space): - return self.unary_number_method(space, rmpdec.mpd_qabs) - def copy_sign_w(self, space, w_other, w_context=None): context = convert_context(space, w_context) w_other = convert_op_raise(space, context, w_other) @@ -559,6 +517,17 @@ return None, w_v, w_w +def make_unary_number_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + def descr_method(space, w_self): + self = space.interp_w(W_Decimal, w_self) + context = interp_context.getcontext(space) + w_result = W_Decimal.allocate(space) + with context.catch_status(space) as (ctx, status_ptr): + mpd_func(w_result.mpd, self.mpd, ctx, status_ptr) + return w_result + return interp2app(descr_method) + def binary_number_method(space, mpd_func, w_x, w_y): context = interp_context.getcontext(space) @@ -570,6 +539,18 @@ mpd_func(w_result.mpd, w_a.mpd, w_b.mpd, ctx, status_ptr) return w_result +def make_binary_number_method(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + def descr_method(space, w_self, w_other): + return binary_number_method(space, mpd_func, w_self, w_other) + return interp2app(descr_method) + +def make_binary_number_method_right(mpd_func_name): + mpd_func = getattr(rmpdec, mpd_func_name) + def descr_method(space, w_self, w_other): + return binary_number_method(space, mpd_func, w_other, w_self) + return interp2app(descr_method) + def convert_context(space, w_context): if w_context is None: return interp_context.getcontext(space) @@ -830,26 +811,26 @@ __ge__ = interp2app(W_Decimal.descr_ge), __lt__ = interp2app(W_Decimal.descr_lt), __gt__ = interp2app(W_Decimal.descr_gt), - # - __pos__ = interp2app(W_Decimal.descr_pos), - __neg__ = interp2app(W_Decimal.descr_neg), - __abs__ = interp2app(W_Decimal.descr_abs), - # - __add__ = interp2app(W_Decimal.descr_add), - 
__sub__ = interp2app(W_Decimal.descr_sub), - __mul__ = interp2app(W_Decimal.descr_mul), - __truediv__ = interp2app(W_Decimal.descr_truediv), - __floordiv__ = interp2app(W_Decimal.descr_floordiv), - __mod__ = interp2app(W_Decimal.descr_mod), + # Unary operations + __pos__ = make_unary_number_method('mpd_qplus'), + __neg__ = make_unary_number_method('mpd_qminus'), + __abs__ = make_unary_number_method('mpd_qabs'), + # Binary operations + __add__ = make_binary_number_method('mpd_qadd'), + __sub__ = make_binary_number_method('mpd_qsub'), + __mul__ = make_binary_number_method('mpd_qmul'), + __truediv__ = make_binary_number_method('mpd_qdiv'), + __floordiv__ = make_binary_number_method('mpd_qdivint'), + __mod__ = make_binary_number_method('mpd_qrem'), __divmod__ = interp2app(W_Decimal.descr_divmod), __pow__ = interp2app(W_Decimal.descr_pow), # - __radd__ = interp2app(W_Decimal.descr_radd), - __rsub__ = interp2app(W_Decimal.descr_rsub), - __rmul__ = interp2app(W_Decimal.descr_rmul), - __rtruediv__ = interp2app(W_Decimal.descr_rtruediv), - __rfloordiv__ = interp2app(W_Decimal.descr_rfloordiv), - __rmod__ = interp2app(W_Decimal.descr_rmod), + __radd__ = make_binary_number_method_right('mpd_qadd'), + __rsub__ = make_binary_number_method_right('mpd_qsub'), + __rmul__ = make_binary_number_method_right('mpd_qmul'), + __rtruediv__ = make_binary_number_method_right('mpd_qdiv'), + __rfloordiv__ = make_binary_number_method_right('mpd_qdivint'), + __rmod__ = make_binary_number_method_right('mpd_qrem'), __rdivmod__ = interp2app(W_Decimal.descr_rdivmod), __rpow__ = interp2app(W_Decimal.descr_rpow), # Unary arithmetic functions, optional context arg From noreply at buildbot.pypy.org Sun Aug 24 22:27:29 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:29 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: add faulthandler.dump_traceback(). 
Message-ID: <20140824202729.734A11C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73045:e7c8a2c8d8e8 Date: 2014-08-02 22:03 +0200 http://bitbucket.org/pypy/pypy/changeset/e7c8a2c8d8e8/ Log: add faulthandler.dump_traceback(). Not too difficult. diff --git a/pypy/module/faulthandler/__init__.py b/pypy/module/faulthandler/__init__.py --- a/pypy/module/faulthandler/__init__.py +++ b/pypy/module/faulthandler/__init__.py @@ -9,4 +9,6 @@ 'disable': 'interp_faulthandler.disable', 'is_enabled': 'interp_faulthandler.is_enabled', 'register': 'interp_faulthandler.register', + + 'dump_traceback': 'interp_faulthandler.dump_traceback', } diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -1,3 +1,8 @@ +import os + +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault + + class FatalErrorState(object): def __init__(self, space): self.enabled = False @@ -13,3 +18,28 @@ def register(space, __args__): pass + + + at unwrap_spec(w_file=WrappedDefault(None), + w_all_threads=WrappedDefault(True)) +def dump_traceback(space, w_file, w_all_threads): + ec = space.getexecutioncontext() + ecs = space.threadlocals.getallvalues() + + if space.is_none(w_file): + w_file = space.sys.get('stderr') + fd = space.c_filedescriptor_w(w_file) + + frame = ec.gettopframe() + while frame: + code = frame.pycode + lineno = frame.get_last_lineno() + if code: + os.write(fd, "File \"%s\", line %s in %s\n" % ( + code.co_filename, lineno, code.co_name)) + else: + os.write(fd, "File ???, line %s in ???\n" % ( + lineno,)) + + frame = frame.f_backref() + diff --git a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py --- a/pypy/module/faulthandler/test/test_faulthander.py +++ b/pypy/module/faulthandler/test/test_faulthander.py @@ -9,3 
+9,8 @@ assert faulthandler.is_enabled() is True faulthandler.disable() assert faulthandler.is_enabled() is False + + def test_dump_traceback(self): + import faulthandler + faulthandler.dump_traceback() + From noreply at buildbot.pypy.org Sun Aug 24 22:27:30 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:30 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add the functions that crash the interpreter with various signals :-/ Message-ID: <20140824202730.A1FF41C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73046:898f225422d2 Date: 2014-08-17 23:48 +0200 http://bitbucket.org/pypy/pypy/changeset/898f225422d2/ Log: Add the functions that crash the interpreter with various signals :-/ diff --git a/pypy/module/faulthandler/__init__.py b/pypy/module/faulthandler/__init__.py --- a/pypy/module/faulthandler/__init__.py +++ b/pypy/module/faulthandler/__init__.py @@ -11,4 +11,11 @@ 'register': 'interp_faulthandler.register', 'dump_traceback': 'interp_faulthandler.dump_traceback', + + '_read_null': 'interp_faulthandler.read_null', + '_sigsegv': 'interp_faulthandler.sigsegv', + '_sigfpe': 'interp_faulthandler.sigfpe', + '_sigabrt': 'interp_faulthandler.sigabrt', + '_sigbus': 'interp_faulthandler.sigbus', + '_sigill': 'interp_faulthandler.sigill', } diff --git a/pypy/module/faulthandler/faulthandler.c b/pypy/module/faulthandler/faulthandler.c new file mode 100644 --- /dev/null +++ b/pypy/module/faulthandler/faulthandler.c @@ -0,0 +1,77 @@ +#include +#include + +int +pypy_faulthandler_read_null(void) +{ + volatile int *x; + volatile int y; + + x = NULL; + y = *x; + return y; +} + +void +pypy_faulthandler_sigsegv(void) +{ +#if defined(MS_WINDOWS) + /* For SIGSEGV, faulthandler_fatal_error() restores the previous signal + handler and then gives back the execution flow to the program (without + explicitly calling the previous error handler). 
In a normal case, the + SIGSEGV was raised by the kernel because of a fault, and so if the + program retries to execute the same instruction, the fault will be + raised again. + + Here the fault is simulated by a fake SIGSEGV signal raised by the + application. We have to raise SIGSEGV at lease twice: once for + faulthandler_fatal_error(), and one more time for the previous signal + handler. */ + while(1) + raise(SIGSEGV); +#else + raise(SIGSEGV); +#endif +} + +int +pypy_faulthandler_sigfpe(void) +{ + /* Do an integer division by zero: raise a SIGFPE on Intel CPU, but not on + PowerPC. Use volatile to disable compile-time optimizations. */ + volatile int x = 1, y = 0, z; + z = x / y; + /* If the division by zero didn't raise a SIGFPE (e.g. on PowerPC), + raise it manually. */ + raise(SIGFPE); + /* This line is never reached, but we pretend to make something with z + to silence a compiler warning. */ + return z; +} + +void +pypy_faulthandler_sigabrt() +{ +#ifdef _MSC_VER + /* Visual Studio: configure abort() to not display an error message nor + open a popup asking to report the fault. 
*/ + _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); +#endif + abort(); +} + +#ifdef SIGBUS +void +pypy_faulthandler_sigbus(void) +{ + raise(SIGBUS); +} +#endif + +#ifdef SIGILL +void +pypy_faulthandler_sigill(void) +{ + raise(SIGILL); +} +#endif diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py --- a/pypy/module/faulthandler/interp_faulthandler.py +++ b/pypy/module/faulthandler/interp_faulthandler.py @@ -1,7 +1,44 @@ import os +import py from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from pypy.interpreter.error import OperationError, oefmt +cwd = py.path.local(__file__).dirpath() +eci = ExternalCompilationInfo( + includes=[cwd.join('faulthandler.h')], + include_dirs=[str(cwd)], + separate_module_files=[cwd.join('faulthandler.c')], + export_symbols=['pypy_faulthandler_read_null', + 'pypy_faulthandler_sigsegv', + 'pypy_faulthandler_sigfpe', + 'pypy_faulthandler_sigabrt', + 'pypy_faulthandler_sigbus', + 'pypy_faulthandler_sigill', + ]) + +def llexternal(*args, **kwargs): + kwargs.setdefault('releasegil', False) + kwargs.setdefault('compilation_info', eci) + return rffi.llexternal(*args, **kwargs) + +pypy_faulthandler_read_null = llexternal( + 'pypy_faulthandler_read_null', [], lltype.Void) +pypy_faulthandler_read_null_nogil = llexternal( + 'pypy_faulthandler_read_null', [], lltype.Void, + releasegil=True) +pypy_faulthandler_sigsegv = llexternal( + 'pypy_faulthandler_sigsegv', [], lltype.Void) +pypy_faulthandler_sigfpe = llexternal( + 'pypy_faulthandler_sigfpe', [], lltype.Void) +pypy_faulthandler_sigabrt = llexternal( + 'pypy_faulthandler_sigabrt', [], lltype.Void) +pypy_faulthandler_sigbus = llexternal( + 'pypy_faulthandler_sigbus', [], lltype.Void) +pypy_faulthandler_sigill = llexternal( + 'pypy_faulthandler_sigill', [], lltype.Void) class 
FatalErrorState(object): def __init__(self, space): @@ -43,3 +80,25 @@ frame = frame.f_backref() + + at unwrap_spec(w_release_gil=WrappedDefault(False)) +def read_null(space, w_release_gil): + if space.is_true(w_release_gil): + pypy_faulthandler_read_null_nogil() + else: + pypy_faulthandler_read_null() + +def sigsegv(): + pypy_faulthandler_sigsegv() + +def sigfpe(): + pypy_faulthandler_sigfpe() + +def sigabrt(): + pypy_faulthandler_sigabrt() + +def sigbus(): + pypy_faulthandler_sigbus() + +def sigill(): + pypy_faulthandler_sigill() From noreply at buildbot.pypy.org Sun Aug 24 22:27:31 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:31 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Rename the function, to avoid the (unlikely) case where the host CPython tries to pickle this interp-level class. Message-ID: <20140824202731.D50871C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73047:02521bd8cd1c Date: 2014-08-18 00:01 +0200 http://bitbucket.org/pypy/pypy/changeset/02521bd8cd1c/ Log: Rename the function, to avoid the (unlikely) case where the host CPython tries to pickle this interp-level class. 
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -268,7 +268,7 @@ BZ2_bzCompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') - def __getstate__(self): + def descr_getstate(self): raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) @unwrap_spec(data='bufferstr') @@ -336,7 +336,7 @@ W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor", __doc__ = W_BZ2Compressor.__doc__, __new__ = interp2app(descr_compressor__new__), - __getstate__ = interp2app(W_BZ2Compressor.__getstate__), + __getstate__ = interp2app(W_BZ2Compressor.descr_getstate), compress = interp2app(W_BZ2Compressor.compress), flush = interp2app(W_BZ2Compressor.flush), ) @@ -376,7 +376,7 @@ BZ2_bzDecompressEnd(self.bzs) lltype.free(self.bzs, flavor='raw') - def __getstate__(self): + def descr_getstate(self): raise oefmt(self.space.w_TypeError, "cannot serialize '%T' object", self) def eof_w(self, space): @@ -436,7 +436,7 @@ W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor", __doc__ = W_BZ2Decompressor.__doc__, __new__ = interp2app(descr_decompressor__new__), - __getstate__ = interp2app(W_BZ2Decompressor.__getstate__), + __getstate__ = interp2app(W_BZ2Decompressor.descr_getstate), unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor), eof = GetSetProperty(W_BZ2Decompressor.eof_w), decompress = interp2app(W_BZ2Decompressor.decompress), From noreply at buildbot.pypy.org Sun Aug 24 22:27:46 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:46 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: hg merge py3.3 Message-ID: <20140824202746.86A4E1C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73048:ca99868cb043 Date: 2014-08-24 19:26 +0200 http://bitbucket.org/pypy/pypy/changeset/ca99868cb043/ Log: hg merge py3.3 diff too long, truncating to 2000 out of 78585 lines diff --git 
a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -6,3 +6,11 @@ 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm 9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0 +0000000000000000000000000000000000000000 release-2.3.0 +394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1 +32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1 +0000000000000000000000000000000000000000 release-2.2=3.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -44,31 +44,33 @@ Alex Gaynor Michael Hudson David Schneider + Matti Picus + Brian Kearns + Philip Jenvey Holger Krekel Christian Tismer Hakan Ardo Benjamin Peterson - Matti Picus - Philip Jenvey + Manuel Jacob Anders Chrigstrom - Brian Kearns Eric van Riet Paap + Wim Lavrijsen + Ronan Lamy Richard Emslie Alexander Schremmer - Wim Lavrijsen Dan Villiom Podlaski Christiansen - Manuel Jacob Lukas Diekmann Sven Hager Anders Lehmann Aurelien Campeas Niklaus Haldimann - Ronan Lamy Camillo Bruni Laura Creighton Toon Verwaest + Remi Meier Leonardo Santagada Seo Sanghyeon + Romain Guillebert Justin Peel Ronny Pfannschmidt David Edelsohn @@ -80,52 +82,62 @@ Daniel Roberts Niko Matsakis Adrien Di Mascio + Alexander Hesse Ludovic Aubry - Alexander Hesse Jacob Hallen - Romain Guillebert Jason Creighton Alex Martelli Michal Bendowski Jan de Mooij + stian Michael Foord Stephan Diehl Stefan Schwarzer Valentino Volonghi Tomek Meka Patrick Maupin - stian Bob Ippolito Bruno Gola Jean-Paul Calderone Timo Paulssen + Squeaky Alexandre Fayolle Simon Burton Marius Gedminas John Witulski + Konstantin Lopuhin Greg Price Dario Bertini Mark Pearse Simon Cross - Konstantin Lopuhin Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy + Tobias Oberstein Adrian Kuhn Boris Feigin + Stefano Rivera tav + Taavi Burns Georg Brandl Bert Freudenberg Stian Andreassen - Stefano Rivera + Laurence Tratt Wanja Saatkamp + Ivan Sichmann Freitas Gerald Klix Mike Blume - Taavi Burns Oscar Nierstrasz + Stefan H. Muller + Jeremy Thurgood + Gregor Wegberg + Rami Chowdhury + Tobias Pape + Edd Barrett David Malcolm Eugene Oden Henry Mason @@ -135,18 +147,16 @@ Dusty Phillips Lukas Renggli Guenter Jantzen - Tobias Oberstein - Remi Meier Ned Batchelder Amit Regmi Ben Young Nicolas Chauvat Andrew Durdin + Andrew Chambers Michael Schneider Nicholas Riley Jason Chu Igor Trindade Oliveira - Jeremy Thurgood Rocco Moretti Gintautas Miliauskas Michael Twomey @@ -159,18 +169,19 @@ Karl Bartel Brian Dorsey Victor Stinner + Andrews Medina Stuart Williams Jasper Schulz + Christian Hudon Toby Watson Antoine Pitrou Aaron Iles Michael Cheng Justas Sadzevicius + Mikael Schönenberg Gasper Zejn Neil Shepperd - Mikael Schönenberg Elmo Mäntynen - Tobias Pape Jonathan David Riehl Stanislaw Halik Anders Qvist @@ -182,19 +193,18 @@ Alexander Sedov Corbin Simpson Christopher Pope - Laurence Tratt - Guillebert Romain + wenzhuman Christian Tismer + Marc Abramowitz Dan Stromberg Stefano Parmesan - Christian Hudon Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang Gabriel - Paweł Piotr Przeradowski + Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor @@ -203,8 +213,11 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Ryan Gonzalez Kristjan Valur Jonsson + Sebastian Pawluś Neil Blakey-Milner + anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann @@ -218,13 +231,14 @@ Martin Blais Lene Wagner Tomo Cocoa - Andrews Medina roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson - Yusei Tahara + Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado Godefroid Chappelle @@ -234,28 +248,39 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + rafalgalczynski at gmail.com Floris Bruynooghe + Laurens Van Houtven Akira Li Gustavo Niemeyer Stephan Busemann - Anna Katrina Dominguez + Rafał Gałczyński + Yusei Tahara Christian Muirhead James Lan shoma hosaka - Daniel Neuhäuser + Daniel Neuh?user + Matthew Miller Buck Golemon Konrad Delong Dinu Gherman Chris Lambacher coolbutuseless at gmail.com + Rodrigo Araújo + w31rd0 Jim Baker - Rodrigo Araújo + James Robert Armin Ronacher Brett Cannon yrttyr + aliceinwire + OlivierBlanvillain Zooko Wilcox-O Hearn Tomer Chachamu Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro opassembler.py Antony Lee Jim Hunziker @@ -263,12 +288,13 @@ Even Wiik Thomassen jbs soareschen + Kurt Griffiths + Mike Bayer Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft - Andrew Chambers Julien Phalip Dan Loewenherz diff --git a/_pytest/__init__.py b/_pytest/__init__.py --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.2.4.dev2' +__version__ = '2.5.2' diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py new file mode 100644 --- /dev/null +++ b/_pytest/_argcomplete.py @@ -0,0 +1,104 @@ + +"""allow bash-completion for argparse with argcomplete if installed +needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code. + +argcomplete does not support python 2.5 (although the changes for that +are minor). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). 
+ +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*' + ).completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). + +SPEEDUP +======= +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh ) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK + +INSTALL/DEBUGGING +================= +To include this support in another application that has setup.py generated +scripts: +- add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point +- include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + , call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument() +If things do not work right away: +- switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 +- run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not +- sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). 
+""" + +import sys +import os +from glob import glob + +class FastFilesCompleter: + 'Fast file completer class' + def __init__(self, directories=True): + self.directories = directories + + def __call__(self, prefix, **kwargs): + """only called on non option completions""" + if os.path.sep in prefix[1:]: # + prefix_dir = len(os.path.dirname(prefix) + os.path.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if '*' not in prefix and '?' not in prefix: + if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + globbed.extend(glob(prefix + '.*')) + prefix += '*' + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += '/' + # append stripping the prefix (like bash, not like compgen) + completion.append(x[prefix_dir:]) + return completion + +if os.environ.get('_ARGCOMPLETE'): + # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format + if sys.version_info[:2] < (2, 6): + sys.exit(1) + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter = FastFilesCompleter() + + def try_argcomplete(parser): + argcomplete.autocomplete(parser) +else: + def try_argcomplete(parser): pass + filescompleter = None diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -3,7 +3,6 @@ """ import py import sys -import pytest from _pytest.monkeypatch import monkeypatch from _pytest.assertion import util @@ -19,8 +18,8 @@ to provide assert expression information. 
""") group.addoption('--no-assert', action="store_true", default=False, dest="noassert", help="DEPRECATED equivalent to --assert=plain") - group.addoption('--nomagic', action="store_true", default=False, - dest="nomagic", help="DEPRECATED equivalent to --assert=plain") + group.addoption('--nomagic', '--no-magic', action="store_true", + default=False, help="DEPRECATED equivalent to --assert=plain") class AssertionState: """State for the assertion plugin.""" @@ -35,22 +34,25 @@ mode = "plain" if mode == "rewrite": try: - import ast + import ast # noqa except ImportError: mode = "reinterp" else: - if sys.platform.startswith('java'): + # Both Jython and CPython 2.6.0 have AST bugs that make the + # assertion rewriting hook malfunction. + if (sys.platform.startswith('java') or + sys.version_info[:3] == (2, 6, 0)): mode = "reinterp" if mode != "plain": _load_modules(mode) m = monkeypatch() config._cleanup.append(m.undo) m.setattr(py.builtin.builtins, 'AssertionError', - reinterpret.AssertionError) + reinterpret.AssertionError) # noqa hook = None if mode == "rewrite": - hook = rewrite.AssertionRewritingHook() - sys.meta_path.append(hook) + hook = rewrite.AssertionRewritingHook() # noqa + sys.meta_path.insert(0, hook) warn_about_missing_assertion(mode) config._assertstate = AssertionState(config, mode) config._assertstate.hook = hook @@ -73,9 +75,16 @@ def callbinrepr(op, left, right): hook_result = item.ihook.pytest_assertrepr_compare( config=item.config, op=op, left=left, right=right) + for new_expl in hook_result: if new_expl: - res = '\n~'.join(new_expl) + # Don't include pageloads of data unless we are very + # verbose (-vv) + if (sum(len(p) for p in new_expl[1:]) > 80*8 + and item.config.option.verbose < 2): + new_expl[1:] = [py.builtin._totext( + 'Detailed information truncated, use "-vv" to show')] + res = py.builtin._totext('\n~').join(new_expl) if item.config.getvalue("assertmode") == "rewrite": # The result will be fed back a python % formatting # operation, 
which will fail if there are extraneous @@ -95,9 +104,9 @@ def _load_modules(mode): """Lazily import assertion related code.""" global rewrite, reinterpret - from _pytest.assertion import reinterpret + from _pytest.assertion import reinterpret # noqa if mode == "rewrite": - from _pytest.assertion import rewrite + from _pytest.assertion import rewrite # noqa def warn_about_missing_assertion(mode): try: diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py --- a/_pytest/assertion/newinterpret.py +++ b/_pytest/assertion/newinterpret.py @@ -11,7 +11,7 @@ from _pytest.assertion.reinterpret import BuiltinAssertionError -if sys.platform.startswith("java") and sys.version_info < (2, 5, 2): +if sys.platform.startswith("java"): # See http://bugs.jython.org/issue1497 _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict", "ListComp", "GeneratorExp", "Yield", "Compare", "Call", diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -526,10 +526,13 @@ # example: def f(): return 5 + def g(): return 3 + def h(x): return 'never' + check("f() * g() == 5") check("not f()") check("not (f() and g() or 0)") diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py --- a/_pytest/assertion/reinterpret.py +++ b/_pytest/assertion/reinterpret.py @@ -1,18 +1,26 @@ import sys import py from _pytest.assertion.util import BuiltinAssertionError +u = py.builtin._totext + class AssertionError(BuiltinAssertionError): def __init__(self, *args): BuiltinAssertionError.__init__(self, *args) if args: + # on Python2.6 we get len(args)==2 for: assert 0, (x,y) + # on Python2.7 and above we always get len(args) == 1 + # with args[0] being the (x,y) tuple. 
+ if len(args) > 1: + toprint = args + else: + toprint = args[0] try: - self.msg = str(args[0]) - except py.builtin._sysex: - raise - except: - self.msg = "<[broken __repr__] %s at %0xd>" %( - args[0].__class__, id(args[0])) + self.msg = u(toprint) + except Exception: + self.msg = u( + "<[broken __repr__] %s at %0xd>" + % (toprint.__class__, id(toprint))) else: f = py.code.Frame(sys._getframe(1)) try: @@ -44,4 +52,3 @@ from _pytest.assertion.newinterpret import interpret as reinterpret else: reinterpret = reinterpret_old - diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -6,6 +6,7 @@ import imp import marshal import os +import re import struct import sys import types @@ -14,13 +15,7 @@ from _pytest.assertion import util -# Windows gives ENOENT in places *nix gives ENOTDIR. -if sys.platform.startswith("win"): - PATH_COMPONENT_NOT_DIR = errno.ENOENT -else: - PATH_COMPONENT_NOT_DIR = errno.ENOTDIR - -# py.test caches rewritten pycs in __pycache__. +# pytest caches rewritten pycs in __pycache__. if hasattr(imp, "get_tag"): PYTEST_TAG = imp.get_tag() + "-PYTEST" else: @@ -34,17 +29,19 @@ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) del ver, impl -PYC_EXT = ".py" + "c" if __debug__ else "o" +PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) +ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 class AssertionRewritingHook(object): - """Import hook which rewrites asserts.""" + """PEP302 Import hook which rewrites asserts.""" def __init__(self): self.session = None self.modules = {} + self._register_with_pkg_resources() def set_session(self, session): self.fnpats = session.config.getini("python_files") @@ -59,8 +56,12 @@ names = name.rsplit(".", 1) lastname = names[-1] pth = None - if path is not None and len(path) == 1: - pth = path[0] + if path is not None: + # Starting with Python 3.3, path is a _NamespacePath(), which + # causes problems if not converted to list. + path = list(path) + if len(path) == 1: + pth = path[0] if pth is None: try: fd, fn, desc = imp.find_module(lastname, path) @@ -95,12 +96,13 @@ finally: self.session = sess else: - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) + state.trace("matched test file (was specified on cmdline): %r" % + (fn,)) # The requested module looks like a test file, so rewrite it. This is # the most magical part of the process: load the source, rewrite the # asserts, and load the rewritten source. We also cache the rewritten # module code in a special pyc. We must be aware of the possibility of - # concurrent py.test processes rewriting and loading pycs. To avoid + # concurrent pytest processes rewriting and loading pycs. To avoid # tricky race conditions, we maintain the following invariant: The # cached pyc is always a complete, valid pyc. Operations on it must be # atomic. POSIX's atomic rename comes in handy. @@ -116,19 +118,19 @@ # common case) or it's blocked by a non-dir node. In the # latter case, we'll ignore it in _write_pyc. pass - elif e == PATH_COMPONENT_NOT_DIR: + elif e in [errno.ENOENT, errno.ENOTDIR]: # One of the path components was not a directory, likely # because we're in a zip file. 
write = False elif e == errno.EACCES: - state.trace("read only directory: %r" % (fn_pypath.dirname,)) + state.trace("read only directory: %r" % fn_pypath.dirname) write = False else: raise cache_name = fn_pypath.basename[:-3] + PYC_TAIL pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going to check - # for a cached pyc. This may not be optimal... + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... co = _read_pyc(fn_pypath, pyc) if co is None: state.trace("rewriting %r" % (fn,)) @@ -153,27 +155,59 @@ mod.__file__ = co.co_filename # Normally, this attribute is 3.2+. mod.__cached__ = pyc + mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) except: del sys.modules[name] raise return sys.modules[name] -def _write_pyc(co, source_path, pyc): - # Technically, we don't have to have the same pyc format as (C)Python, since - # these "pycs" should never be seen by builtin import. However, there's - # little reason deviate, and I hope sometime to be able to use - # imp.load_compiled to load them. (See the comment in load_module above.) + + + def is_package(self, name): + try: + fd, fn, desc = imp.find_module(name) + except ImportError: + return False + if fd is not None: + fd.close() + tp = desc[2] + return tp == imp.PKG_DIRECTORY + + @classmethod + def _register_with_pkg_resources(cls): + """ + Ensure package resources can be loaded from this loader. May be called + multiple times, as the operation is idempotent. + """ + try: + import pkg_resources + # access an attribute in case a deferred importer is present + pkg_resources.__name__ + except ImportError: + return + + # Since pytest tests are always located in the file system, the + # DefaultProvider is appropriate. 
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) + + +def _write_pyc(state, co, source_path, pyc): + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason deviate, and I hope + # sometime to be able to use imp.load_compiled to load them. (See + # the comment in load_module above.) mtime = int(source_path.mtime()) try: fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - if err == PATH_COMPONENT_NOT_DIR: - # This happens when we get a EEXIST in find_module creating the - # __pycache__ directory and __pycache__ is by some non-dir node. - return False - raise + state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + # we ignore any failure to write the cache file + # there are many reasons, permission-denied, __pycache__ being a + # file etc. + return False try: fp.write(imp.get_magic()) fp.write(struct.pack(">", - ast.Add : "+", - ast.Sub : "-", - ast.Mult : "*", - ast.Div : "/", - ast.FloorDiv : "//", - ast.Mod : "%", - ast.Eq : "==", - ast.NotEq : "!=", - ast.Lt : "<", - ast.LtE : "<=", - ast.Gt : ">", - ast.GtE : ">=", - ast.Pow : "**", - ast.Is : "is", - ast.IsNot : "is not", - ast.In : "in", - ast.NotIn : "not in" + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in" } @@ -341,7 +408,7 @@ lineno = 0 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and - isinstance(item.value, ast.Str)): + isinstance(item.value, ast.Str)): doc = item.value.s if "PYTEST_DONT_REWRITE" in doc: # The module has disabled assertion 
rewriting. @@ -462,7 +529,8 @@ body.append(raise_) # Clear temporary variables by setting them to None. if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] + variables = [ast.Name(name, ast.Store()) + for name in self.variables] clear = ast.Assign(variables, ast.Name("None", ast.Load())) self.statements.append(clear) # Fix line numbers. @@ -471,11 +539,12 @@ return self.statements def visit_Name(self, name): - # Check if the name is local or not. + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. locs = ast.Call(self.builtin("locals"), [], [], None, None) - globs = ast.Call(self.builtin("globals"), [], [], None, None) - ops = [ast.In(), ast.IsNot()] - test = ast.Compare(ast.Str(name.id), ops, [locs, globs]) + inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) + dorepr = self.helper("should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) return name, self.explanation_param(expr) @@ -492,7 +561,8 @@ for i, v in enumerate(boolop.values): if i: fail_inner = [] - self.on_failure.append(ast.If(cond, fail_inner, [])) + # cond is set in a prior loop iteration below + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -548,7 +618,8 @@ new_kwarg, expl = self.visit(call.kwargs) arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) + new_call = ast.Call(new_func, new_args, new_kwargs, + new_star, new_kwarg) res = self.assign(new_call) res_expl = self.explanation_param(self.display(res)) outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) @@ -584,7 +655,7 @@ res_expr = ast.Compare(left_res, [op], [next_res]) self.statements.append(ast.Assign([store_names[i]], res_expr)) left_res, 
left_expl = next_res, next_expl - # Use py.code._reprcompare if that's available. + # Use pytest.assertion.util._reprcompare if that's available. expl_call = self.helper("call_reprcompare", ast.Tuple(syms, ast.Load()), ast.Tuple(load_names, ast.Load()), diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,8 +1,13 @@ """Utilities for assertion debugging""" import py +try: + from collections import Sequence +except ImportError: + Sequence = list BuiltinAssertionError = py.builtin.builtins.AssertionError +u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion # interpretation code and assertion rewriter to detect this plugin was @@ -10,6 +15,7 @@ # DebugInterpreter. _reprcompare = None + def format_explanation(explanation): """This formats an explanation @@ -20,7 +26,18 @@ for when one explanation needs to span multiple lines, e.g. when displaying diffs. """ - # simplify 'assert False where False = ...' + explanation = _collapse_false(explanation) + lines = _split_explanation(explanation) + result = _format_lines(lines) + return u('\n').join(result) + + +def _collapse_false(explanation): + """Collapse expansions of False + + So this strips out any "assert False\n{where False = ...\n}" + blocks. + """ where = 0 while True: start = where = explanation.find("False\n{False = ", where) @@ -42,28 +59,48 @@ explanation = (explanation[:start] + explanation[start+15:end-1] + explanation[end+1:]) where -= 17 - raw_lines = (explanation or '').split('\n') - # escape newlines not followed by {, } and ~ + return explanation + + +def _split_explanation(explanation): + """Return a list of individual lines in the explanation + + This will return a list of lines split on '\n{', '\n}' and '\n~'. + Any other newlines will be escaped and appear in the line as the + literal '\n' characters. 
+ """ + raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] for l in raw_lines[1:]: if l.startswith('{') or l.startswith('}') or l.startswith('~'): lines.append(l) else: lines[-1] += '\\n' + l + return lines + +def _format_lines(lines): + """Format the individual lines + + This will replace the '{', '}' and '~' characters of our mini + formatting language with the proper 'where ...', 'and ...' and ' + + ...' text, taking care of indentation along the way. + + Return a list of formatted lines. + """ result = lines[:1] stack = [0] stackcnt = [0] for line in lines[1:]: if line.startswith('{'): if stackcnt[-1]: - s = 'and ' + s = u('and ') else: - s = 'where ' + s = u('where ') stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(' +' + ' '*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) elif line.startswith('}'): assert line.startswith('}') stack.pop() @@ -71,9 +108,9 @@ result[stack[-1]] += line[1:] else: assert line.startswith('~') - result.append(' '*len(stack) + line[1:]) + result.append(u(' ')*len(stack) + line[1:]) assert len(stack) == 1 - return '\n'.join(result) + return result # Provide basestring in python3 @@ -83,132 +120,163 @@ basestring = str -def assertrepr_compare(op, left, right): - """return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op +def assertrepr_compare(config, op, left, right): + """Return specialised explanations for some operators/operands""" + width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op left_repr = py.io.saferepr(left, maxsize=int(width/2)) right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) - summary = '%s %s %s' % (left_repr, op, right_repr) + summary = u('%s %s %s') % (left_repr, op, right_repr) - issequence = lambda x: isinstance(x, (list, tuple)) + issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) + and not 
isinstance(x, basestring)) istext = lambda x: isinstance(x, basestring) isdict = lambda x: isinstance(x, dict) - isset = lambda x: isinstance(x, set) + isset = lambda x: isinstance(x, (set, frozenset)) + verbose = config.getoption('verbose') explanation = None try: if op == '==': if istext(left) and istext(right): - explanation = _diff_text(left, right) + explanation = _diff_text(left, right, verbose) elif issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right) + explanation = _compare_eq_sequence(left, right, verbose) elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right) + explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): - explanation = _diff_text(py.std.pprint.pformat(left), - py.std.pprint.pformat(right)) + explanation = _compare_eq_dict(left, right, verbose) elif op == 'not in': if istext(left) and istext(right): - explanation = _notin_text(left, right) - except py.builtin._sysex: - raise - except: + explanation = _notin_text(left, right, verbose) + except Exception: excinfo = py.code.ExceptionInfo() - explanation = ['(pytest_assertion plugin: representation of ' - 'details failed. Probably an object has a faulty __repr__.)', - str(excinfo) - ] - + explanation = [ + u('(pytest_assertion plugin: representation of details failed. ' + 'Probably an object has a faulty __repr__.)'), + u(excinfo)] if not explanation: return None - # Don't include pageloads of data, should be configurable - if len(''.join(explanation)) > 80*8: - explanation = ['Detailed information too verbose, truncated'] - return [summary] + explanation -def _diff_text(left, right): - """Return the explanation for the diff between text +def _diff_text(left, right, verbose=False): + """Return the explanation for the diff between text or bytes - This will skip leading and trailing characters which are - identical to keep the diff minimal. 
+ Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + + If the input are bytes they will be safely converted to text. """ explanation = [] - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = ['Skipping %s identical ' - 'leading characters in diff' % i] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: + if isinstance(left, py.builtin.bytes): + left = u(repr(left)[1:-1]).replace(r'\n', '\n') + if isinstance(right, py.builtin.bytes): + right = u(repr(right)[1:-1]).replace(r'\n', '\n') + if not verbose: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: break if i > 42: - i -= 10 # Provide some context - explanation += ['Skipping %s identical ' - 'trailing characters in diff' % i] - left = left[:-i] - right = right[:-i] + i -= 10 # Provide some context + explanation = [u('Skipping %s identical leading ' + 'characters in diff, use -v to show') % i] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [u('Skipping %s identical trailing ' + 'characters in diff, use -v to show') % i] + left = left[:-i] + right = right[:-i] explanation += [line.strip('\n') for line in py.std.difflib.ndiff(left.splitlines(), right.splitlines())] return explanation -def _compare_eq_sequence(left, right): +def _compare_eq_sequence(left, right, verbose=False): explanation = [] for i in range(min(len(left), len(right))): if left[i] != right[i]: - explanation += ['At index %s diff: %r != %r' % - (i, left[i], right[i])] + explanation += [u('At index %s diff: %r != %r') + % (i, left[i], right[i])] 
break if len(left) > len(right): - explanation += ['Left contains more items, ' - 'first extra item: %s' % py.io.saferepr(left[len(right)],)] + explanation += [u('Left contains more items, first extra item: %s') + % py.io.saferepr(left[len(right)],)] elif len(left) < len(right): - explanation += ['Right contains more items, ' - 'first extra item: %s' % py.io.saferepr(right[len(left)],)] - return explanation # + _diff_text(py.std.pprint.pformat(left), - # py.std.pprint.pformat(right)) + explanation += [ + u('Right contains more items, first extra item: %s') % + py.io.saferepr(right[len(left)],)] + return explanation # + _diff_text(py.std.pprint.pformat(left), + # py.std.pprint.pformat(right)) -def _compare_eq_set(left, right): +def _compare_eq_set(left, right, verbose=False): explanation = [] diff_left = left - right diff_right = right - left if diff_left: - explanation.append('Extra items in the left set:') + explanation.append(u('Extra items in the left set:')) for item in diff_left: explanation.append(py.io.saferepr(item)) if diff_right: - explanation.append('Extra items in the right set:') + explanation.append(u('Extra items in the right set:')) for item in diff_right: explanation.append(py.io.saferepr(item)) return explanation -def _notin_text(term, text): +def _compare_eq_dict(left, right, verbose=False): + explanation = [] + common = set(left).intersection(set(right)) + same = dict((k, left[k]) for k in common if left[k] == right[k]) + if same and not verbose: + explanation += [u('Omitting %s identical items, use -v to show') % + len(same)] + elif same: + explanation += [u('Common items:')] + explanation += py.std.pprint.pformat(same).splitlines() + diff = set(k for k in common if left[k] != right[k]) + if diff: + explanation += [u('Differing items:')] + for k in diff: + explanation += [py.io.saferepr({k: left[k]}) + ' != ' + + py.io.saferepr({k: right[k]})] + extra_left = set(left) - set(right) + if extra_left: + explanation.append(u('Left contains more 
items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, left[k]) for k in extra_left)).splitlines()) + extra_right = set(right) - set(left) + if extra_right: + explanation.append(u('Right contains more items:')) + explanation.extend(py.std.pprint.pformat( + dict((k, right[k]) for k in extra_right)).splitlines()) + return explanation + + +def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] tail = text[index+len(term):] correct_text = head + tail - diff = _diff_text(correct_text, text) - newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)] + diff = _diff_text(correct_text, text, verbose) + newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] for line in diff: - if line.startswith('Skipping'): + if line.startswith(u('Skipping')): continue - if line.startswith('- '): + if line.startswith(u('- ')): continue - if line.startswith('+ '): - newdiff.append(' ' + line[2:]) + if line.startswith(u('+ ')): + newdiff.append(u(' ') + line[2:]) else: newdiff.append(line) return newdiff diff --git a/_pytest/capture.py b/_pytest/capture.py --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -1,43 +1,114 @@ -""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` function arguments. """ +""" + per-test stdout/stderr capturing mechanisms, + ``capsys`` and ``capfd`` function arguments. 
+""" +# note: py.io capture was where copied from +# pylib 1.4.20.dev2 (rev 13d9af95547e) +import sys +import os +import tempfile -import pytest, py -import os +import py +import pytest + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO + +try: + from io import BytesIO +except ImportError: + class BytesIO(StringIO): + def write(self, data): + if isinstance(data, unicode): + raise TypeError("not a byte value: %r" % (data,)) + StringIO.write(self, data) + +if sys.version_info < (3, 0): + class TextIO(StringIO): + def write(self, data): + if not isinstance(data, unicode): + enc = getattr(self, '_encoding', 'UTF-8') + data = unicode(data, enc, 'replace') + StringIO.write(self, data) +else: + TextIO = StringIO + + +patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} + def pytest_addoption(parser): group = parser.getgroup("general") - group._addoption('--capture', action="store", default=None, - metavar="method", type="choice", choices=['fd', 'sys', 'no'], + group._addoption( + '--capture', action="store", default=None, + metavar="method", choices=['fd', 'sys', 'no'], help="per-test capturing method: one of fd (default)|sys|no.") - group._addoption('-s', action="store_const", const="no", dest="capture", + group._addoption( + '-s', action="store_const", const="no", dest="capture", help="shortcut for --capture=no.") + @pytest.mark.tryfirst -def pytest_cmdline_parse(pluginmanager, args): - # we want to perform capturing already for plugin/conftest loading - if '-s' in args or "--capture=no" in args: - method = "no" - elif hasattr(os, 'dup') and '--capture=sys' not in args: +def pytest_load_initial_conftests(early_config, parser, args, __multicall__): + ns = parser.parse_known_args(args) + method = ns.capture + if not method: method = "fd" - else: + if method == "fd" and not hasattr(os, "dup"): method = "sys" capman = CaptureManager(method) - pluginmanager.register(capman, "capturemanager") + early_config.pluginmanager.register(capman, 
"capturemanager") + + # make sure that capturemanager is properly reset at final shutdown + def teardown(): + try: + capman.reset_capturings() + except ValueError: + pass + + early_config.pluginmanager.add_shutdown(teardown) + + # make sure logging does not raise exceptions at the end + def silence_logging_at_shutdown(): + if "logging" in sys.modules: + sys.modules["logging"].raiseExceptions = False + early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown) + + # finally trigger conftest loading but while capturing (issue93) + capman.resumecapture() + try: + try: + return __multicall__.execute() + finally: + out, err = capman.suspendcapture() + except: + sys.stdout.write(out) + sys.stderr.write(err) + raise + def addouterr(rep, outerr): for secname, content in zip(["out", "err"], outerr): if content: rep.sections.append(("Captured std%s" % secname, content)) + class NoCapture: def startall(self): pass + def resume(self): pass + def reset(self): pass + def suspend(self): return "", "" + class CaptureManager: def __init__(self, defaultmethod=None): self._method2capture = {} @@ -45,21 +116,23 @@ def _maketempfile(self): f = py.std.tempfile.TemporaryFile() - newf = py.io.dupfile(f, encoding="UTF-8") + newf = dupfile(f, encoding="UTF-8") f.close() return newf def _makestringio(self): - return py.io.TextIO() + return TextIO() def _getcapture(self, method): if method == "fd": - return py.io.StdCaptureFD(now=False, - out=self._maketempfile(), err=self._maketempfile() + return StdCaptureFD( + out=self._maketempfile(), + err=self._maketempfile(), ) elif method == "sys": - return py.io.StdCapture(now=False, - out=self._makestringio(), err=self._makestringio() + return StdCapture( + out=self._makestringio(), + err=self._makestringio(), ) elif method == "no": return NoCapture() @@ -74,23 +147,24 @@ method = config._conftest.rget("option_capture", path=fspath) except KeyError: method = "fd" - if method == "fd" and not hasattr(os, 'dup'): # e.g. 
jython + if method == "fd" and not hasattr(os, 'dup'): # e.g. jython method = "sys" return method def reset_capturings(self): - for name, cap in self._method2capture.items(): + for cap in self._method2capture.values(): cap.reset() def resumecapture_item(self, item): method = self._getmethod(item.config, item.fspath) if not hasattr(item, 'outerr'): - item.outerr = ('', '') # we accumulate outerr on the item + item.outerr = ('', '') # we accumulate outerr on the item return self.resumecapture(method) def resumecapture(self, method=None): if hasattr(self, '_capturing'): - raise ValueError("cannot resume, already capturing with %r" % + raise ValueError( + "cannot resume, already capturing with %r" % (self._capturing,)) if method is None: method = self._defaultmethod @@ -119,30 +193,29 @@ return "", "" def activate_funcargs(self, pyfuncitem): - if not hasattr(pyfuncitem, 'funcargs'): - return - assert not hasattr(self, '_capturing_funcargs') - self._capturing_funcargs = capturing_funcargs = [] - for name, capfuncarg in pyfuncitem.funcargs.items(): - if name in ('capsys', 'capfd'): - capturing_funcargs.append(capfuncarg) - capfuncarg._start() + funcargs = getattr(pyfuncitem, "funcargs", None) + if funcargs is not None: + for name, capfuncarg in funcargs.items(): + if name in ('capsys', 'capfd'): + assert not hasattr(self, '_capturing_funcarg') + self._capturing_funcarg = capfuncarg + capfuncarg._start() def deactivate_funcargs(self): - capturing_funcargs = getattr(self, '_capturing_funcargs', None) - if capturing_funcargs is not None: - while capturing_funcargs: - capfuncarg = capturing_funcargs.pop() - capfuncarg._finalize() - del self._capturing_funcargs + capturing_funcarg = getattr(self, '_capturing_funcarg', None) + if capturing_funcarg: + outerr = capturing_funcarg._finalize() + del self._capturing_funcarg + return outerr def pytest_make_collect_report(self, __multicall__, collector): method = self._getmethod(collector.config, collector.fspath) try: 
self.resumecapture(method) except ValueError: - return # recursive collect, XXX refactor capturing - # to allow for more lightweight recursive capturing + # recursive collect, XXX refactor capturing + # to allow for more lightweight recursive capturing + return try: rep = __multicall__.execute() finally: @@ -169,46 +242,371 @@ @pytest.mark.tryfirst def pytest_runtest_makereport(self, __multicall__, item, call): - self.deactivate_funcargs() + funcarg_outerr = self.deactivate_funcargs() rep = __multicall__.execute() outerr = self.suspendcapture(item) - if not rep.passed: - addouterr(rep, outerr) + if funcarg_outerr is not None: + outerr = (outerr[0] + funcarg_outerr[0], + outerr[1] + funcarg_outerr[1]) + addouterr(rep, outerr) if not rep.passed or rep.when == "teardown": outerr = ('', '') item.outerr = outerr return rep +error_capsysfderror = "cannot use capsys and capfd at the same time" + + def pytest_funcarg__capsys(request): """enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. """ - return CaptureFuncarg(py.io.StdCapture) + if "capfd" in request._funcargs: + raise request.raiseerror(error_capsysfderror) + return CaptureFixture(StdCapture) + def pytest_funcarg__capfd(request): """enables capturing of writes to file descriptors 1 and 2 and makes captured output available via ``capsys.readouterr()`` method calls which return a ``(out, err)`` tuple. 
""" + if "capsys" in request._funcargs: + request.raiseerror(error_capsysfderror) if not hasattr(os, 'dup'): - py.test.skip("capfd funcarg needs os.dup") - return CaptureFuncarg(py.io.StdCaptureFD) + pytest.skip("capfd funcarg needs os.dup") + return CaptureFixture(StdCaptureFD) -class CaptureFuncarg: + +class CaptureFixture: def __init__(self, captureclass): - self.capture = captureclass(now=False) + self._capture = captureclass() def _start(self): - self.capture.startall() + self._capture.startall() def _finalize(self): - if hasattr(self, 'capture'): - self.capture.reset() - del self.capture + if hasattr(self, '_capture'): + outerr = self._outerr = self._capture.reset() + del self._capture + return outerr def readouterr(self): - return self.capture.readouterr() + try: + return self._capture.readouterr() + except AttributeError: + return self._outerr def close(self): self._finalize() + + +class FDCapture: + """ Capture IO to/from a given os-level filedescriptor. """ + + def __init__(self, targetfd, tmpfile=None, patchsys=False): + """ save targetfd descriptor, and open a new + temporary file there. If no tmpfile is + specified a tempfile.Tempfile() will be opened + in text mode. 
+ """ + self.targetfd = targetfd + if tmpfile is None and targetfd != 0: + f = tempfile.TemporaryFile('wb+') + tmpfile = dupfile(f, encoding="UTF-8") + f.close() + self.tmpfile = tmpfile + self._savefd = os.dup(self.targetfd) + if patchsys: + self._oldsys = getattr(sys, patchsysdict[targetfd]) + + def start(self): + try: + os.fstat(self._savefd) + except OSError: + raise ValueError( + "saved filedescriptor not valid, " + "did you call start() twice?") + if self.targetfd == 0 and not self.tmpfile: + fd = os.open(os.devnull, os.O_RDONLY) + os.dup2(fd, 0) + os.close(fd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], DontReadFromInput()) + else: + os.dup2(self.tmpfile.fileno(), self.targetfd) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self.tmpfile) + + def done(self): + """ unpatch and clean up, returns the self.tmpfile (file object) + """ + os.dup2(self._savefd, self.targetfd) + os.close(self._savefd) + if self.targetfd != 0: + self.tmpfile.seek(0) + if hasattr(self, '_oldsys'): + setattr(sys, patchsysdict[self.targetfd], self._oldsys) + return self.tmpfile + + def writeorg(self, data): + """ write a string to the original file descriptor + """ + tempfp = tempfile.TemporaryFile() + try: + os.dup2(self._savefd, tempfp.fileno()) + tempfp.write(data) + finally: + tempfp.close() + + +def dupfile(f, mode=None, buffering=0, raising=False, encoding=None): + """ return a new open file object that's a duplicate of f + + mode is duplicated if not given, 'buffering' controls + buffer size (defaulting to no buffering) and 'raising' + defines whether an exception is raised when an incompatible + file object is passed in (if raising is False, the file + object itself will be returned) + """ + try: + fd = f.fileno() + mode = mode or f.mode + except AttributeError: + if raising: + raise + return f + newfd = os.dup(fd) + if sys.version_info >= (3, 0): + if encoding is not None: + mode = mode.replace("b", "") + buffering = 
True + return os.fdopen(newfd, mode, buffering, encoding, closefd=True) + else: + f = os.fdopen(newfd, mode, buffering) + if encoding is not None: + return EncodedFile(f, encoding) + return f + + +class EncodedFile(object): + def __init__(self, _stream, encoding): + self._stream = _stream + self.encoding = encoding + + def write(self, obj): + if isinstance(obj, unicode): + obj = obj.encode(self.encoding) + self._stream.write(obj) + + def writelines(self, linelist): + data = ''.join(linelist) + self.write(data) + + def __getattr__(self, name): + return getattr(self._stream, name) + + +class Capture(object): + def reset(self): + """ reset sys.stdout/stderr and return captured output as strings. """ + if hasattr(self, '_reset'): + raise ValueError("was already reset") + self._reset = True + outfile, errfile = self.done(save=False) + out, err = "", "" + if outfile and not outfile.closed: + out = outfile.read() + outfile.close() + if errfile and errfile != outfile and not errfile.closed: + err = errfile.read() + errfile.close() + return out, err + + def suspend(self): + """ return current snapshot captures, memorize tempfiles. """ + outerr = self.readouterr() + outfile, errfile = self.done() + return outerr + + +class StdCaptureFD(Capture): + """ This class allows to capture writes to FD1 and FD2 + and may connect a NULL file to FD0 (and prevent + reads from sys.stdin). If any of the 0,1,2 file descriptors + is invalid it will not be captured. 
+ """ + def __init__(self, out=True, err=True, in_=True, patchsys=True): + self._options = { + "out": out, + "err": err, + "in_": in_, + "patchsys": patchsys, + } + self._save() + + def _save(self): + in_ = self._options['in_'] + out = self._options['out'] + err = self._options['err'] + patchsys = self._options['patchsys'] + if in_: + try: + self.in_ = FDCapture( + 0, tmpfile=None, + patchsys=patchsys) + except OSError: + pass + if out: + tmpfile = None + if hasattr(out, 'write'): + tmpfile = out + try: + self.out = FDCapture( + 1, tmpfile=tmpfile, + patchsys=patchsys) + self._options['out'] = self.out.tmpfile + except OSError: + pass + if err: + if hasattr(err, 'write'): + tmpfile = err + else: + tmpfile = None + try: + self.err = FDCapture( + 2, tmpfile=tmpfile, + patchsys=patchsys) + self._options['err'] = self.err.tmpfile + except OSError: + pass + + def startall(self): + if hasattr(self, 'in_'): + self.in_.start() + if hasattr(self, 'out'): + self.out.start() + if hasattr(self, 'err'): + self.err.start() + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if hasattr(self, 'out') and not self.out.tmpfile.closed: + outfile = self.out.done() + if hasattr(self, 'err') and not self.err.tmpfile.closed: + errfile = self.err.done() + if hasattr(self, 'in_'): + self.in_.done() + if save: + self._save() + return outfile, errfile + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. 
""" + out = self._readsnapshot('out') + err = self._readsnapshot('err') + return out, err + + def _readsnapshot(self, name): + if hasattr(self, name): + f = getattr(self, name).tmpfile + else: + return '' + + f.seek(0) + res = f.read() + enc = getattr(f, "encoding", None) + if enc: + res = py.builtin._totext(res, enc, "replace") + f.truncate(0) + f.seek(0) + return res + + +class StdCapture(Capture): + """ This class allows to capture writes to sys.stdout|stderr "in-memory" + and will raise errors on tries to read from sys.stdin. It only + modifies sys.stdout|stderr|stdin attributes and does not + touch underlying File Descriptors (use StdCaptureFD for that). + """ + def __init__(self, out=True, err=True, in_=True): + self._oldout = sys.stdout + self._olderr = sys.stderr + self._oldin = sys.stdin + if out and not hasattr(out, 'file'): + out = TextIO() + self.out = out + if err: + if not hasattr(err, 'write'): + err = TextIO() + self.err = err + self.in_ = in_ + + def startall(self): + if self.out: + sys.stdout = self.out + if self.err: + sys.stderr = self.err + if self.in_: + sys.stdin = self.in_ = DontReadFromInput() + + def done(self, save=True): + """ return (outfile, errfile) and stop capturing. """ + outfile = errfile = None + if self.out and not self.out.closed: + sys.stdout = self._oldout + outfile = self.out + outfile.seek(0) + if self.err and not self.err.closed: + sys.stderr = self._olderr + errfile = self.err + errfile.seek(0) + if self.in_: + sys.stdin = self._oldin + return outfile, errfile + + def resume(self): + """ resume capturing with original temp files. """ + self.startall() + + def readouterr(self): + """ return snapshot value of stdout/stderr capturings. """ + out = err = "" + if self.out: + out = self.out.getvalue() + self.out.truncate(0) + self.out.seek(0) + if self.err: + err = self.err.getvalue() + self.err.truncate(0) + self.err.seek(0) + return out, err + + +class DontReadFromInput: + """Temporary stub class. 
Ideally when stdin is accessed, the + capturing should be turned off, with possibly all data captured + so far sent to the screen. This should be configurable, though, + because in automated test runs it is better to crash than + hang indefinitely. + """ + def read(self, *args): + raise IOError("reading from stdin while output is captured") + readline = read + readlines = read + __iter__ = read + + def fileno(self): + raise ValueError("redirected Stdin is pseudofile, has no fileno()") + + def isatty(self): + return False + + def close(self): + pass diff --git a/_pytest/config.py b/_pytest/config.py --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,25 +1,91 @@ """ command line options, ini-file and conftest.py processing. """ import py +# DON't import pytest here because it causes import cycle troubles import sys, os +from _pytest import hookspec # the extension point definitions from _pytest.core import PluginManager -import pytest -def pytest_cmdline_parse(pluginmanager, args): - config = Config(pluginmanager) - config.parse(args) - return config +# pytest startup -def pytest_unconfigure(config): - while 1: - try: - fin = config._cleanup.pop() - except IndexError: - break - fin() +def main(args=None, plugins=None): + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. 
+ """ + config = _prepareconfig(args, plugins) + return config.hook.pytest_cmdline_main(config=config) + +class cmdline: # compatibility namespace + main = staticmethod(main) + +class UsageError(Exception): + """ error in pytest usage or invocation""" + +_preinit = [] + +default_plugins = ( + "mark main terminal runner python pdb unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript " + "junitxml resultlog doctest").split() + +def _preloadplugins(): + assert not _preinit + _preinit.append(get_plugin_manager()) + +def get_plugin_manager(): + if _preinit: + return _preinit.pop(0) + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + pluginmanager.config = Config(pluginmanager) # XXX attr needed? + for spec in default_plugins: + pluginmanager.import_plugin(spec) + return pluginmanager + +def _prepareconfig(args=None, plugins=None): + if args is None: + args = sys.argv[1:] + elif isinstance(args, py.path.local): + args = [str(args)] + elif not isinstance(args, (tuple, list)): + if not isinstance(args, str): + raise ValueError("not a string or argument list: %r" % (args,)) + args = py.std.shlex.split(args) + pluginmanager = get_plugin_manager() + if plugins: + for plugin in plugins: + pluginmanager.register(plugin) + return pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args) + +class PytestPluginManager(PluginManager): + def __init__(self, hookspecs=[hookspec]): + super(PytestPluginManager, self).__init__(hookspecs=hookspecs) + self.register(self) + if os.environ.get('PYTEST_DEBUG'): + err = sys.stderr + encoding = getattr(err, 'encoding', 'utf8') + try: + err = py.io.dupfile(err, encoding=encoding) + except Exception: + pass + self.trace.root.setwriter(err.write) + + def pytest_configure(self, config): + config.addinivalue_line("markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it 
first/as early as possible.") + config.addinivalue_line("markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") + class Parser: From noreply at buildbot.pypy.org Sun Aug 24 22:27:47 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:47 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Hack differently, so that unicode.__new__ is analyzed even if previous tests already have loaded the module. Message-ID: <20140824202747.CB3761C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73049:82dc94208ca9 Date: 2014-08-24 19:57 +0200 http://bitbucket.org/pypy/pypy/changeset/82dc94208ca9/ Log: Hack differently, so that unicode.__new__ is analyzed even if previous tests already have loaded the module. diff --git a/pypy/module/_decimal/test/test_ztranslation.py b/pypy/module/_decimal/test/test_ztranslation.py --- a/pypy/module/_decimal/test/test_ztranslation.py +++ b/pypy/module/_decimal/test/test_ztranslation.py @@ -1,9 +1,11 @@ from pypy.objspace.fake.checkmodule import checkmodule +from pypy.interpreter.mixedmodule import getinterpevalloader from pypy.module._decimal import Module def test_checkmodule(): - Module.interpleveldefs['__hack'] = ( - 'interp_decimal.unicodeobject.W_UnicodeObject(u"")') + Module.buildloaders() + Module.loaders['__hack'] = getinterpevalloader( + 'pypy.objspace.std', 'unicodeobject.W_UnicodeObject(u"")') checkmodule('_decimal') From noreply at buildbot.pypy.org Sun Aug 24 22:27:49 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 22:27:49 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Kill the ztranslation hack, I found the reason: Message-ID: <20140824202749.05E161C3340@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73050:9c253f67327d Date: 2014-08-24 22:21 +0200 
http://bitbucket.org/pypy/pypy/changeset/9c253f67327d/ Log: Kill the ztranslation hack, I found the reason: with the FakeObjectSpace, no subclass of W_Root should be annotated, unless we also annotate its constructor. Use the virtual method call instead of isinstance(). diff --git a/pypy/module/_decimal/test/test_ztranslation.py b/pypy/module/_decimal/test/test_ztranslation.py --- a/pypy/module/_decimal/test/test_ztranslation.py +++ b/pypy/module/_decimal/test/test_ztranslation.py @@ -1,11 +1,6 @@ from pypy.objspace.fake.checkmodule import checkmodule -from pypy.interpreter.mixedmodule import getinterpevalloader -from pypy.module._decimal import Module def test_checkmodule(): - Module.buildloaders() - Module.loaders['__hack'] = getinterpevalloader( - 'pypy.objspace.std', 'unicodeobject.W_UnicodeObject(u"")') checkmodule('_decimal') diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1151,9 +1151,7 @@ # to PyUnicode_TransformDecimalToASCII, which is much simpler. Here, we do the # equivalent plus the final step of encoding the result to utf-8. 
def unicode_to_decimal_w(space, w_unistr): - if not isinstance(w_unistr, W_UnicodeObject): - raise oefmt(space.w_TypeError, "expected unicode, got '%T'", w_unistr) - unistr = w_unistr._value + unistr = w_unistr.unicode_w(space) result = [u'\0'] * len(unistr) for i in xrange(len(unistr)): uchr = ord(unistr[i]) From noreply at buildbot.pypy.org Sun Aug 24 23:20:17 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Sun, 24 Aug 2014 23:20:17 +0200 (CEST) Subject: [pypy-commit] pypy decimal-libmpdec: Add missing file Message-ID: <20140824212017.9EBF01C3339@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: decimal-libmpdec Changeset: r73051:b763ba8d4b7e Date: 2014-08-24 23:19 +0200 http://bitbucket.org/pypy/pypy/changeset/b763ba8d4b7e/ Log: Add missing file diff --git a/pypy/module/_decimal/app_context.py b/pypy/module/_decimal/app_context.py new file mode 100644 --- /dev/null +++ b/pypy/module/_decimal/app_context.py @@ -0,0 +1,17 @@ +from _decimal import getcontext, setcontext + +class _ContextManager(object): + """Context manager class to support localcontext().""" + def __init__(self, new_context): + self.new_context = new_context.copy() + def __enter__(self): + self.saved_context = getcontext() + setcontext(self.new_context) + return self.new_context + def __exit__(self, t, v, tb): + setcontext(self.saved_context) + +def localcontext(ctx=None): + if ctx is None: + ctx = getcontext() + return _ContextManager(ctx) From noreply at buildbot.pypy.org Sun Aug 24 23:33:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 23:33:21 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix reliance on refcounting in test_weakref Message-ID: <20140824213321.5D4961C3340@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73052:62c9c61a36cf Date: 2014-08-24 17:29 -0400 http://bitbucket.org/pypy/pypy/changeset/62c9c61a36cf/ Log: fix reliance on refcounting in test_weakref diff --git 
a/lib-python/2.7/test/test_weakref.py b/lib-python/2.7/test/test_weakref.py --- a/lib-python/2.7/test/test_weakref.py +++ b/lib-python/2.7/test/test_weakref.py @@ -1162,6 +1162,7 @@ yield Object(v), v finally: it = None # should commit all removals + gc.collect() self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) def test_weak_values_destroy_while_iterating(self): @@ -1183,6 +1184,7 @@ yield k, Object(k) finally: it = None # should commit all removals + gc.collect() self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext) def test_make_weak_keyed_dict_from_dict(self): From noreply at buildbot.pypy.org Sun Aug 24 23:50:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 24 Aug 2014 23:50:49 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: hack partial so test_functools passes Message-ID: <20140824215049.95FC71C0306@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73053:f3332cf0c172 Date: 2014-08-24 17:50 -0400 http://bitbucket.org/pypy/pypy/changeset/f3332cf0c172/ Log: hack partial so test_functools passes diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py --- a/lib_pypy/_functools.py +++ b/lib_pypy/_functools.py @@ -16,6 +16,11 @@ self._args = args self._keywords = keywords or None + def __delattr__(self, key): + if key == '__dict__': + raise TypeError("a partial object's dictionary may not be deleted") + object.__delattr__(self, key) + @property def func(self): return self._func diff --git a/pypy/module/test_lib_pypy/test_functools.py b/pypy/module/test_lib_pypy/test_functools.py --- a/pypy/module/test_lib_pypy/test_functools.py +++ b/pypy/module/test_lib_pypy/test_functools.py @@ -25,3 +25,8 @@ partial = _functools.partial(object) with pytest.raises((TypeError, AttributeError)): partial.func = sum + with pytest.raises(TypeError) as exc: + del partial.__dict__ + assert str(exc.value) == "a partial object's dictionary may not be deleted" + with 
pytest.raises(AttributeError): + del partial.zzz From noreply at buildbot.pypy.org Mon Aug 25 06:45:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 06:45:33 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: update this change to preserve existing behavior Message-ID: <20140825044533.B33E01C07AB@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73054:aa5697b998df Date: 2014-08-25 00:44 -0400 http://bitbucket.org/pypy/pypy/changeset/aa5697b998df/ Log: update this change to preserve existing behavior diff --git a/lib-python/2.7/tarfile.py b/lib-python/2.7/tarfile.py --- a/lib-python/2.7/tarfile.py +++ b/lib-python/2.7/tarfile.py @@ -1718,7 +1718,12 @@ except (ImportError, AttributeError): raise CompressionError("gzip module is not available") - fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj) + try: + fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj) + except OSError: + if fileobj is not None and mode == 'r': + raise ReadError("not a gzip file") + raise try: t = cls.taropen(name, mode, fileobj, **kwargs) From noreply at buildbot.pypy.org Mon Aug 25 17:03:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 17:03:07 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: have tarfile explicitly close fileobj in more cases Message-ID: <20140825150307.307601D2ACA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73055:cdd1dd2f249e Date: 2014-08-25 11:01 -0400 http://bitbucket.org/pypy/pypy/changeset/cdd1dd2f249e/ Log: have tarfile explicitly close fileobj in more cases diff --git a/lib-python/2.7/tarfile.py b/lib-python/2.7/tarfile.py --- a/lib-python/2.7/tarfile.py +++ b/lib-python/2.7/tarfile.py @@ -417,28 +417,34 @@ self.pos = 0L self.closed = False - if comptype == "gz": - try: - import zlib - except ImportError: - raise CompressionError("zlib module is not available") - self.zlib = zlib - self.crc = zlib.crc32("") & 
0xffffffffL - if mode == "r": - self._init_read_gz() - else: - self._init_write_gz() + try: + if comptype == "gz": + try: + import zlib + except ImportError: + raise CompressionError("zlib module is not available") + self.zlib = zlib + self.crc = zlib.crc32("") & 0xffffffffL + if mode == "r": + self._init_read_gz() + else: + self._init_write_gz() - if comptype == "bz2": - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - if mode == "r": - self.dbuf = "" - self.cmp = bz2.BZ2Decompressor() - else: - self.cmp = bz2.BZ2Compressor() + if comptype == "bz2": + try: + import bz2 + except ImportError: + raise CompressionError("bz2 module is not available") + if mode == "r": + self.dbuf = "" + self.cmp = bz2.BZ2Decompressor() + else: + self.cmp = bz2.BZ2Compressor() + except: + if not self._extfileobj: + self.fileobj.close() + self.closed = True + raise def __del__(self): if hasattr(self, "closed") and not self.closed: @@ -1685,9 +1691,12 @@ if filemode not in ("r", "w"): raise ValueError("mode must be 'r' or 'w'") - t = cls(name, filemode, - _Stream(name, filemode, comptype, fileobj, bufsize), - **kwargs) + stream = _Stream(name, filemode, comptype, fileobj, bufsize) + try: + t = cls(name, filemode, stream, **kwargs) + except: + stream.close() + raise t._extfileobj = False return t @@ -1728,9 +1737,13 @@ try: t = cls.taropen(name, mode, fileobj, **kwargs) except IOError: + fileobj.close() if mode == 'r': raise ReadError("not a gzip file") raise + except: + fileobj.close() + raise t._extfileobj = False return t @@ -1755,9 +1768,13 @@ try: t = cls.taropen(name, mode, fileobj, **kwargs) except (IOError, EOFError): + fileobj.close() if mode == 'r': raise ReadError("not a bzip2 file") raise + except: + fileobj.close() + raise t._extfileobj = False return t From noreply at buildbot.pypy.org Mon Aug 25 17:12:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 17:12:17 +0200 (CEST) Subject: [pypy-commit] pypy 
stdlib-2.7.8: small tweak Message-ID: <20140825151217.87A541D2A6B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73056:2a9100e1c59d Date: 2014-08-25 11:11 -0400 http://bitbucket.org/pypy/pypy/changeset/2a9100e1c59d/ Log: small tweak diff --git a/lib-python/2.7/tarfile.py b/lib-python/2.7/tarfile.py --- a/lib-python/2.7/tarfile.py +++ b/lib-python/2.7/tarfile.py @@ -430,7 +430,7 @@ else: self._init_write_gz() - if comptype == "bz2": + elif comptype == "bz2": try: import bz2 except ImportError: From noreply at buildbot.pypy.org Mon Aug 25 22:29:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 22:29:05 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix test_tarfile so it passes on win32 Message-ID: <20140825202905.BC8F31D34AC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73057:969e90db5911 Date: 2014-08-25 12:50 -0700 http://bitbucket.org/pypy/pypy/changeset/969e90db5911/ Log: fix test_tarfile so it passes on win32 diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -399,16 +399,16 @@ # Test hardlink extraction (e.g. bug #857297). 
with tarfile.open(tarname, errorlevel=1, encoding="iso8859-1") as tar: tar.extract("ustar/regtype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/regtype")) + self.addCleanup(test_support.unlink, os.path.join(TEMPDIR, "ustar/regtype")) tar.extract("ustar/lnktype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/lnktype")) + self.addCleanup(test_support.unlink, os.path.join(TEMPDIR, "ustar/lnktype")) with open(os.path.join(TEMPDIR, "ustar/lnktype"), "rb") as f: data = f.read() self.assertEqual(md5sum(data), md5_regtype) tar.extract("ustar/symtype", TEMPDIR) - self.addCleanup(os.remove, os.path.join(TEMPDIR, "ustar/symtype")) + self.addCleanup(test_support.unlink, os.path.join(TEMPDIR, "ustar/symtype")) with open(os.path.join(TEMPDIR, "ustar/symtype"), "rb") as f: data = f.read() self.assertEqual(md5sum(data), md5_regtype) From noreply at buildbot.pypy.org Mon Aug 25 22:29:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 22:29:06 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: modernize/fix up test_tarfile Message-ID: <20140825202906.F3EC01D34AC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73058:feb74dc93383 Date: 2014-08-25 13:28 -0700 http://bitbucket.org/pypy/pypy/changeset/feb74dc93383/ Log: modernize/fix up test_tarfile diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ b/lib-python/2.7/test/test_tarfile.py @@ -59,12 +59,13 @@ def test_fileobj_readlines(self): self.tar.extract("ustar/regtype", TEMPDIR) + with open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") as fobj1: + lines1 = fobj1.readlines() + tarinfo = self.tar.getmember("ustar/regtype") - fobj1 = open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") fobj2 = self.tar.extractfile(tarinfo) + lines2 = fobj2.readlines() - lines1 = fobj1.readlines() - lines2 = fobj2.readlines() self.assertTrue(lines1 == lines2, 
"fileobj.readlines() failed") self.assertTrue(len(lines2) == 114, @@ -75,19 +76,20 @@ def test_fileobj_iter(self): self.tar.extract("ustar/regtype", TEMPDIR) + with open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") as fobj1: + lines1 = fobj1.readlines() + tarinfo = self.tar.getmember("ustar/regtype") - fobj1 = open(os.path.join(TEMPDIR, "ustar/regtype"), "rU") fobj2 = self.tar.extractfile(tarinfo) - lines1 = fobj1.readlines() lines2 = [line for line in fobj2] + self.assertTrue(lines1 == lines2, "fileobj.__iter__() failed") def test_fileobj_seek(self): self.tar.extract("ustar/regtype", TEMPDIR) - fobj = open(os.path.join(TEMPDIR, "ustar/regtype"), "rb") - data = fobj.read() - fobj.close() + with open(os.path.join(TEMPDIR, "ustar/regtype"), "rb") as fobj: + data = fobj.read() tarinfo = self.tar.getmember("ustar/regtype") fobj = self.tar.extractfile(tarinfo) @@ -245,8 +247,10 @@ tar.getnames() except tarfile.ReadError: self.fail("tarfile.open() failed on empty archive") - self.assertListEqual(tar.getmembers(), []) - tar.close() + else: + self.assertListEqual(tar.getmembers(), []) + finally: + tar.close() def test_null_tarfile(self): # Test for issue6123: Allow opening empty archives. 
@@ -291,24 +295,21 @@ taropen = tarfile.TarFile.taropen def test_no_name_argument(self): - fobj = open(self.tarname, "rb") - tar = tarfile.open(fileobj=fobj, mode=self.mode) - self.assertEqual(tar.name, os.path.abspath(fobj.name)) - tar.close() + with open(self.tarname, "rb") as fobj: + with tarfile.open(fileobj=fobj, mode=self.mode) as tar: + self.assertEqual(tar.name, os.path.abspath(fobj.name)) def test_no_name_attribute(self): - f = open(self.tarname, "rb") - data = f.read() - f.close() + with open(self.tarname, "rb") as fobj: + data = fobj.read() fobj = StringIO.StringIO(data) self.assertRaises(AttributeError, getattr, fobj, "name") tar = tarfile.open(fileobj=fobj, mode=self.mode) self.assertEqual(tar.name, None) def test_empty_name_attribute(self): - f = open(self.tarname, "rb") - data = f.read() - f.close() + with open(self.tarname, "rb") as fobj: + data = fobj.read() fobj = StringIO.StringIO(data) fobj.name = "" tar = tarfile.open(fileobj=fobj, mode=self.mode) @@ -432,7 +433,8 @@ # constructor in case of an error. For the test we rely on # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") - open(empty, "wb").close() + with open(empty, "wb") as fobj: + fobj.write("") try: tar = object.__new__(tarfile.TarFile) @@ -633,10 +635,10 @@ self._test_member(tarinfo, size=7011, chksum=md5_regtype) def test_find_pax_umlauts(self): + self.tar.close() self.tar = tarfile.open(self.tarname, mode=self.mode, encoding="iso8859-1") tarinfo = self.tar.getmember("pax/umlauts-�������") self._test_member(tarinfo, size=7011, chksum=md5_regtype) - self.tar.close() class LongnameTest(ReadTest): @@ -801,9 +803,11 @@ os.mkdir(path) try: tar = tarfile.open(tmpname, self.mode) - tarinfo = tar.gettarinfo(path) - self.assertEqual(tarinfo.size, 0) - tar.close() + try: + tarinfo = tar.gettarinfo(path) + self.assertEqual(tarinfo.size, 0) + finally: + tar.close() finally: os.rmdir(path) @@ -817,11 +821,13 @@ os.link(target, link) try: tar = tarfile.open(tmpname, self.mode) - # Record the link target in the inodes list. - tar.gettarinfo(target) - tarinfo = tar.gettarinfo(link) - self.assertEqual(tarinfo.size, 0) - tar.close() + try: + # Record the link target in the inodes list. + tar.gettarinfo(target) + tarinfo = tar.gettarinfo(link) + self.assertEqual(tarinfo.size, 0) + finally: + tar.close() finally: os.remove(target) os.remove(link) @@ -832,28 +838,33 @@ os.symlink("link_target", path) try: tar = tarfile.open(tmpname, self.mode) - tarinfo = tar.gettarinfo(path) - self.assertEqual(tarinfo.size, 0) - tar.close() + try: + tarinfo = tar.gettarinfo(path) + self.assertEqual(tarinfo.size, 0) + finally: + tar.close() finally: os.remove(path) def test_add_self(self): # Test for #1257255. 
dstname = os.path.abspath(tmpname) + tar = tarfile.open(tmpname, self.mode) + try: + self.assertEqual(tar.name, dstname, + "archive name must be absolute") + tar.add(dstname) + self.assertEqual(tar.getnames(), [], + "added the archive to itself") - tar = tarfile.open(tmpname, self.mode) - self.assertTrue(tar.name == dstname, "archive name must be absolute") - - tar.add(dstname) - self.assertTrue(tar.getnames() == [], "added the archive to itself") - - cwd = os.getcwd() - os.chdir(TEMPDIR) - tar.add(dstname) - os.chdir(cwd) - self.assertTrue(tar.getnames() == [], "added the archive to itself") - tar.close() + cwd = os.getcwd() + os.chdir(TEMPDIR) + tar.add(dstname) + os.chdir(cwd) + self.assertEqual(tar.getnames(), [], + "added the archive to itself") + finally: + tar.close() def test_exclude(self): tempdir = os.path.join(TEMPDIR, "exclude") @@ -866,15 +877,19 @@ exclude = os.path.isfile tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1") - with test_support.check_warnings(("use the filter argument", - DeprecationWarning)): - tar.add(tempdir, arcname="empty_dir", exclude=exclude) - tar.close() + try: + with test_support.check_warnings(("use the filter argument", + DeprecationWarning)): + tar.add(tempdir, arcname="empty_dir", exclude=exclude) + finally: + tar.close() tar = tarfile.open(tmpname, "r") - self.assertEqual(len(tar.getmembers()), 1) - self.assertEqual(tar.getnames()[0], "empty_dir") - tar.close() + try: + self.assertEqual(len(tar.getmembers()), 1) + self.assertEqual(tar.getnames()[0], "empty_dir") + finally: + tar.close() finally: shutil.rmtree(tempdir) @@ -894,15 +909,19 @@ return tarinfo tar = tarfile.open(tmpname, self.mode, encoding="iso8859-1") - tar.add(tempdir, arcname="empty_dir", filter=filter) - tar.close() + try: + tar.add(tempdir, arcname="empty_dir", filter=filter) + finally: + tar.close() tar = tarfile.open(tmpname, "r") - for tarinfo in tar: - self.assertEqual(tarinfo.uid, 123) - self.assertEqual(tarinfo.uname, "foo") - 
self.assertEqual(len(tar.getmembers()), 3) - tar.close() + try: + for tarinfo in tar: + self.assertEqual(tarinfo.uid, 123) + self.assertEqual(tarinfo.uname, "foo") + self.assertEqual(len(tar.getmembers()), 3) + finally: + tar.close() finally: shutil.rmtree(tempdir) @@ -1096,9 +1115,8 @@ fobj.close() elif self.mode.endswith("bz2"): dec = bz2.BZ2Decompressor() - f = open(tmpname, "rb") - data = f.read() - f.close() + with open(tmpname, "rb") as fobj: + data = fobj.read() data = dec.decompress(data) self.assertTrue(len(dec.unused_data) == 0, "found trailing data") @@ -1176,14 +1194,16 @@ tar.close() tar = tarfile.open(tmpname) - member = tar.next() - self.assertIsNotNone(member, - "unable to read longname member") - self.assertEqual(tarinfo.name, member.name, - "unable to read longname member") - self.assertEqual(tarinfo.linkname, member.linkname, - "unable to read longname member") - tar.close() + try: + member = tar.next() + self.assertIsNotNone(member, + "unable to read longname member") + self.assertEqual(tarinfo.name, member.name, + "unable to read longname member") + self.assertEqual(tarinfo.linkname, member.linkname, + "unable to read longname member") + finally: + tar.close() def test_longname_1023(self): self._test(("longnam/" * 127) + "longnam") @@ -1270,13 +1290,15 @@ tar.close() tar = tarfile.open(tmpname) - if link: - l = tar.getmembers()[0].linkname - self.assertTrue(link == l, "PAX longlink creation failed") - else: - n = tar.getmembers()[0].name - self.assertTrue(name == n, "PAX longname creation failed") - tar.close() + try: + if link: + l = tar.getmembers()[0].linkname + self.assertTrue(link == l, "PAX longlink creation failed") + else: + n = tar.getmembers()[0].name + self.assertTrue(name == n, "PAX longname creation failed") + finally: + tar.close() def test_pax_global_header(self): pax_headers = { @@ -1293,39 +1315,45 @@ # Test if the global header was written correctly. 
tar = tarfile.open(tmpname, encoding="iso8859-1") - self.assertEqual(tar.pax_headers, pax_headers) - self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers) - - # Test if all the fields are unicode. - for key, val in tar.pax_headers.iteritems(): - self.assertTrue(type(key) is unicode) - self.assertTrue(type(val) is unicode) - if key in tarfile.PAX_NUMBER_FIELDS: - try: - tarfile.PAX_NUMBER_FIELDS[key](val) - except (TypeError, ValueError): - self.fail("unable to convert pax header field") - tar.close() + try: + self.assertEqual(tar.pax_headers, pax_headers) + self.assertEqual(tar.getmembers()[0].pax_headers, pax_headers) + # Test if all the fields are unicode. + for key, val in tar.pax_headers.iteritems(): + self.assertTrue(type(key) is unicode) + self.assertTrue(type(val) is unicode) + if key in tarfile.PAX_NUMBER_FIELDS: + try: + tarfile.PAX_NUMBER_FIELDS[key](val) + except (TypeError, ValueError): + self.fail("unable to convert pax header field") + finally: + tar.close() def test_pax_extended_header(self): # The fields from the pax header have priority over the # TarInfo. 
pax_headers = {u"path": u"foo", u"uid": u"123"} - tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT, encoding="iso8859-1") - t = tarfile.TarInfo() - t.name = u"���" # non-ASCII - t.uid = 8**8 # too large - t.pax_headers = pax_headers - tar.addfile(t) - tar.close() + tar = tarfile.open(tmpname, "w", format=tarfile.PAX_FORMAT, + encoding="iso8859-1") + try: + t = tarfile.TarInfo() + t.name = u"���" # non-ASCII + t.uid = 8**8 # too large + t.pax_headers = pax_headers + tar.addfile(t) + finally: + tar.close() tar = tarfile.open(tmpname, encoding="iso8859-1") - t = tar.getmembers()[0] - self.assertEqual(t.pax_headers, pax_headers) - self.assertEqual(t.name, "foo") - self.assertEqual(t.uid, 123) - tar.close() + try: + t = tar.getmembers()[0] + self.assertEqual(t.pax_headers, pax_headers) + self.assertEqual(t.name, "foo") + self.assertEqual(t.uid, 123) + finally: + tar.close() class UstarUnicodeTest(unittest.TestCase): @@ -1344,41 +1372,49 @@ def _test_unicode_filename(self, encoding): tar = tarfile.open(tmpname, "w", format=self.format, encoding=encoding, errors="strict") - name = u"���" - tar.addfile(tarfile.TarInfo(name)) - tar.close() + try: + name = u"���" + tar.addfile(tarfile.TarInfo(name)) + finally: + tar.close() tar = tarfile.open(tmpname, encoding=encoding) - self.assertTrue(type(tar.getnames()[0]) is not unicode) - self.assertEqual(tar.getmembers()[0].name, name.encode(encoding)) - tar.close() + try: + self.assertTrue(type(tar.getnames()[0]) is not unicode) + self.assertEqual(tar.getmembers()[0].name, name.encode(encoding)) + finally: + tar.close() def test_unicode_filename_error(self): tar = tarfile.open(tmpname, "w", format=self.format, encoding="ascii", errors="strict") - tarinfo = tarfile.TarInfo() + try: + tarinfo = tarfile.TarInfo() - tarinfo.name = "���" - if self.format == tarfile.PAX_FORMAT: + tarinfo.name = "���" + if self.format == tarfile.PAX_FORMAT: + self.assertRaises(UnicodeError, tar.addfile, tarinfo) + else: + tar.addfile(tarinfo) + + 
tarinfo.name = u"���" self.assertRaises(UnicodeError, tar.addfile, tarinfo) - else: - tar.addfile(tarinfo) - tarinfo.name = u"���" - self.assertRaises(UnicodeError, tar.addfile, tarinfo) - - tarinfo.name = "foo" - tarinfo.uname = u"���" - self.assertRaises(UnicodeError, tar.addfile, tarinfo) - tar.close() + tarinfo.name = "foo" + tarinfo.uname = u"���" + self.assertRaises(UnicodeError, tar.addfile, tarinfo) + finally: + tar.close() def test_unicode_argument(self): tar = tarfile.open(tarname, "r", encoding="iso8859-1", errors="strict") - for t in tar: - self.assertTrue(type(t.name) is str) - self.assertTrue(type(t.linkname) is str) - self.assertTrue(type(t.uname) is str) - self.assertTrue(type(t.gname) is str) - tar.close() + try: + for t in tar: + self.assertTrue(type(t.name) is str) + self.assertTrue(type(t.linkname) is str) + self.assertTrue(type(t.uname) is str) + self.assertTrue(type(t.gname) is str) + finally: + tar.close() def test_uname_unicode(self): for name in (u"���", "���"): @@ -1449,23 +1485,20 @@ test_support.unlink(self.tarname) def _add_testfile(self, fileobj=None): - tar = tarfile.open(self.tarname, "a", fileobj=fileobj) - tar.addfile(tarfile.TarInfo("bar")) - tar.close() + with tarfile.open(self.tarname, "a", fileobj=fileobj) as tar: + tar.addfile(tarfile.TarInfo("bar")) def _create_testtar(self, mode="w:"): - src = tarfile.open(tarname, encoding="iso8859-1") - t = src.getmember("ustar/regtype") - t.name = "foo" - f = src.extractfile(t) - tar = tarfile.open(self.tarname, mode) - tar.addfile(t, f) - tar.close() + with tarfile.open(tarname, encoding="iso8859-1") as src: + t = src.getmember("ustar/regtype") + t.name = "foo" + f = src.extractfile(t) + with tarfile.open(self.tarname, mode) as tar: + tar.addfile(t, f) def _test(self, names=["bar"], fileobj=None): - tar = tarfile.open(self.tarname, fileobj=fileobj) - self.assertEqual(tar.getnames(), names) - tar.close() + with tarfile.open(self.tarname, fileobj=fileobj) as tar: + 
self.assertEqual(tar.getnames(), names) def test_non_existing(self): self._add_testfile() @@ -1484,9 +1517,8 @@ def test_fileobj(self): self._create_testtar() - f = open(self.tarname) - data = f.read() - f.close() + with open(self.tarname) as fobj: + data = fobj.read() fobj = StringIO.StringIO(data) self._add_testfile(fobj) fobj.seek(0) @@ -1510,9 +1542,8 @@ # Append mode is supposed to fail if the tarfile to append to # does not end with a zero block. def _test_error(self, data): - f = open(self.tarname, "wb") - f.write(data) - f.close() + with open(self.tarname, "wb") as fobj: + fobj.write(data) self.assertRaises(tarfile.ReadError, self._add_testfile) def test_null(self): @@ -1640,15 +1671,14 @@ def test_fileobj(self): # Test that __exit__() did not close the external file # object. - fobj = open(tmpname, "wb") - try: - with tarfile.open(fileobj=fobj, mode="w") as tar: - raise Exception - except: - pass - self.assertFalse(fobj.closed, "external file object was closed") - self.assertTrue(tar.closed, "context manager failed") - fobj.close() + with open(tmpname, "wb") as fobj: + try: + with tarfile.open(fileobj=fobj, mode="w") as tar: + raise Exception + except: + pass + self.assertFalse(fobj.closed, "external file object was closed") + self.assertTrue(tar.closed, "context manager failed") class LinkEmulationTest(ReadTest): From noreply at buildbot.pypy.org Mon Aug 25 23:04:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 25 Aug 2014 23:04:55 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: further improvements for test_tarfile Message-ID: <20140825210455.E43DC1D34AC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73059:0ebbdd28755b Date: 2014-08-25 17:00 -0400 http://bitbucket.org/pypy/pypy/changeset/0ebbdd28755b/ Log: further improvements for test_tarfile diff --git a/lib-python/2.7/test/test_tarfile.py b/lib-python/2.7/test/test_tarfile.py --- a/lib-python/2.7/test/test_tarfile.py +++ 
b/lib-python/2.7/test/test_tarfile.py @@ -42,7 +42,8 @@ mode = "r:" def setUp(self): - self.tar = tarfile.open(self.tarname, mode=self.mode, encoding="iso8859-1") + self.tar = tarfile.open(self.tarname, mode=self.mode, + encoding="iso8859-1") def tearDown(self): self.tar.close() @@ -148,13 +149,15 @@ self._test_fileobj_link("ustar/lnktype", "ustar/regtype") def test_fileobj_link2(self): - self._test_fileobj_link("./ustar/linktest2/lnktype", "ustar/linktest1/regtype") + self._test_fileobj_link("./ustar/linktest2/lnktype", + "ustar/linktest1/regtype") def test_fileobj_symlink1(self): self._test_fileobj_link("ustar/symtype", "ustar/regtype") def test_fileobj_symlink2(self): - self._test_fileobj_link("./ustar/linktest2/symtype", "ustar/linktest1/regtype") + self._test_fileobj_link("./ustar/linktest2/symtype", + "ustar/linktest1/regtype") def test_issue14160(self): self._test_fileobj_link("symtype2", "ustar/regtype") @@ -241,7 +244,8 @@ # This test checks if tarfile.open() is able to open an empty tar # archive successfully. Note that an empty tar archive is not the # same as an empty file! - tarfile.open(tmpname, self.mode.replace("r", "w")).close() + with tarfile.open(tmpname, self.mode.replace("r", "w")): + pass try: tar = tarfile.open(tmpname, self.mode) tar.getnames() @@ -256,7 +260,8 @@ # Test for issue6123: Allow opening empty archives. # This test guarantees that tarfile.open() does not treat an empty # file as an empty tar archive. - open(tmpname, "wb").close() + with open(tmpname, "wb"): + pass self.assertRaises(tarfile.ReadError, tarfile.open, tmpname, self.mode) self.assertRaises(tarfile.ReadError, tarfile.open, tmpname) @@ -280,15 +285,17 @@ for char in ('\0', 'a'): # Test if EOFHeaderError ('\0') and InvalidHeaderError ('a') # are ignored correctly. 
- fobj = _open(tmpname, "wb") - fobj.write(char * 1024) - fobj.write(tarfile.TarInfo("foo").tobuf()) - fobj.close() + with _open(tmpname, "wb") as fobj: + fobj.write(char * 1024) + fobj.write(tarfile.TarInfo("foo").tobuf()) tar = tarfile.open(tmpname, mode="r", ignore_zeros=True) - self.assertListEqual(tar.getnames(), ["foo"], - "ignore_zeros=True should have skipped the %r-blocks" % char) - tar.close() + try: + self.assertListEqual(tar.getnames(), ["foo"], + "ignore_zeros=True should have skipped the %r-blocks" % + char) + finally: + tar.close() class MiscReadTest(CommonReadTest): @@ -312,8 +319,8 @@ data = fobj.read() fobj = StringIO.StringIO(data) fobj.name = "" - tar = tarfile.open(fileobj=fobj, mode=self.mode) - self.assertEqual(tar.name, None) + with tarfile.open(fileobj=fobj, mode=self.mode) as tar: + self.assertEqual(tar.name, None) def test_illegal_mode_arg(self): with open(tmpname, 'wb'): @@ -330,12 +337,14 @@ # Skip the first member and store values from the second member # of the testtar. tar = tarfile.open(self.tarname, mode=self.mode) - tar.next() - t = tar.next() - name = t.name - offset = t.offset - data = tar.extractfile(t).read() - tar.close() + try: + tar.next() + t = tar.next() + name = t.name + offset = t.offset + data = tar.extractfile(t).read() + finally: + tar.close() # Open the testtar and seek to the offset of the second member. if self.mode.endswith(":gz"): @@ -344,27 +353,28 @@ _open = bz2.BZ2File else: _open = open - fobj = _open(self.tarname, "rb") - fobj.seek(offset) + with _open(self.tarname, "rb") as fobj: + fobj.seek(offset) - # Test if the tarfile starts with the second member. - tar = tar.open(self.tarname, mode="r:", fileobj=fobj) - t = tar.next() - self.assertEqual(t.name, name) - # Read to the end of fileobj and test if seeking back to the - # beginning works. - tar.getmembers() - self.assertEqual(tar.extractfile(t).read(), data, - "seek back did not work") - tar.close() + # Test if the tarfile starts with the second member. 
+ tar = tar.open(self.tarname, mode="r:", fileobj=fobj) + t = tar.next() + self.assertEqual(t.name, name) + # Read to the end of fileobj and test if seeking back to the + # beginning works. + tar.getmembers() + self.assertEqual(tar.extractfile(t).read(), data, + "seek back did not work") + tar.close() def test_fail_comp(self): # For Gzip and Bz2 Tests: fail with a ReadError on an uncompressed file. if self.mode == "r:": self.skipTest('needs a gz or bz2 mode') self.assertRaises(tarfile.ReadError, tarfile.open, tarname, self.mode) - fobj = open(tarname, "rb") - self.assertRaises(tarfile.ReadError, tarfile.open, fileobj=fobj, mode=self.mode) + with open(tarname, "rb") as fobj: + self.assertRaises(tarfile.ReadError, tarfile.open, + fileobj=fobj, mode=self.mode) def test_v7_dirtype(self): # Test old style dirtype member (bug #1336623): @@ -418,15 +428,17 @@ # Test if extractall() correctly restores directory permissions # and times (see issue1735). tar = tarfile.open(tarname, encoding="iso8859-1") - directories = [t for t in tar if t.isdir()] - tar.extractall(TEMPDIR, directories) - for tarinfo in directories: - path = os.path.join(TEMPDIR, tarinfo.name) - if sys.platform != "win32": - # Win32 has no support for fine grained permissions. - self.assertEqual(tarinfo.mode & 0777, os.stat(path).st_mode & 0777) - self.assertEqual(tarinfo.mtime, os.path.getmtime(path)) - tar.close() + try: + directories = [t for t in tar if t.isdir()] + tar.extractall(TEMPDIR, directories) + for tarinfo in directories: + path = os.path.join(TEMPDIR, tarinfo.name) + if sys.platform != "win32": + # Win32 has no support for fine grained permissions. + self.assertEqual(tarinfo.mode & 0777, os.stat(path).st_mode & 0777) + self.assertEqual(tarinfo.mtime, os.path.getmtime(path)) + finally: + tar.close() def test_init_close_fobj(self): # Issue #7341: Close the internal file object in the TarFile @@ -434,7 +446,7 @@ # the fact that opening an empty file raises a ReadError. 
empty = os.path.join(TEMPDIR, "empty") with open(empty, "wb") as fobj: - fobj.write("") + fobj.write(b"") try: tar = object.__new__(tarfile.TarFile) @@ -474,42 +486,50 @@ def test_compare_members(self): tar1 = tarfile.open(tarname, encoding="iso8859-1") - tar2 = self.tar + try: + tar2 = self.tar - while True: - t1 = tar1.next() - t2 = tar2.next() - if t1 is None: - break - self.assertTrue(t2 is not None, "stream.next() failed.") + while True: + t1 = tar1.next() + t2 = tar2.next() + if t1 is None: + break + self.assertIsNotNone(t2, "stream.next() failed.") - if t2.islnk() or t2.issym(): - self.assertRaises(tarfile.StreamError, tar2.extractfile, t2) - continue + if t2.islnk() or t2.issym(): + with self.assertRaises(tarfile.StreamError): + tar2.extractfile(t2) + continue - v1 = tar1.extractfile(t1) - v2 = tar2.extractfile(t2) - if v1 is None: - continue - self.assertTrue(v2 is not None, "stream.extractfile() failed") - self.assertTrue(v1.read() == v2.read(), "stream extraction failed") - - tar1.close() + v1 = tar1.extractfile(t1) + v2 = tar2.extractfile(t2) + if v1 is None: + continue + self.assertIsNotNone(v2, "stream.extractfile() failed") + self.assertEqual(v1.read(), v2.read(), + "stream extraction failed") + finally: + tar1.close() class DetectReadTest(unittest.TestCase): def _testfunc_file(self, name, mode): try: - tarfile.open(name, mode) - except tarfile.ReadError: + tar = tarfile.open(name, mode) + except tarfile.ReadError as e: self.fail() + else: + tar.close() def _testfunc_fileobj(self, name, mode): try: - tarfile.open(name, mode, fileobj=open(name, "rb")) - except tarfile.ReadError: + with open(name, "rb") as f: + tar = tarfile.open(name, mode, fileobj=f) + except tarfile.ReadError as e: self.fail() + else: + tar.close() def _test_modes(self, testfunc): testfunc(tarname, "r") @@ -636,7 +656,8 @@ def test_find_pax_umlauts(self): self.tar.close() - self.tar = tarfile.open(self.tarname, mode=self.mode, encoding="iso8859-1") + self.tar = 
tarfile.open(self.tarname, mode=self.mode, + encoding="iso8859-1") tarinfo = self.tar.getmember("pax/umlauts-�������") self._test_member(tarinfo, size=7011, chksum=md5_regtype) @@ -667,17 +688,18 @@ offset = tarinfo.offset self.tar.fileobj.seek(offset) fobj = StringIO.StringIO(self.tar.fileobj.read(3 * 512)) - self.assertRaises(tarfile.ReadError, tarfile.open, name="foo.tar", fileobj=fobj) + with self.assertRaises(tarfile.ReadError): + tarfile.open(name="foo.tar", fileobj=fobj) def test_header_offset(self): # Test if the start offset of the TarInfo object includes # the preceding extended header. longname = self.subdir + "/" + "123/" * 125 + "longname" offset = self.tar.getmember(longname).offset - fobj = open(tarname) - fobj.seek(offset) - tarinfo = tarfile.TarInfo.frombuf(fobj.read(512)) - self.assertEqual(tarinfo.type, self.longnametype) + with open(tarname, "rb") as fobj: + fobj.seek(offset) + tarinfo = tarfile.TarInfo.frombuf(fobj.read(512)) + self.assertEqual(tarinfo.type, self.longnametype) class GNUReadTest(LongnameTest): @@ -759,44 +781,48 @@ # a trailing '\0'. name = "0123456789" * 10 tar = tarfile.open(tmpname, self.mode) - t = tarfile.TarInfo(name) - tar.addfile(t) - tar.close() + try: + t = tarfile.TarInfo(name) + tar.addfile(t) + finally: + tar.close() tar = tarfile.open(tmpname) - self.assertTrue(tar.getnames()[0] == name, - "failed to store 100 char filename") - tar.close() + try: + self.assertTrue(tar.getnames()[0] == name, + "failed to store 100 char filename") + finally: + tar.close() def test_tar_size(self): # Test for bug #1013882. 
tar = tarfile.open(tmpname, self.mode) - path = os.path.join(TEMPDIR, "file") - fobj = open(path, "wb") - fobj.write("aaa") - fobj.close() - tar.add(path) - tar.close() + try: + path = os.path.join(TEMPDIR, "file") + with open(path, "wb") as fobj: + fobj.write(b"aaa") + tar.add(path) + finally: + tar.close() self.assertTrue(os.path.getsize(tmpname) > 0, "tarfile is empty") # The test_*_size tests test for bug #1167128. def test_file_size(self): tar = tarfile.open(tmpname, self.mode) + try: + path = os.path.join(TEMPDIR, "file") + with open(path, "wb"): + pass + tarinfo = tar.gettarinfo(path) + self.assertEqual(tarinfo.size, 0) - path = os.path.join(TEMPDIR, "file") - fobj = open(path, "wb") - fobj.close() - tarinfo = tar.gettarinfo(path) - self.assertEqual(tarinfo.size, 0) - - fobj = open(path, "wb") - fobj.write("aaa") - fobj.close() - tarinfo = tar.gettarinfo(path) - self.assertEqual(tarinfo.size, 3) - - tar.close() + with open(path, "wb") as fobj: + fobj.write(b"aaa") + tarinfo = tar.gettarinfo(path) + self.assertEqual(tarinfo.size, 3) + finally: + tar.close() def test_directory_size(self): path = os.path.join(TEMPDIR, "directory") @@ -811,40 +837,41 @@ finally: os.rmdir(path) + @unittest.skipUnless(hasattr(os, "link"), + "Missing hardlink implementation") def test_link_size(self): - if hasattr(os, "link"): - link = os.path.join(TEMPDIR, "link") - target = os.path.join(TEMPDIR, "link_target") - fobj = open(target, "wb") - fobj.write("aaa") - fobj.close() - os.link(target, link) + link = os.path.join(TEMPDIR, "link") + target = os.path.join(TEMPDIR, "link_target") + with open(target, "wb") as fobj: + fobj.write(b"aaa") + os.link(target, link) + try: + tar = tarfile.open(tmpname, self.mode) try: - tar = tarfile.open(tmpname, self.mode) - try: - # Record the link target in the inodes list. - tar.gettarinfo(target) - tarinfo = tar.gettarinfo(link) - self.assertEqual(tarinfo.size, 0) - finally: - tar.close() + # Record the link target in the inodes list. 
+ tar.gettarinfo(target) + tarinfo = tar.gettarinfo(link) + self.assertEqual(tarinfo.size, 0) finally: - os.remove(target) - os.remove(link) + tar.close() + finally: + os.remove(target) + os.remove(link) + @unittest.skipUnless(hasattr(os, "symlink"), + "Missing symlink implementation") def test_symlink_size(self): - if hasattr(os, "symlink"): - path = os.path.join(TEMPDIR, "symlink") - os.symlink("link_target", path) + path = os.path.join(TEMPDIR, "symlink") + os.symlink("link_target", path) + try: + tar = tarfile.open(tmpname, self.mode) try: - tar = tarfile.open(tmpname, self.mode) - try: - tarinfo = tar.gettarinfo(path) - self.assertEqual(tarinfo.size, 0) - finally: - tar.close() + tarinfo = tar.gettarinfo(path) + self.assertEqual(tarinfo.size, 0) finally: - os.remove(path) + tar.close() + finally: + os.remove(path) def test_add_self(self): # Test for #1257255. @@ -939,12 +966,16 @@ os.mkdir(foo) tar = tarfile.open(tmpname, self.mode) - tar.add(foo, arcname=path) - tar.close() + try: + tar.add(foo, arcname=path) + finally: + tar.close() tar = tarfile.open(tmpname, "r") - t = tar.next() - tar.close() + try: + t = tar.next() + finally: + tar.close() if not dir: os.remove(foo) @@ -1386,7 +1417,8 @@ tar.close() def test_unicode_filename_error(self): - tar = tarfile.open(tmpname, "w", format=self.format, encoding="ascii", errors="strict") + tar = tarfile.open(tmpname, "w", format=self.format, + encoding="ascii", errors="strict") try: tarinfo = tarfile.TarInfo() @@ -1406,7 +1438,8 @@ tar.close() def test_unicode_argument(self): - tar = tarfile.open(tarname, "r", encoding="iso8859-1", errors="strict") + tar = tarfile.open(tarname, "r", + encoding="iso8859-1", errors="strict") try: for t in tar: self.assertTrue(type(t.name) is str) @@ -1688,7 +1721,8 @@ # the regular files they point to. 
def _test_link_extraction(self, name): self.tar.extract(name, TEMPDIR) - data = open(os.path.join(TEMPDIR, name), "rb").read() + with open(os.path.join(TEMPDIR, name), "rb") as f: + data = f.read() self.assertEqual(md5sum(data), md5_regtype) def test_hardlink_extraction1(self): From noreply at buildbot.pypy.org Tue Aug 26 15:16:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 26 Aug 2014 15:16:34 +0200 (CEST) Subject: [pypy-commit] stmgc default: Port tests back from pypy/duhton about variable-argument && and || Message-ID: <20140826131634.5F9671D2339@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1323:ec32e70ca45c Date: 2014-08-26 15:16 +0200 http://bitbucket.org/pypy/stmgc/changeset/ec32e70ca45c/ Log: Port tests back from pypy/duhton about variable-argument && and || diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -144,24 +144,13 @@ DuObject *du_xor(DuObject *cons, DuObject *locals) { int result = 0; - /* _du_read1(cons); IMMUTABLE */ - DuObject *expr = _DuCons_CAR(cons); - DuObject *next = _DuCons_NEXT(cons); - - _du_save2(next, locals); - DuObject *obj = Du_Eval(expr, locals); - result = DuInt_AsInt(obj); - _du_restore2(next, locals); - - cons = next; - while (cons != Du_None) { /* _du_read1(cons); IMMUTABLE */ - expr = _DuCons_CAR(cons); - next = _DuCons_NEXT(cons); + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); _du_save2(next, locals); - obj = Du_Eval(expr, locals); + DuObject *obj = Du_Eval(expr, locals); result ^= DuInt_AsInt(obj); _du_restore2(next, locals); @@ -353,8 +342,6 @@ case 3: r = a != b; break; case 4: r = a > b; break; case 5: r = a >= b; break; - case 6: r = a && b; break; - case 7: r = a || b; break; } return DuInt_FromInt(r); } @@ -371,11 +358,48 @@ { return _du_intcmp(cons, locals, 4); } DuObject *du_ge(DuObject *cons, DuObject *locals) { return _du_intcmp(cons, locals, 5); } + DuObject *du_and(DuObject *cons, DuObject *locals) -{ return 
_du_intcmp(cons, locals, 6); } +{ + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + int result = DuObject_IsTrue(obj); + _du_restore2(next, locals); + + if (!result) + return DuInt_FromInt(0); + + cons = next; + } + + return DuInt_FromInt(1); +} + DuObject *du_or(DuObject *cons, DuObject *locals) -{ return _du_intcmp(cons, locals, 7); } +{ + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + int result = DuObject_IsTrue(obj); + _du_restore2(next, locals); + + if (result) + return DuInt_FromInt(1); + + cons = next; + } + + return DuInt_FromInt(0); +} DuObject *du_type(DuObject *cons, DuObject *locals) diff --git a/duhton/test/test_int.py b/duhton/test/test_int.py --- a/duhton/test/test_int.py +++ b/duhton/test/test_int.py @@ -20,9 +20,11 @@ assert evaluate("(* 2 3 7)") == 42 assert evaluate("(* (+ 5 1) (+ 6 1))") == 42 -def test_div(): +def test_div_mod(): assert evaluate("(/ 11 2)") == 5 assert evaluate("(/ 29 2 3)") == 4 + assert evaluate("(% 29 2)") == 1 + assert evaluate("(% 29 10 3)") == 0 def test_cmp(): assert evaluate("(< 6 6)") == 0 @@ -47,3 +49,35 @@ assert evaluate("(>= 7 6)") == 1 # assert evaluate("(< (+ 10 2) (+ 4 5))") == 0 + +def test_and_or(): + assert evaluate("(&& 1 1 1)") == 1 + assert evaluate("(&& 1 0 1)") == 0 + assert evaluate("(&& 0 sdfdsfsd)") == 0 + assert evaluate("(&& None)") == 0 + assert evaluate("(&& (quote bla))") == 1 + assert evaluate("(&& )") == 1 + + assert evaluate("(|| 0 1)") == 1 + assert evaluate("(|| 0 0 0 1)") == 1 + assert evaluate("(|| 0 0 0)") == 0 + assert evaluate("(|| 1 sdfdsfafds)") == 1 + assert evaluate("(|| None)") == 0 + assert evaluate("(|| (quote bla))") == 1 + assert evaluate("(|| )") == 0 + + 
+def test_shifts_bitwise(): + assert evaluate("(<< 1 1)") == 2 + assert evaluate("(<< 12)") == 12 + assert evaluate("(<< 1 1 1)") == 4 + assert evaluate("(<< 0 1)") == 0 + + assert evaluate("(>> 4 1 1)") == 1 + assert evaluate("(>> 4 3)") == 0 + assert evaluate("(>> 4)") == 4 + + assert evaluate("(^ 1 4)") == 1 ^ 4 + assert evaluate("(^ 1 4 122)") == 1 ^ 4 ^ 122 + assert evaluate("(^ 1)") == 1 + assert evaluate("(^)") == 0 From noreply at buildbot.pypy.org Tue Aug 26 17:34:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 26 Aug 2014 17:34:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Add an enforceargs, bah Message-ID: <20140826153453.AD28A1D2931@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73060:818f09dbcaed Date: 2014-08-26 17:34 +0200 http://bitbucket.org/pypy/pypy/changeset/818f09dbcaed/ Log: Add an enforceargs, bah diff --git a/rpython/rtyper/rlist.py b/rpython/rtyper/rlist.py --- a/rpython/rtyper/rlist.py +++ b/rpython/rtyper/rlist.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rlib import rgc, jit, types from rpython.rlib.debug import ll_assert -from rpython.rlib.objectmodel import malloc_zero_filled +from rpython.rlib.objectmodel import malloc_zero_filled, enforceargs from rpython.rlib.signature import signature from rpython.rlib.rarithmetic import ovfcheck, widen, r_uint, intmask from rpython.rtyper.annlowlevel import ADTInterface @@ -722,6 +722,7 @@ l.ll_setitem_fast(index, newitem) # no oopspec -- the function is inlined by the JIT + at enforceargs(None, None, int) def ll_delitem_nonneg(func, l, index): ll_assert(index >= 0, "unexpectedly negative list delitem index") length = l.ll_length() From noreply at buildbot.pypy.org Tue Aug 26 18:15:59 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 26 Aug 2014 18:15:59 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: PYPY_NO_OP is transactionsafe Message-ID: <20140826161559.C53521C02AF@cobra.cs.uni-duesseldorf.de> Author: Remi 
Meier Branch: stmgc-c7 Changeset: r73061:2a2fff134037 Date: 2014-08-26 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/2a2fff134037/ Log: PYPY_NO_OP is transactionsafe diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -393,7 +393,7 @@ eci = eci.merge(compilation_info) return llexternal('PYPY_NO_OP', [], lltype.Void, compilation_info=eci, sandboxsafe=True, _nowrapper=True, - _callable=lambda: None) + _callable=lambda: None, transactionsafe=True) def generate_macro_wrapper(name, macro, functype, eci): """Wraps a function-like macro inside a real function, and expose From noreply at buildbot.pypy.org Tue Aug 26 18:16:01 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 26 Aug 2014 18:16:01 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: stm_collect needs roots pushed Message-ID: <20140826161601.335AA1C02AF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73062:7d2441485839 Date: 2014-08-26 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2441485839/ Log: stm_collect needs roots pushed diff --git a/rpython/memory/gctransform/stmframework.py b/rpython/memory/gctransform/stmframework.py --- a/rpython/memory/gctransform/stmframework.py +++ b/rpython/memory/gctransform/stmframework.py @@ -159,6 +159,7 @@ gct_stm_become_inevitable = _gct_with_roots_pushed gct_stm_become_globally_unique_transaction = _gct_with_roots_pushed gct_stm_transaction_break = _gct_with_roots_pushed + gct_stm_collect = _gct_with_roots_pushed class StmRootWalker(BaseRootWalker): From noreply at buildbot.pypy.org Tue Aug 26 18:16:02 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 26 Aug 2014 18:16:02 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: break transactions during tracing Message-ID: <20140826161602.6884D1C02AF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73063:27ec5df9b53f 
Date: 2014-08-26 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/27ec5df9b53f/ Log: break transactions during tracing diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -189,7 +189,7 @@ @arguments("int") def opimpl_stm_should_break_transaction(self, keep): - # from rpython.rlib import rstm + from rpython.rlib import rstm record_break = False resbox = history.ConstInt(0) @@ -199,13 +199,7 @@ resbox = history.BoxInt(0) record_break = True - ## XXX: not working yet. we are always inevitable when tracing - # if we_are_translated() and rstm.is_inevitable(): - # # return BoxInt(1) if there is an inevitable - # # transaction, because it's likely that there - # # will always be an inevitable transaction here - # resbox = history.BoxInt(1) - # record_break = True + rstm.possible_transaction_break(0) if record_break: mi = self.metainterp From noreply at buildbot.pypy.org Tue Aug 26 18:16:03 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 26 Aug 2014 18:16:03 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: tracing was ignoring stm_dont_track_raw_accesses Message-ID: <20140826161603.8E6F21C02AF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73064:ba0e9a5ccb55 Date: 2014-08-26 18:15 +0200 http://bitbucket.org/pypy/pypy/changeset/ba0e9a5ccb55/ Log: tracing was ignoring stm_dont_track_raw_accesses diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -103,7 +103,7 @@ def is_immutable(self): return self._immutable - + def is_pointer_field(self): return self.flag == FLAG_POINTER @@ -139,6 +139,9 @@ def repr_of_descr(self): return '' % (self.flag, self.name, self.offset) + def stm_should_track_raw_accesses(self): + return not self.stm_dont_track_raw_accesses + def get_field_descr(gccache, 
STRUCT, fieldname): cache = gccache._cache_field @@ -215,7 +218,7 @@ def is_immutable(self): return self._immutable - + def is_array_of_pointers(self): return self.flag == FLAG_POINTER @@ -295,7 +298,7 @@ def is_immutable(self): return self._immutable - + def sort_key(self): return self.fielddescr.sort_key() diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -188,7 +188,8 @@ return BoxInt(cpu.bh_getfield_raw_i(struct, fielddescr, pure)) def do_getfield_raw(cpu, _, structbox, fielddescr): - return _do_getfield_raw(cpu, False, structbox, fielddescr) + pure = not fielddescr.stm_should_track_raw_accesses() + return _do_getfield_raw(cpu, pure, structbox, fielddescr) def do_getfield_raw_pure(cpu, _, structbox, fielddescr): return _do_getfield_raw(cpu, True, structbox, fielddescr) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -155,6 +155,9 @@ def get_vinfo(self): raise NotImplementedError + def stm_should_track_raw_accesses(self): + return True + class AbstractFailDescr(AbstractDescr): index = -1 final_descr = False From noreply at buildbot.pypy.org Tue Aug 26 18:16:04 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Tue, 26 Aug 2014 18:16:04 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: make thread number optional in logparser Message-ID: <20140826161604.AA9E61C02AF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73065:c9e3663343e3 Date: 2014-08-26 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/c9e3663343e3/ Log: make thread number optional in logparser diff --git a/rpython/tool/logparser.py b/rpython/tool/logparser.py --- a/rpython/tool/logparser.py +++ b/rpython/tool/logparser.py @@ -26,8 +26,8 @@ def parse_log(lines, verbose=False): color = "(?:\x1b.*?m)?" 
- thread = "\d+#\s" - r_start = re.compile(color + thread + + thread = "\d*#?\s?" + r_start = re.compile(color + thread + r"\[([0-9a-fA-F]+)\] \{([\w-]+)" + color + "$") r_stop = re.compile(color + thread + r"\[([0-9a-fA-F]+)\] ([\w-]+)\}" + color + "$") @@ -117,7 +117,7 @@ for entry in log: if not entry[0].startswith(catprefix): if len(entry) > 3: - newlog.append(entry[:3] + + newlog.append(entry[:3] + (kill_category(entry[3], catprefix),)) else: newlog.append(entry) From noreply at buildbot.pypy.org Tue Aug 26 19:58:23 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default (bf3e8fa831fd) Message-ID: <20140826175823.57C731C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73066:9310ca287cdd Date: 2014-08-24 15:54 -0700 http://bitbucket.org/pypy/pypy/changeset/9310ca287cdd/ Log: merge default (bf3e8fa831fd) diff too long, truncating to 2000 out of 5869 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -354,6 +354,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -101,7 +101,7 @@ st = {} for c in map(unichr, range(256)): st[c] = SYNTAX_SYMBOL - for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + for c in [a for a in map(unichr, range(256)) if a.isalnum()]: st[c] = SYNTAX_WORD st[unicode('\n')] = st[unicode(' ')] = SYNTAX_WHITESPACE return st diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,6 @@ .. 
branch: pytest-25 Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, respectively. + +.. branch: split-ast-classes +Classes in the ast module are now distinct from structures used by the compiler. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,7 +37,7 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. -**Note:** PyPy is currently not supported for 64 bit Windows, and translation +**Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. Python and a C compiler are all you need to build pypy, but it will miss some @@ -136,7 +136,7 @@ cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.lib + copy zlib.lib copy zlib.h zconf.h The bz2 compression library @@ -165,27 +165,29 @@ directory. Version 2.1.0 is known to pass tests. Then open the project file ``expat.dsw`` with Visual Studio; follow the instruction for converting the project files, -switch to the "Release" configuration, reconfigure the runtime for -Multi-threaded DLL (/MD) and build the solution (the ``expat`` project -is actually enough for pypy). +switch to the "Release" configuration, use the ``expat_static`` project, +reconfigure the runtime for Multi-threaded DLL (/MD) and build. -Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and -both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. +Then, copy the file ``win32\bin\release\libexpat.lib`` somewhere in +somewhere in LIB, and both ``lib\expat.h`` and ``lib\expat_external.h`` +somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ OpenSSL needs a Perl interpreter to configure its makefile. You may -use the one distributed by ActiveState, or the one from cygwin. In -both case the perl interpreter must be found on the PATH. 
+use the one distributed by ActiveState, or the one from cygwin.:: - svn export http://svn.python.org/projects/external/openssl-0.9.8y - cd openssl-0.9.8y - perl Configure VC-WIN32 + svn export http://svn.python.org/projects/external/openssl-1.0.1i + cd openssl-1.0.1i + perl Configure VC-WIN32 no-idea no-mdc2 ms\do_ms.bat nmake -f ms\nt.mak install +Then, copy the files ``out32\*.lib`` somewhere in +somewhere in LIB, and the entire ``include\openssl`` directory as-is somewhere +in INCLUDE. + TkInter module support ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,5 +1,4 @@ # Generated by tools/asdl_py.py -from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name @@ -21,11 +20,15 @@ 'AST string must be of type str or unicode')) return w_obj - -class AST(W_Root): - - w_dict = None - +def get_field(space, w_node, name, optional): + w_obj = w_node.getdictvalue(space, name) + if w_obj is None and not optional: + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %T", name, w_node) + return w_obj + + +class AST(object): __metaclass__ = extendabletype def walkabout(self, visitor): @@ -34,8 +37,23 @@ def mutate_over(self, visitor): raise AssertionError("mutate_over() implementation not provided") - def sync_app_attrs(self, space): - raise NotImplementedError + +class NodeVisitorNotImplemented(Exception): + pass + + +class _FieldsWrapper(W_Root): + "Hack around the fact we can't store tuples on a TypeDef." 
+ + def __init__(self, fields): + self.fields = fields + + def __spacebind__(self, space): + return space.newtuple([space.wrap(field) for field in self.fields]) + + +class W_AST(W_Root): + w_dict = None def getdict(self, space): if self.w_dict is None: @@ -47,7 +65,7 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = w_type.getdictvalue(space, "_fields") + w_fields = space.getattr(w_type, space.wrap("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, @@ -71,79 +89,94 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) - def missing_field(self, space, required, host): - "Find which required field is missing." - state = self.initialization_state - for i in range(len(required)): - if (state >> i) & 1: - continue # field is present - missing = required[i] - if missing is None: - continue # field is optional - w_obj = self.getdictvalue(space, missing) - if w_obj is None: - raise oefmt(space.w_TypeError, - "required field \"%s\" missing from %s", - missing, host) - else: - raise oefmt(space.w_TypeError, - "incorrect type for field \"%s\" in %s", - missing, host) - raise AssertionError("should not reach here") - - -class NodeVisitorNotImplemented(Exception): - pass - - -class _FieldsWrapper(W_Root): - "Hack around the fact we can't store tuples on a TypeDef." 
- - def __init__(self, fields): - self.fields = fields - - def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) - - -def get_AST_new(node_class): - def generic_AST_new(space, w_type, __args__): - node = space.allocate_instance(node_class, w_type) - node.initialization_state = 0 - return space.wrap(node) - return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__) - -def AST_init(space, w_self, __args__): +def W_AST_new(space, w_type, __args__): + node = space.allocate_instance(W_AST, w_type) + return space.wrap(node) + +def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() - if args_w and len(args_w) != 0: - w_err = space.wrap("_ast.AST constructor takes 0 positional arguments") - raise OperationError(space.w_TypeError, w_err) + fields_w = space.fixedview(space.getattr(space.type(w_self), + space.wrap("_fields"))) + num_fields = len(fields_w) if fields_w else 0 + if args_w and len(args_w) != num_fields: + if num_fields == 0: + raise oefmt(space.w_TypeError, + "%T constructor takes 0 positional arguments", w_self) + elif num_fields == 1: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional argument", w_self, num_fields) + else: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional arguments", w_self, num_fields) + if args_w: + for i, w_field in enumerate(fields_w): + space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("_ast.AST", + +W_AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __reduce__=interp2app(AST.reduce_w), - __setstate__=interp2app(AST.setstate_w), + __reduce__=interp2app(W_AST.reduce_w), + __setstate__=interp2app(W_AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, - typedef.descr_set_dict, cls=AST), - 
__new__=interp2app(get_AST_new(AST)), - __init__=interp2app(AST_init), -) - - - + typedef.descr_set_dict, cls=W_AST), + __new__=interp2app(W_AST_new), + __init__=interp2app(W_AST_init), +) + +class State: + AST_TYPES = [] + + @classmethod + def ast_type(cls, name, base, fields, attributes=None): + cls.AST_TYPES.append((name, base, fields, attributes)) + + def __init__(self, space): + self.w_AST = space.gettypeobject(W_AST.typedef) + for (name, base, fields, attributes) in self.AST_TYPES: + self.make_new_type(space, name, base, fields, attributes) + + def make_new_type(self, space, name, base, fields, attributes): + w_base = getattr(self, 'w_%s' % base) + w_dict = space.newdict() + space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + if fields is not None: + space.setitem_str(w_dict, "_fields", + space.newtuple([space.wrap(f) for f in fields])) + if attributes is not None: + space.setitem_str(w_dict, "_attributes", + space.newtuple([space.wrap(a) for a in attributes])) + w_type = space.call_function( + space.w_type, + space.wrap(name), space.newtuple([w_base]), w_dict) + setattr(self, 'w_%s' % name, w_type) + +def get(space): + return space.fromcache(State) class mod(AST): - pass + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_Module): + return Module.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Interactive): + return Interactive.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expression): + return Expression.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Suite): + return Suite.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected mod node, got %T", w_node) +State.ast_type('mod', 'AST', None, []) class Module(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Module(self) 
@@ -153,29 +186,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Module(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Module') - else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Module) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Module(_body) + +State.ast_type('Module', 'mod', ['body']) class Interactive(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -185,28 +219,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Interactive(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Interactive') - else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Interactive) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + 
space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Interactive(_body) + +State.ast_type('Interactive', 'mod', ['body']) class Expression(mod): def __init__(self, body): self.body = body - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Expression(self) @@ -215,20 +251,25 @@ self.body = self.body.mutate_over(visitor) return visitor.visit_Expression(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Expression') - else: - pass - self.body.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expression) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + _body = expr.from_object(space, w_body) + return Expression(_body) + +State.ast_type('Expression', 'mod', ['body']) class Suite(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Suite(self) @@ -238,21 +279,24 @@ visitor._mutate_sequence(self.body) return visitor.visit_Suite(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Suite') - else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Suite) + if self.body is None: + body_w = 
[] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Suite(_body) + +State.ast_type('Suite', 'mod', ['body']) class stmt(AST): @@ -261,18 +305,77 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_FunctionDef): + return FunctionDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ClassDef): + return ClassDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Return): + return Return.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Delete): + return Delete.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assign): + return Assign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_AugAssign): + return AugAssign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Print): + return Print.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_For): + return For.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_While): + return While.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_If): + return If.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_With): + return With.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Raise): + return Raise.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryExcept): + return TryExcept.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryFinally): + return 
TryFinally.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assert): + return Assert.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Import): + return Import.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ImportFrom): + return ImportFrom.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Exec): + return Exec.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Global): + return Global.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expr): + return Expr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Pass): + return Pass.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Break): + return Break.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Continue): + return Continue.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected stmt node, got %T", w_node) +State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset']) + class FunctionDef(stmt): def __init__(self, name, args, body, decorator_list, returns, lineno, col_offset): self.name = name self.args = args self.body = body - self.w_body = None self.decorator_list = decorator_list +<<<<<<< mine self.w_decorator_list = None self.returns = returns +======= +>>>>>>> theirs stmt.__init__(self, lineno, col_offset) +<<<<<<< mine self.initialization_state = 127 +======= +>>>>>>> theirs def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -287,6 +390,7 @@ self.returns = self.returns.mutate_over(visitor) return visitor.visit_FunctionDef(self) +<<<<<<< mine def sync_app_attrs(self, space): if (self.initialization_state & ~64) ^ 63: self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list', None], 'FunctionDef') @@ -316,6 +420,51 @@ node.sync_app_attrs(space) if self.returns: self.returns.sync_app_attrs(space) +======= + def to_object(self, space): + w_node = 
space.call_function(get(space).w_FunctionDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + w_args = self.args.to_object(space) # arguments + space.setattr(w_node, space.wrap('args'), w_args) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_args = get_field(space, w_node, 'args', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.realstr_w(w_name) + _args = arguments.from_object(space, w_args) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list']) +>>>>>>> theirs class ClassDef(stmt): @@ -323,17 +472,21 @@ def __init__(self, name, 
bases, keywords, starargs, kwargs, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases +<<<<<<< mine self.w_bases = None self.keywords = keywords self.w_keywords = None self.starargs = starargs self.kwargs = kwargs +======= +>>>>>>> theirs self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) +<<<<<<< mine self.initialization_state = 511 +======= +>>>>>>> theirs def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -353,6 +506,7 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_ClassDef(self) +<<<<<<< mine def sync_app_attrs(self, space): if (self.initialization_state & ~96) ^ 415: self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'keywords', None, None, 'body', 'decorator_list'], 'ClassDef') @@ -405,6 +559,56 @@ if self.decorator_list is not None: for node in self.decorator_list: node.sync_app_attrs(space) +======= + def to_object(self, space): + w_node = space.call_function(get(space).w_ClassDef) + w_name = space.wrap(self.name) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + if self.bases is None: + bases_w = [] + else: + bases_w = [node.to_object(space) for node in self.bases] # expr + w_bases = space.newlist(bases_w) + space.setattr(w_node, space.wrap('bases'), w_bases) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + 
space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_bases = get_field(space, w_node, 'bases', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.realstr_w(w_name) + bases_w = space.unpackiterable(w_bases) + _bases = [expr.from_object(space, w_item) for w_item in bases_w] + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list']) +>>>>>>> theirs class Return(stmt): @@ -412,7 +616,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Return(self) @@ -422,23 +625,34 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Return(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None], 'Return') - else: - if not self.initialization_state & 4: - self.value = None - if self.value: - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Return) + w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + 
space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Return(_value, _lineno, _col_offset) + +State.ast_type('Return', 'stmt', ['value']) class Delete(stmt): def __init__(self, targets, lineno, col_offset): self.targets = targets - self.w_targets = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Delete(self) @@ -448,31 +662,40 @@ visitor._mutate_sequence(self.targets) return visitor.visit_Delete(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') - else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Delete) + if self.targets is None: + targets_w = [] + else: + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + 
w_targets = get_field(space, w_node, 'targets', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Delete(_targets, _lineno, _col_offset) + +State.ast_type('Delete', 'stmt', ['targets']) class Assign(stmt): def __init__(self, targets, value, lineno, col_offset): self.targets = targets - self.w_targets = None self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assign(self) @@ -483,22 +706,36 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Assign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') - else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Assign) + if self.targets is None: + targets_w = [] + else: + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def 
from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assign(_targets, _value, _lineno, _col_offset) + +State.ast_type('Assign', 'stmt', ['targets', 'value']) class AugAssign(stmt): @@ -508,7 +745,6 @@ self.op = op self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -518,26 +754,102 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_AugAssign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') - else: - pass - self.target.sync_app_attrs(space) - self.value.sync_app_attrs(space) - - + def to_object(self, space): + w_node = space.call_function(get(space).w_AugAssign) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_op = operator_to_class[self.op - 1]().to_object(space) # operator + space.setattr(w_node, space.wrap('op'), w_op) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_op = get_field(space, w_node, 'op', False) + w_value = 
get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _op = operator.from_object(space, w_op) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return AugAssign(_target, _op, _value, _lineno, _col_offset) + +State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) + + +<<<<<<< mine +======= +class Print(stmt): + + def __init__(self, dest, values, nl, lineno, col_offset): + self.dest = dest + self.values = values + self.nl = nl + stmt.__init__(self, lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_Print(self) + + def mutate_over(self, visitor): + if self.dest: + self.dest = self.dest.mutate_over(visitor) + if self.values: + visitor._mutate_sequence(self.values) + return visitor.visit_Print(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_Print) + w_dest = self.dest.to_object(space) if self.dest is not None else space.w_None # expr + space.setattr(w_node, space.wrap('dest'), w_dest) + if self.values is None: + values_w = [] + else: + values_w = [node.to_object(space) for node in self.values] # expr + w_values = space.newlist(values_w) + space.setattr(w_node, space.wrap('values'), w_values) + w_nl = space.wrap(self.nl) # bool + space.setattr(w_node, space.wrap('nl'), w_nl) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_dest = get_field(space, w_node, 'dest', True) + w_values = get_field(space, w_node, 'values', False) + w_nl = get_field(space, w_node, 'nl', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, 
w_node, 'col_offset', False) + _dest = expr.from_object(space, w_dest) + values_w = space.unpackiterable(w_values) + _values = [expr.from_object(space, w_item) for w_item in values_w] + _nl = space.bool_w(w_nl) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Print(_dest, _values, _nl, _lineno, _col_offset) + +State.ast_type('Print', 'stmt', ['dest', 'values', 'nl']) + + +>>>>>>> theirs class For(stmt): def __init__(self, target, iter, body, orelse, lineno, col_offset): self.target = target self.iter = iter self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_For(self) @@ -551,33 +863,49 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_For(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') - else: - pass - self.target.sync_app_attrs(space) - self.iter.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_For) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_iter = self.iter.to_object(space) # expr + space.setattr(w_node, space.wrap('iter'), w_iter) + if self.body is None: + body_w = [] + else: + body_w = 
[node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_iter = get_field(space, w_node, 'iter', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _iter = expr.from_object(space, w_iter) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return For(_target, _iter, _body, _orelse, _lineno, _col_offset) + +State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse']) class While(stmt): @@ -585,11 +913,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_While(self) @@ -602,32 +927,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_While(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - 
self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') - else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_While) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = 
space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return While(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('While', 'stmt', ['test', 'body', 'orelse']) class If(stmt): @@ -635,11 +973,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_If(self) @@ -652,32 +987,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_If(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') - else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_If) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + 
space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return If(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('If', 'stmt', ['test', 'body', 'orelse']) class With(stmt): @@ -686,9 +1034,7 @@ self.context_expr = context_expr self.optional_vars = optional_vars self.body = body - self.w_body = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_With(self) @@ -701,25 +1047,40 @@ visitor._mutate_sequence(self.body) return visitor.visit_With(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 23: - self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With') - else: - if not self.initialization_state & 8: - self.optional_vars = None - self.context_expr.sync_app_attrs(space) - if self.optional_vars: - self.optional_vars.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: 
- for node in self.body: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_With) + w_context_expr = self.context_expr.to_object(space) # expr + space.setattr(w_node, space.wrap('context_expr'), w_context_expr) + w_optional_vars = self.optional_vars.to_object(space) if self.optional_vars is not None else space.w_None # expr + space.setattr(w_node, space.wrap('optional_vars'), w_optional_vars) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_context_expr = get_field(space, w_node, 'context_expr', False) + w_optional_vars = get_field(space, w_node, 'optional_vars', True) + w_body = get_field(space, w_node, 'body', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _context_expr = expr.from_object(space, w_context_expr) + _optional_vars = expr.from_object(space, w_optional_vars) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return With(_context_expr, _optional_vars, _body, _lineno, _col_offset) + +State.ast_type('With', 'stmt', ['context_expr', 'optional_vars', 'body']) class Raise(stmt): @@ -728,7 +1089,10 @@ self.exc = exc self.cause = cause stmt.__init__(self, lineno, col_offset) +<<<<<<< mine self.initialization_state = 15 +======= +>>>>>>> theirs def walkabout(self, visitor): visitor.visit_Raise(self) @@ -740,6 +1104,7 @@ self.cause = self.cause.mutate_over(visitor) return 
visitor.visit_Raise(self) +<<<<<<< mine def sync_app_attrs(self, space): if (self.initialization_state & ~12) ^ 3: self.missing_field(space, ['lineno', 'col_offset', None, None], 'Raise') @@ -752,19 +1117,46 @@ self.exc.sync_app_attrs(space) if self.cause: self.cause.sync_app_attrs(space) +======= + def to_object(self, space): + w_node = space.call_function(get(space).w_Raise) + w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr + space.setattr(w_node, space.wrap('type'), w_type) + w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr + space.setattr(w_node, space.wrap('inst'), w_inst) + w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr + space.setattr(w_node, space.wrap('tback'), w_tback) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_type = get_field(space, w_node, 'type', True) + w_inst = get_field(space, w_node, 'inst', True) + w_tback = get_field(space, w_node, 'tback', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _type = expr.from_object(space, w_type) + _inst = expr.from_object(space, w_inst) + _tback = expr.from_object(space, w_tback) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Raise(_type, _inst, _tback, _lineno, _col_offset) + +State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback']) +>>>>>>> theirs class TryExcept(stmt): def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body - self.w_body = None self.handlers = handlers - self.w_handlers = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def 
walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -778,52 +1170,58 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_TryExcept(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') - else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_handlers - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w] - else: - self.handlers = None - if self.handlers is not None: - for node in self.handlers: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_TryExcept) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.handlers is None: + handlers_w = [] + else: + handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler + w_handlers = space.newlist(handlers_w) + space.setattr(w_node, space.wrap('handlers'), w_handlers) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, 
space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_handlers = get_field(space, w_node, 'handlers', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + handlers_w = space.unpackiterable(w_handlers) + _handlers = [excepthandler.from_object(space, w_item) for w_item in handlers_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) + +State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) class TryFinally(stmt): def __init__(self, body, finalbody, lineno, col_offset): self.body = body - self.w_body = None self.finalbody = finalbody - self.w_finalbody = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -835,31 +1233,41 @@ visitor._mutate_sequence(self.finalbody) return visitor.visit_TryFinally(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') - else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_finalbody - if w_list is not None: - list_w = 
space.listview(w_list) - if list_w: - self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.finalbody = None - if self.finalbody is not None: - for node in self.finalbody: - node.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_TryFinally) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.finalbody is None: + finalbody_w = [] + else: + finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt + w_finalbody = space.newlist(finalbody_w) + space.setattr(w_node, space.wrap('finalbody'), w_finalbody) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_finalbody = get_field(space, w_node, 'finalbody', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + finalbody_w = space.unpackiterable(w_finalbody) + _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryFinally(_body, _finalbody, _lineno, _col_offset) + +State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) class Assert(stmt): @@ -868,7 +1276,6 @@ self.test = test self.msg = msg stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assert(self) @@ -879,95 +1286,137 @@ self.msg = self.msg.mutate_over(visitor) return visitor.visit_Assert(self) - def 
sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') - else: - if not self.initialization_state & 8: - self.msg = None - self.test.sync_app_attrs(space) - if self.msg: - self.msg.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Assert) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr + space.setattr(w_node, space.wrap('msg'), w_msg) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_msg = get_field(space, w_node, 'msg', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + _msg = expr.from_object(space, w_msg) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assert(_test, _msg, _lineno, _col_offset) + +State.ast_type('Assert', 'stmt', ['test', 'msg']) class Import(stmt): def __init__(self, names, lineno, col_offset): self.names = names + stmt.__init__(self, lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_Import(self) + + def mutate_over(self, visitor): + if self.names: + visitor._mutate_sequence(self.names) + return visitor.visit_Import(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_Import) + if self.names is None: + names_w = [] + else: + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = 
space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Import(_names, _lineno, _col_offset) + +State.ast_type('Import', 'stmt', ['names']) + + +class ImportFrom(stmt): + + def __init__(self, module, names, level, lineno, col_offset): + self.module = module + self.names = names + self.level = level + stmt.__init__(self, lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_ImportFrom(self) + + def mutate_over(self, visitor): + if self.names: + visitor._mutate_sequence(self.names) + return visitor.visit_ImportFrom(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_ImportFrom) + w_module = space.wrap(self.module) # identifier + space.setattr(w_node, space.wrap('module'), w_module) + if self.names is None: + names_w = [] + else: + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_level = space.wrap(self.level) # int + space.setattr(w_node, space.wrap('level'), w_level) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_module = get_field(space, w_node, 'module', True) + w_names = get_field(space, w_node, 'names', 
False) + w_level = get_field(space, w_node, 'level', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _module = space.str_or_None_w(w_module) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _level = space.int_w(w_level) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ImportFrom(_module, _names, _level, _lineno, _col_offset) + +State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level']) + + +class Global(stmt): + + def __init__(self, names, lineno, col_offset): + self.names = names self.w_names = None stmt.__init__(self, lineno, col_offset) +<<<<<<< mine self.initialization_state = 7 - - def walkabout(self, visitor): - visitor.visit_Import(self) - - def mutate_over(self, visitor): - if self.names: - visitor._mutate_sequence(self.names) - return visitor.visit_Import(self) - - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') - else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) - - -class ImportFrom(stmt): - - def __init__(self, module, names, level, lineno, col_offset): - self.module = module - self.names = names - self.w_names = None - self.level = level - stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 - - def walkabout(self, visitor): - visitor.visit_ImportFrom(self) - - def mutate_over(self, visitor): - if self.names: - visitor._mutate_sequence(self.names) - return visitor.visit_ImportFrom(self) - - def sync_app_attrs(self, space): - if (self.initialization_state & ~20) ^ 11: - self.missing_field(space, ['lineno', 'col_offset', 
None, 'names', None], 'ImportFrom') - else: - if not self.initialization_state & 4: - self.module = None - if not self.initialization_state & 16: - self.level = 0 - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) - - -class Global(stmt): - - def __init__(self, names, lineno, col_offset): - self.names = names - self.w_names = None - stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 +======= +>>>>>>> theirs def walkabout(self, visitor): visitor.visit_Global(self) @@ -975,6 +1424,7 @@ def mutate_over(self, visitor): return visitor.visit_Global(self) +<<<<<<< mine def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') @@ -987,15 +1437,44 @@ self.names = [space.identifier_w(w_obj) for w_obj in list_w] else: self.names = None +======= + def to_object(self, space): + w_node = space.call_function(get(space).w_Exec) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('globals'), w_globals) + w_locals = self.locals.to_object(space) if self.locals is not None else space.w_None # expr + space.setattr(w_node, space.wrap('locals'), w_locals) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_globals = get_field(space, w_node, 'globals', True) + w_locals = get_field(space, w_node, 'locals', True) 
+ w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _body = expr.from_object(space, w_body) + _globals = expr.from_object(space, w_globals) + _locals = expr.from_object(space, w_locals) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Exec(_body, _globals, _locals, _lineno, _col_offset) + +State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals']) +>>>>>>> theirs class Nonlocal(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Nonlocal(self) @@ -1003,6 +1482,7 @@ def mutate_over(self, visitor): return visitor.visit_Nonlocal(self) +<<<<<<< mine def sync_app_attrs(self, space): if (self.initialization_state & ~0) ^ 7: self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Nonlocal') @@ -1015,6 +1495,34 @@ self.names = [space.identifier_w(w_obj) for w_obj in list_w] else: self.names = None +======= + def to_object(self, space): + w_node = space.call_function(get(space).w_Global) + if self.names is None: + names_w = [] + else: + names_w = [space.wrap(node) for node in self.names] # identifier + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.realstr_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + 
return Global(_names, _lineno, _col_offset) + +State.ast_type('Global', 'stmt', ['names']) +>>>>>>> theirs class Expr(stmt): @@ -1022,7 +1530,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1031,19 +1538,33 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Expr(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') - else: - pass - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expr) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Expr(_value, _lineno, _col_offset) + +State.ast_type('Expr', 'stmt', ['value']) class Pass(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Pass(self) @@ -1051,18 +1572,29 @@ def mutate_over(self, visitor): return visitor.visit_Pass(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Pass') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Pass) + 
w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Pass(_lineno, _col_offset) + +State.ast_type('Pass', 'stmt', []) class Break(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Break(self) @@ -1070,18 +1602,29 @@ def mutate_over(self, visitor): return visitor.visit_Break(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Break') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Break) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Break(_lineno, _col_offset) + +State.ast_type('Break', 'stmt', []) class Continue(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Continue(self) @@ -1089,11 +1632,23 @@ def mutate_over(self, visitor): return visitor.visit_Continue(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - 
self.missing_field(space, ['lineno', 'col_offset'], 'Continue') - else: From noreply at buildbot.pypy.org Tue Aug 26 19:58:24 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:24 +0200 (CEST) Subject: [pypy-commit] pypy py3k: regenerate ast.py Message-ID: <20140826175824.A53301C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73067:276f1b668967 Date: 2014-08-24 15:55 -0700 http://bitbucket.org/pypy/pypy/changeset/276f1b668967/ Log: regenerate ast.py diff too long, truncating to 2000 out of 6594 lines diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -321,8 +321,6 @@ return Assign.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_AugAssign): return AugAssign.from_object(space, w_node) - if space.isinstance_w(w_node, get(space).w_Print): - return Print.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_For): return For.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_While): @@ -343,10 +341,10 @@ return Import.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_ImportFrom): return ImportFrom.from_object(space, w_node) - if space.isinstance_w(w_node, get(space).w_Exec): - return Exec.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Global): return Global.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Nonlocal): + return Nonlocal.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Expr): return Expr.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Pass): @@ -366,16 +364,8 @@ self.args = args self.body = body self.decorator_list = decorator_list -<<<<<<< mine - self.w_decorator_list = None self.returns = returns -======= ->>>>>>> theirs stmt.__init__(self, lineno, col_offset) -<<<<<<< mine - self.initialization_state = 127 
-======= ->>>>>>> theirs def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -390,37 +380,6 @@ self.returns = self.returns.mutate_over(visitor) return visitor.visit_FunctionDef(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~64) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list', None], 'FunctionDef') - else: - if not self.initialization_state & 64: - self.returns = None - self.args.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) - if self.returns: - self.returns.sync_app_attrs(space) -======= def to_object(self, space): w_node = space.call_function(get(space).w_FunctionDef) w_name = space.wrap(self.name) # identifier @@ -439,6 +398,8 @@ decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr w_decorator_list = space.newlist(decorator_list_w) space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_returns = self.returns.to_object(space) if self.returns is not None else space.w_None # expr + space.setattr(w_node, space.wrap('returns'), w_returns) w_lineno = space.wrap(self.lineno) # int space.setattr(w_node, space.wrap('lineno'), w_lineno) w_col_offset = space.wrap(self.col_offset) # int @@ -451,6 +412,7 @@ w_args = get_field(space, w_node, 'args', False) w_body = get_field(space, w_node, 'body', False) w_decorator_list = get_field(space, w_node, 'decorator_list', False) 
+ w_returns = get_field(space, w_node, 'returns', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _name = space.realstr_w(w_name) @@ -459,12 +421,12 @@ _body = [stmt.from_object(space, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _returns = expr.from_object(space, w_returns) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return FunctionDef(_name, _args, _body, _decorator_list, _lineno, _col_offset) - -State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list']) ->>>>>>> theirs + return FunctionDef(_name, _args, _body, _decorator_list, _returns, _lineno, _col_offset) + +State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list', 'returns']) class ClassDef(stmt): @@ -472,21 +434,12 @@ def __init__(self, name, bases, keywords, starargs, kwargs, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases -<<<<<<< mine - self.w_bases = None self.keywords = keywords - self.w_keywords = None self.starargs = starargs self.kwargs = kwargs -======= ->>>>>>> theirs self.body = body self.decorator_list = decorator_list stmt.__init__(self, lineno, col_offset) -<<<<<<< mine - self.initialization_state = 511 -======= ->>>>>>> theirs def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -506,60 +459,6 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_ClassDef(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~96) ^ 415: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'keywords', None, None, 'body', 'decorator_list'], 'ClassDef') - else: - if not self.initialization_state & 32: - self.starargs = None - if not self.initialization_state & 64: - self.kwargs = None - w_list = self.w_bases - if w_list is not None: - 
list_w = space.listview(w_list) - if list_w: - self.bases = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.bases = None - if self.bases is not None: - for node in self.bases: - node.sync_app_attrs(space) - w_list = self.w_keywords - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.keywords = [space.interp_w(keyword, w_obj) for w_obj in list_w] - else: - self.keywords = None - if self.keywords is not None: - for node in self.keywords: - node.sync_app_attrs(space) - if self.starargs: - self.starargs.sync_app_attrs(space) - if self.kwargs: - self.kwargs.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) -======= def to_object(self, space): w_node = space.call_function(get(space).w_ClassDef) w_name = space.wrap(self.name) # identifier @@ -570,6 +469,16 @@ bases_w = [node.to_object(space) for node in self.bases] # expr w_bases = space.newlist(bases_w) space.setattr(w_node, space.wrap('bases'), w_bases) + if self.keywords is None: + keywords_w = [] + else: + keywords_w = [node.to_object(space) for node in self.keywords] # keyword + w_keywords = space.newlist(keywords_w) + space.setattr(w_node, space.wrap('keywords'), w_keywords) + w_starargs = self.starargs.to_object(space) if self.starargs is not None else space.w_None # expr + space.setattr(w_node, space.wrap('starargs'), w_starargs) + w_kwargs = self.kwargs.to_object(space) if self.kwargs is not None else space.w_None # expr + 
space.setattr(w_node, space.wrap('kwargs'), w_kwargs) if self.body is None: body_w = [] else: @@ -592,6 +501,9 @@ def from_object(space, w_node): w_name = get_field(space, w_node, 'name', False) w_bases = get_field(space, w_node, 'bases', False) + w_keywords = get_field(space, w_node, 'keywords', False) + w_starargs = get_field(space, w_node, 'starargs', True) + w_kwargs = get_field(space, w_node, 'kwargs', True) w_body = get_field(space, w_node, 'body', False) w_decorator_list = get_field(space, w_node, 'decorator_list', False) w_lineno = get_field(space, w_node, 'lineno', False) @@ -599,16 +511,19 @@ _name = space.realstr_w(w_name) bases_w = space.unpackiterable(w_bases) _bases = [expr.from_object(space, w_item) for w_item in bases_w] + keywords_w = space.unpackiterable(w_keywords) + _keywords = [keyword.from_object(space, w_item) for w_item in keywords_w] + _starargs = expr.from_object(space, w_starargs) + _kwargs = expr.from_object(space, w_kwargs) body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return ClassDef(_name, _bases, _body, _decorator_list, _lineno, _col_offset) - -State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'body', 'decorator_list']) ->>>>>>> theirs + return ClassDef(_name, _bases, _keywords, _starargs, _kwargs, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'keywords', 'starargs', 'kwargs', 'body', 'decorator_list']) class Return(stmt): @@ -785,63 +700,6 @@ State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) -<<<<<<< mine -======= -class Print(stmt): - - def __init__(self, dest, values, nl, lineno, col_offset): - self.dest = dest - self.values = values - self.nl = nl - stmt.__init__(self, lineno, col_offset) - - 
def walkabout(self, visitor): - visitor.visit_Print(self) - - def mutate_over(self, visitor): - if self.dest: - self.dest = self.dest.mutate_over(visitor) - if self.values: - visitor._mutate_sequence(self.values) - return visitor.visit_Print(self) - - def to_object(self, space): - w_node = space.call_function(get(space).w_Print) - w_dest = self.dest.to_object(space) if self.dest is not None else space.w_None # expr - space.setattr(w_node, space.wrap('dest'), w_dest) - if self.values is None: - values_w = [] - else: - values_w = [node.to_object(space) for node in self.values] # expr - w_values = space.newlist(values_w) - space.setattr(w_node, space.wrap('values'), w_values) - w_nl = space.wrap(self.nl) # bool - space.setattr(w_node, space.wrap('nl'), w_nl) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) - return w_node - - @staticmethod - def from_object(space, w_node): - w_dest = get_field(space, w_node, 'dest', True) - w_values = get_field(space, w_node, 'values', False) - w_nl = get_field(space, w_node, 'nl', False) - w_lineno = get_field(space, w_node, 'lineno', False) - w_col_offset = get_field(space, w_node, 'col_offset', False) - _dest = expr.from_object(space, w_dest) - values_w = space.unpackiterable(w_values) - _values = [expr.from_object(space, w_item) for w_item in values_w] - _nl = space.bool_w(w_nl) - _lineno = space.int_w(w_lineno) - _col_offset = space.int_w(w_col_offset) - return Print(_dest, _values, _nl, _lineno, _col_offset) - -State.ast_type('Print', 'stmt', ['dest', 'values', 'nl']) - - ->>>>>>> theirs class For(stmt): def __init__(self, target, iter, body, orelse, lineno, col_offset): @@ -1089,10 +947,6 @@ self.exc = exc self.cause = cause stmt.__init__(self, lineno, col_offset) -<<<<<<< mine - self.initialization_state = 15 -======= ->>>>>>> theirs def walkabout(self, visitor): 
visitor.visit_Raise(self) @@ -1104,28 +958,12 @@ self.cause = self.cause.mutate_over(visitor) return visitor.visit_Raise(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~12) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None, None], 'Raise') - else: - if not self.initialization_state & 4: - self.exc = None - if not self.initialization_state & 8: - self.cause = None - if self.exc: - self.exc.sync_app_attrs(space) - if self.cause: - self.cause.sync_app_attrs(space) -======= def to_object(self, space): w_node = space.call_function(get(space).w_Raise) - w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr - space.setattr(w_node, space.wrap('type'), w_type) - w_inst = self.inst.to_object(space) if self.inst is not None else space.w_None # expr - space.setattr(w_node, space.wrap('inst'), w_inst) - w_tback = self.tback.to_object(space) if self.tback is not None else space.w_None # expr - space.setattr(w_node, space.wrap('tback'), w_tback) + w_exc = self.exc.to_object(space) if self.exc is not None else space.w_None # expr + space.setattr(w_node, space.wrap('exc'), w_exc) + w_cause = self.cause.to_object(space) if self.cause is not None else space.w_None # expr + space.setattr(w_node, space.wrap('cause'), w_cause) w_lineno = space.wrap(self.lineno) # int space.setattr(w_node, space.wrap('lineno'), w_lineno) w_col_offset = space.wrap(self.col_offset) # int @@ -1134,20 +972,17 @@ @staticmethod def from_object(space, w_node): - w_type = get_field(space, w_node, 'type', True) - w_inst = get_field(space, w_node, 'inst', True) - w_tback = get_field(space, w_node, 'tback', True) + w_exc = get_field(space, w_node, 'exc', True) + w_cause = get_field(space, w_node, 'cause', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _type = expr.from_object(space, w_type) - _inst = expr.from_object(space, w_inst) - _tback = 
expr.from_object(space, w_tback) + _exc = expr.from_object(space, w_exc) + _cause = expr.from_object(space, w_cause) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) - return Raise(_type, _inst, _tback, _lineno, _col_offset) - -State.ast_type('Raise', 'stmt', ['type', 'inst', 'tback']) ->>>>>>> theirs + return Raise(_exc, _cause, _lineno, _col_offset) + +State.ast_type('Raise', 'stmt', ['exc', 'cause']) class TryExcept(stmt): @@ -1411,12 +1246,7 @@ def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) -<<<<<<< mine - self.initialization_state = 7 -======= ->>>>>>> theirs def walkabout(self, visitor): visitor.visit_Global(self) @@ -1424,78 +1254,6 @@ def mutate_over(self, visitor): return visitor.visit_Global(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') - else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.identifier_w(w_obj) for w_obj in list_w] - else: - self.names = None -======= - def to_object(self, space): - w_node = space.call_function(get(space).w_Exec) - w_body = self.body.to_object(space) # expr - space.setattr(w_node, space.wrap('body'), w_body) - w_globals = self.globals.to_object(space) if self.globals is not None else space.w_None # expr - space.setattr(w_node, space.wrap('globals'), w_globals) - w_locals = self.locals.to_object(space) if self.locals is not None else space.w_None # expr - space.setattr(w_node, space.wrap('locals'), w_locals) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) - return w_node - - @staticmethod - def from_object(space, w_node): - w_body = get_field(space, w_node, 
'body', False) - w_globals = get_field(space, w_node, 'globals', True) - w_locals = get_field(space, w_node, 'locals', True) - w_lineno = get_field(space, w_node, 'lineno', False) - w_col_offset = get_field(space, w_node, 'col_offset', False) - _body = expr.from_object(space, w_body) - _globals = expr.from_object(space, w_globals) - _locals = expr.from_object(space, w_locals) - _lineno = space.int_w(w_lineno) - _col_offset = space.int_w(w_col_offset) - return Exec(_body, _globals, _locals, _lineno, _col_offset) - -State.ast_type('Exec', 'stmt', ['body', 'globals', 'locals']) ->>>>>>> theirs - - -class Nonlocal(stmt): - - def __init__(self, names, lineno, col_offset): - self.names = names - stmt.__init__(self, lineno, col_offset) - - def walkabout(self, visitor): - visitor.visit_Nonlocal(self) - - def mutate_over(self, visitor): - return visitor.visit_Nonlocal(self) - -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Nonlocal') - else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.identifier_w(w_obj) for w_obj in list_w] - else: - self.names = None -======= def to_object(self, space): w_node = space.call_function(get(space).w_Global) if self.names is None: @@ -1522,7 +1280,46 @@ return Global(_names, _lineno, _col_offset) State.ast_type('Global', 'stmt', ['names']) ->>>>>>> theirs + + +class Nonlocal(stmt): + + def __init__(self, names, lineno, col_offset): + self.names = names + stmt.__init__(self, lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_Nonlocal(self) + + def mutate_over(self, visitor): + return visitor.visit_Nonlocal(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_Nonlocal) + if self.names is None: + names_w = [] + else: + names_w = [space.wrap(node) for node in self.names] # identifier + w_names = 
space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.realstr_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Nonlocal(_names, _lineno, _col_offset) + +State.ast_type('Nonlocal', 'stmt', ['names']) class Expr(stmt): @@ -1689,16 +1486,20 @@ return Compare.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Call): return Call.from_object(space, w_node) - if space.isinstance_w(w_node, get(space).w_Repr): - return Repr.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Num): return Num.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Str): return Str.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Bytes): + return Bytes.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Ellipsis): + return Ellipsis.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Attribute): return Attribute.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Subscript): return Subscript.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Starred): + return Starred.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Name): return Name.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_List): @@ -2411,56 +2212,6 @@ def mutate_over(self, visitor): return visitor.visit_Num(self) -<<<<<<< mine - def 
sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num') - else: - pass -======= - def to_object(self, space): - w_node = space.call_function(get(space).w_Repr) - w_value = self.value.to_object(space) # expr - space.setattr(w_node, space.wrap('value'), w_value) - w_lineno = space.wrap(self.lineno) # int - space.setattr(w_node, space.wrap('lineno'), w_lineno) - w_col_offset = space.wrap(self.col_offset) # int - space.setattr(w_node, space.wrap('col_offset'), w_col_offset) - return w_node - - @staticmethod - def from_object(space, w_node): - w_value = get_field(space, w_node, 'value', False) - w_lineno = get_field(space, w_node, 'lineno', False) - w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) - _lineno = space.int_w(w_lineno) - _col_offset = space.int_w(w_col_offset) - return Repr(_value, _lineno, _col_offset) - -State.ast_type('Repr', 'expr', ['value']) ->>>>>>> theirs - - -class Str(expr): - - def __init__(self, s, lineno, col_offset): - self.s = s - expr.__init__(self, lineno, col_offset) - - def walkabout(self, visitor): - visitor.visit_Str(self) - - def mutate_over(self, visitor): - return visitor.visit_Str(self) - -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str') - else: - pass -======= def to_object(self, space): w_node = space.call_function(get(space).w_Num) w_n = self.n # object @@ -2482,47 +2233,20 @@ return Num(_n, _lineno, _col_offset) State.ast_type('Num', 'expr', ['n']) ->>>>>>> theirs - - -class Bytes(expr): + + +class Str(expr): def __init__(self, s, lineno, col_offset): self.s = s expr.__init__(self, lineno, col_offset) def walkabout(self, visitor): - visitor.visit_Bytes(self) + visitor.visit_Str(self) def mutate_over(self, visitor): - return visitor.visit_Bytes(self) - -<<<<<<< mine - def 
sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 's'], 'Bytes') - else: - pass - - -class Ellipsis(expr): - - def __init__(self, lineno, col_offset): - expr.__init__(self, lineno, col_offset) - self.initialization_state = 3 - - def walkabout(self, visitor): - visitor.visit_Ellipsis(self) - - def mutate_over(self, visitor): - return visitor.visit_Ellipsis(self) - - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Ellipsis') - else: - pass -======= + return visitor.visit_Str(self) + def to_object(self, space): w_node = space.call_function(get(space).w_Str) w_s = self.s # string @@ -2544,7 +2268,71 @@ return Str(_s, _lineno, _col_offset) State.ast_type('Str', 'expr', ['s']) ->>>>>>> theirs + + +class Bytes(expr): + + def __init__(self, s, lineno, col_offset): + self.s = s + expr.__init__(self, lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_Bytes(self) + + def mutate_over(self, visitor): + return visitor.visit_Bytes(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_Bytes) + w_s = self.s # string + space.setattr(w_node, space.wrap('s'), w_s) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_s = get_field(space, w_node, 's', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _s = check_string(space, w_s) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Bytes(_s, _lineno, _col_offset) + +State.ast_type('Bytes', 'expr', ['s']) + + +class Ellipsis(expr): + + def __init__(self, lineno, col_offset): + expr.__init__(self, 
lineno, col_offset) + + def walkabout(self, visitor): + visitor.visit_Ellipsis(self) + + def mutate_over(self, visitor): + return visitor.visit_Ellipsis(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_Ellipsis) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Ellipsis(_lineno, _col_offset) + +State.ast_type('Ellipsis', 'expr', []) class Attribute(expr): @@ -2646,7 +2434,6 @@ self.value = value self.ctx = ctx expr.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Starred(self) @@ -2655,12 +2442,31 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Starred(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'value', 'ctx'], 'Starred') - else: - pass - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Starred) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context + space.setattr(w_node, space.wrap('ctx'), w_ctx) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', False) + w_ctx = 
get_field(space, w_node, 'ctx', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _ctx = expr_context.from_object(space, w_ctx) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Starred(_value, _ctx, _lineno, _col_offset) + +State.ast_type('Starred', 'expr', ['value', 'ctx']) class Name(expr): @@ -2902,8 +2708,6 @@ def from_object(space, w_node): if space.is_w(w_node, space.w_None): return None - if space.isinstance_w(w_node, get(space).w_Ellipsis): - return Ellipsis.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_Slice): return Slice.from_object(space, w_node) if space.isinstance_w(w_node, get(space).w_ExtSlice): @@ -2914,29 +2718,6 @@ "Expected slice node, got %T", w_node) State.ast_type('slice', 'AST', None, []) -<<<<<<< mine -======= -class Ellipsis(slice): - - - def walkabout(self, visitor): - visitor.visit_Ellipsis(self) - - def mutate_over(self, visitor): - return visitor.visit_Ellipsis(self) - - def to_object(self, space): - w_node = space.call_function(get(space).w_Ellipsis) - return w_node - - @staticmethod - def from_object(space, w_node): - return Ellipsis() - -State.ast_type('Ellipsis', 'slice', []) - - ->>>>>>> theirs class Slice(slice): def __init__(self, lower, upper, step): @@ -3413,33 +3194,11 @@ visitor._mutate_sequence(self.body) return visitor.visit_ExceptHandler(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~12) ^ 19: - self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler') - else: - if not self.initialization_state & 4: - self.type = None - if not self.initialization_state & 8: - self.name = None - if self.type: - self.type.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj 
in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) -======= def to_object(self, space): w_node = space.call_function(get(space).w_ExceptHandler) w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr space.setattr(w_node, space.wrap('type'), w_type) - w_name = self.name.to_object(space) if self.name is not None else space.w_None # expr + w_name = space.wrap(self.name) # identifier space.setattr(w_node, space.wrap('name'), w_name) if self.body is None: body_w = [] @@ -3461,7 +3220,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _type = expr.from_object(space, w_type) - _name = expr.from_object(space, w_name) + _name = space.str_or_None_w(w_name) body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] _lineno = space.int_w(w_lineno) @@ -3469,7 +3228,6 @@ return ExceptHandler(_type, _name, _body, _lineno, _col_offset) State.ast_type('ExceptHandler', 'excepthandler', ['type', 'name', 'body']) ->>>>>>> theirs class arguments(AST): @@ -3479,17 +3237,10 @@ self.vararg = vararg self.varargannotation = varargannotation self.kwonlyargs = kwonlyargs - self.w_kwonlyargs = None self.kwarg = kwarg self.kwargannotation = kwargannotation self.defaults = defaults -<<<<<<< mine - self.w_defaults = None self.kw_defaults = kw_defaults - self.w_kw_defaults = None - self.initialization_state = 255 -======= ->>>>>>> theirs def mutate_over(self, visitor): if self.args: @@ -3509,125 +3260,99 @@ def walkabout(self, visitor): visitor.visit_arguments(self) -<<<<<<< mine - def sync_app_attrs(self, space): - if (self.initialization_state & ~54) ^ 201: - self.missing_field(space, ['args', None, None, 'kwonlyargs', None, None, 'defaults', 'kw_defaults'], 'arguments') - else: - if not self.initialization_state & 2: - self.vararg = None - if not self.initialization_state & 4: - 
self.varargannotation = None - if not self.initialization_state & 16: - self.kwarg = None - if not self.initialization_state & 32: - self.kwargannotation = None - w_list = self.w_args - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.args = [space.interp_w(arg, w_obj) for w_obj in list_w] - else: - self.args = None - if self.args is not None: - for node in self.args: - node.sync_app_attrs(space) - if self.varargannotation: - self.varargannotation.sync_app_attrs(space) - w_list = self.w_kwonlyargs - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.kwonlyargs = [space.interp_w(arg, w_obj) for w_obj in list_w] - else: - self.kwonlyargs = None - if self.kwonlyargs is not None: - for node in self.kwonlyargs: - node.sync_app_attrs(space) - if self.kwargannotation: - self.kwargannotation.sync_app_attrs(space) - w_list = self.w_defaults - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.defaults = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.defaults = None - if self.defaults is not None: - for node in self.defaults: - node.sync_app_attrs(space) - w_list = self.w_kw_defaults - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.kw_defaults = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.kw_defaults = None - if self.kw_defaults is not None: - for node in self.kw_defaults: - if node: - node.sync_app_attrs(space) - -class arg(AST): - - def __init__(self, arg, annotation): - self.arg = arg - self.annotation = annotation - self.initialization_state = 3 - - def mutate_over(self, visitor): - if self.annotation: - self.annotation = self.annotation.mutate_over(visitor) - return visitor.visit_arg(self) - - def walkabout(self, visitor): - visitor.visit_arg(self) - - def sync_app_attrs(self, space): - if (self.initialization_state & ~2) ^ 1: - self.missing_field(space, ['arg', None], 'arg') - else: - if not self.initialization_state & 2: 
- self.annotation = None - if self.annotation: - self.annotation.sync_app_attrs(space) -======= def to_object(self, space): w_node = space.call_function(get(space).w_arguments) if self.args is None: args_w = [] else: - args_w = [node.to_object(space) for node in self.args] # expr + args_w = [node.to_object(space) for node in self.args] # arg w_args = space.newlist(args_w) space.setattr(w_node, space.wrap('args'), w_args) w_vararg = space.wrap(self.vararg) # identifier space.setattr(w_node, space.wrap('vararg'), w_vararg) + w_varargannotation = self.varargannotation.to_object(space) if self.varargannotation is not None else space.w_None # expr + space.setattr(w_node, space.wrap('varargannotation'), w_varargannotation) + if self.kwonlyargs is None: + kwonlyargs_w = [] + else: + kwonlyargs_w = [node.to_object(space) for node in self.kwonlyargs] # arg + w_kwonlyargs = space.newlist(kwonlyargs_w) + space.setattr(w_node, space.wrap('kwonlyargs'), w_kwonlyargs) w_kwarg = space.wrap(self.kwarg) # identifier space.setattr(w_node, space.wrap('kwarg'), w_kwarg) + w_kwargannotation = self.kwargannotation.to_object(space) if self.kwargannotation is not None else space.w_None # expr + space.setattr(w_node, space.wrap('kwargannotation'), w_kwargannotation) if self.defaults is None: defaults_w = [] else: defaults_w = [node.to_object(space) for node in self.defaults] # expr w_defaults = space.newlist(defaults_w) space.setattr(w_node, space.wrap('defaults'), w_defaults) + if self.kw_defaults is None: + kw_defaults_w = [] + else: + kw_defaults_w = [node.to_object(space) for node in self.kw_defaults] # expr + w_kw_defaults = space.newlist(kw_defaults_w) + space.setattr(w_node, space.wrap('kw_defaults'), w_kw_defaults) return w_node @staticmethod def from_object(space, w_node): w_args = get_field(space, w_node, 'args', False) w_vararg = get_field(space, w_node, 'vararg', True) + w_varargannotation = get_field(space, w_node, 'varargannotation', True) + w_kwonlyargs = get_field(space, 
w_node, 'kwonlyargs', False) w_kwarg = get_field(space, w_node, 'kwarg', True) + w_kwargannotation = get_field(space, w_node, 'kwargannotation', True) w_defaults = get_field(space, w_node, 'defaults', False) + w_kw_defaults = get_field(space, w_node, 'kw_defaults', False) args_w = space.unpackiterable(w_args) - _args = [expr.from_object(space, w_item) for w_item in args_w] + _args = [arg.from_object(space, w_item) for w_item in args_w] _vararg = space.str_or_None_w(w_vararg) + _varargannotation = expr.from_object(space, w_varargannotation) + kwonlyargs_w = space.unpackiterable(w_kwonlyargs) + _kwonlyargs = [arg.from_object(space, w_item) for w_item in kwonlyargs_w] _kwarg = space.str_or_None_w(w_kwarg) + _kwargannotation = expr.from_object(space, w_kwargannotation) defaults_w = space.unpackiterable(w_defaults) _defaults = [expr.from_object(space, w_item) for w_item in defaults_w] - return arguments(_args, _vararg, _kwarg, _defaults) - -State.ast_type('arguments', 'AST', ['args', 'vararg', 'kwarg', 'defaults']) ->>>>>>> theirs + kw_defaults_w = space.unpackiterable(w_kw_defaults) + _kw_defaults = [expr.from_object(space, w_item) for w_item in kw_defaults_w] + return arguments(_args, _vararg, _varargannotation, _kwonlyargs, _kwarg, _kwargannotation, _defaults, _kw_defaults) + +State.ast_type('arguments', 'AST', ['args', 'vararg', 'varargannotation', 'kwonlyargs', 'kwarg', 'kwargannotation', 'defaults', 'kw_defaults']) + +class arg(AST): + + def __init__(self, arg, annotation): + self.arg = arg + self.annotation = annotation + + def mutate_over(self, visitor): + if self.annotation: + self.annotation = self.annotation.mutate_over(visitor) + return visitor.visit_arg(self) + + def walkabout(self, visitor): + visitor.visit_arg(self) + + def to_object(self, space): + w_node = space.call_function(get(space).w_arg) + w_arg = space.wrap(self.arg) # identifier + space.setattr(w_node, space.wrap('arg'), w_arg) + w_annotation = self.annotation.to_object(space) if self.annotation 
is not None else space.w_None # expr + space.setattr(w_node, space.wrap('annotation'), w_annotation) + return w_node + + @staticmethod + def from_object(space, w_node): + w_arg = get_field(space, w_node, 'arg', False) + w_annotation = get_field(space, w_node, 'annotation', True) + _arg = space.realstr_w(w_arg) + _annotation = expr.from_object(space, w_annotation) + return arg(_arg, _annotation) + +State.ast_type('arg', 'AST', ['arg', 'annotation']) class keyword(AST): @@ -4083,5518 +3808,3 @@ pass -<<<<<<< mine -mod.typedef = typedef.TypeDef("mod", - AST.typedef, - __module__='_ast', - _attributes=_FieldsWrapper([]), - __new__=interp2app(get_AST_new(mod)), -) -mod.typedef.heaptype = True - -def Module_get_body(space, w_self): - if not w_self.initialization_state & 1: - raise_attriberr(space, w_self, 'body') - if w_self.w_body is None: - if w_self.body is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) - w_self.w_body = w_list - return w_self.w_body - -def Module_set_body(space, w_self, w_new_value): - w_self.w_body = w_new_value - w_self.initialization_state |= 1 - -def Module_del_body(space, w_self): - # Check if the element exists, raise appropriate exceptions - Module_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~1 - -_Module_field_unroller = unrolling_iterable(['body']) -def Module_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Module, w_self) - w_self.w_body = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Module constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Module_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Module.typedef = typedef.TypeDef("Module", - 
mod.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Module_get_body, Module_set_body, Module_del_body, cls=Module), - __new__=interp2app(get_AST_new(Module)), - __init__=interp2app(Module_init), -) -Module.typedef.heaptype = True - -def Interactive_get_body(space, w_self): - if not w_self.initialization_state & 1: - raise_attriberr(space, w_self, 'body') - if w_self.w_body is None: - if w_self.body is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) - w_self.w_body = w_list - return w_self.w_body - -def Interactive_set_body(space, w_self, w_new_value): - w_self.w_body = w_new_value - w_self.initialization_state |= 1 - -def Interactive_del_body(space, w_self): - # Check if the element exists, raise appropriate exceptions - Interactive_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~1 - -_Interactive_field_unroller = unrolling_iterable(['body']) -def Interactive_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Interactive, w_self) - w_self.w_body = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Interactive constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Interactive_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Interactive.typedef = typedef.TypeDef("Interactive", - mod.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Interactive_get_body, Interactive_set_body, Interactive_del_body, cls=Interactive), - __new__=interp2app(get_AST_new(Interactive)), - __init__=interp2app(Interactive_init), -) -Interactive.typedef.heaptype = True - -def Expression_get_body(space, w_self): - if 
w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'body') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 1: - raise_attriberr(space, w_self, 'body') - return space.wrap(w_self.body) - -def Expression_set_body(space, w_self, w_new_value): - try: - w_self.body = space.interp_w(expr, w_new_value, False) - if type(w_self.body) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'body', w_new_value) - w_self.initialization_state &= ~1 - return - w_self.deldictvalue(space, 'body') - w_self.initialization_state |= 1 - -def Expression_del_body(space, w_self): - # Check if the element exists, raise appropriate exceptions - Expression_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~1 - -_Expression_field_unroller = unrolling_iterable(['body']) -def Expression_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Expression, w_self) - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Expression constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Expression_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Expression.typedef = typedef.TypeDef("Expression", - mod.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Expression_get_body, Expression_set_body, Expression_del_body, cls=Expression), - __new__=interp2app(get_AST_new(Expression)), - __init__=interp2app(Expression_init), -) -Expression.typedef.heaptype = True - -def Suite_get_body(space, w_self): - if not w_self.initialization_state & 1: - raise_attriberr(space, w_self, 'body') - if w_self.w_body is None: - 
if w_self.body is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) - w_self.w_body = w_list - return w_self.w_body - -def Suite_set_body(space, w_self, w_new_value): - w_self.w_body = w_new_value - w_self.initialization_state |= 1 - -def Suite_del_body(space, w_self): - # Check if the element exists, raise appropriate exceptions - Suite_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~1 - -_Suite_field_unroller = unrolling_iterable(['body']) -def Suite_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Suite, w_self) - w_self.w_body = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Suite constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Suite_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Suite.typedef = typedef.TypeDef("Suite", - mod.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['body']), - body=typedef.GetSetProperty(Suite_get_body, Suite_set_body, Suite_del_body, cls=Suite), - __new__=interp2app(get_AST_new(Suite)), - __init__=interp2app(Suite_init), -) -Suite.typedef.heaptype = True - -def stmt_get_lineno(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'lineno') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 1: - raise_attriberr(space, w_self, 'lineno') - return space.wrap(w_self.lineno) - -def stmt_set_lineno(space, w_self, w_new_value): - try: - w_self.lineno = space.int_w(w_new_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'lineno', w_new_value) - w_self.initialization_state &= ~1 - return - # need to save the original object 
too - w_self.setdictvalue(space, 'lineno', w_new_value) - w_self.initialization_state |= 1 - -def stmt_del_lineno(space, w_self): - # Check if the element exists, raise appropriate exceptions - stmt_get_lineno(space, w_self) - w_self.deldictvalue(space, 'lineno') - w_self.initialization_state &= ~1 - -def stmt_get_col_offset(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'col_offset') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 2: - raise_attriberr(space, w_self, 'col_offset') - return space.wrap(w_self.col_offset) - -def stmt_set_col_offset(space, w_self, w_new_value): - try: - w_self.col_offset = space.int_w(w_new_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'col_offset', w_new_value) - w_self.initialization_state &= ~2 - return - # need to save the original object too - w_self.setdictvalue(space, 'col_offset', w_new_value) - w_self.initialization_state |= 2 - -def stmt_del_col_offset(space, w_self): - # Check if the element exists, raise appropriate exceptions - stmt_get_col_offset(space, w_self) - w_self.deldictvalue(space, 'col_offset') - w_self.initialization_state &= ~2 - -stmt.typedef = typedef.TypeDef("stmt", - AST.typedef, - __module__='_ast', - _attributes=_FieldsWrapper(['lineno', 'col_offset']), - lineno=typedef.GetSetProperty(stmt_get_lineno, stmt_set_lineno, stmt_del_lineno, cls=stmt), - col_offset=typedef.GetSetProperty(stmt_get_col_offset, stmt_set_col_offset, stmt_del_col_offset, cls=stmt), - __new__=interp2app(get_AST_new(stmt)), -) -stmt.typedef.heaptype = True - -def FunctionDef_get_name(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'name') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'name') - if w_self.name is None: - return space.w_None - return space.wrap(w_self.name.decode('utf-8')) - -def 
FunctionDef_set_name(space, w_self, w_new_value): - try: - w_self.name = space.identifier_w(w_new_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'name', w_new_value) - w_self.initialization_state &= ~4 - return - # need to save the original object too - w_self.setdictvalue(space, 'name', w_new_value) - w_self.initialization_state |= 4 - -def FunctionDef_del_name(space, w_self): - # Check if the element exists, raise appropriate exceptions - FunctionDef_get_name(space, w_self) - w_self.deldictvalue(space, 'name') - w_self.initialization_state &= ~4 - -def FunctionDef_get_args(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'args') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 8: - raise_attriberr(space, w_self, 'args') - return space.wrap(w_self.args) - -def FunctionDef_set_args(space, w_self, w_new_value): - try: - w_self.args = space.interp_w(arguments, w_new_value, False) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'args', w_new_value) - w_self.initialization_state &= ~8 - return - w_self.deldictvalue(space, 'args') - w_self.initialization_state |= 8 - -def FunctionDef_del_args(space, w_self): - # Check if the element exists, raise appropriate exceptions - FunctionDef_get_args(space, w_self) - w_self.deldictvalue(space, 'args') - w_self.initialization_state &= ~8 - -def FunctionDef_get_body(space, w_self): - if not w_self.initialization_state & 16: - raise_attriberr(space, w_self, 'body') - if w_self.w_body is None: - if w_self.body is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) - w_self.w_body = w_list - return w_self.w_body - -def FunctionDef_set_body(space, w_self, w_new_value): - w_self.w_body = w_new_value - w_self.initialization_state |= 16 - -def FunctionDef_del_body(space, w_self): - # 
Check if the element exists, raise appropriate exceptions - FunctionDef_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~16 - -def FunctionDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 32: - raise_attriberr(space, w_self, 'decorator_list') - if w_self.w_decorator_list is None: - if w_self.decorator_list is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.decorator_list] - w_list = space.newlist(list_w) - w_self.w_decorator_list = w_list - return w_self.w_decorator_list - -def FunctionDef_set_decorator_list(space, w_self, w_new_value): - w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 32 - -def FunctionDef_del_decorator_list(space, w_self): - # Check if the element exists, raise appropriate exceptions - FunctionDef_get_decorator_list(space, w_self) - w_self.deldictvalue(space, 'decorator_list') - w_self.initialization_state &= ~32 - -def FunctionDef_get_returns(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'returns') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 64: - raise_attriberr(space, w_self, 'returns') - return space.wrap(w_self.returns) - -def FunctionDef_set_returns(space, w_self, w_new_value): - try: - w_self.returns = space.interp_w(expr, w_new_value, True) - if type(w_self.returns) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'returns', w_new_value) - w_self.initialization_state &= ~64 - return - w_self.deldictvalue(space, 'returns') - w_self.initialization_state |= 64 - -def FunctionDef_del_returns(space, w_self): - # Check if the element exists, raise appropriate exceptions - FunctionDef_get_returns(space, w_self) - w_self.deldictvalue(space, 'returns') - w_self.initialization_state &= ~64 - -_FunctionDef_field_unroller = 
unrolling_iterable(['name', 'args', 'body', 'decorator_list', 'returns']) -def FunctionDef_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(FunctionDef, w_self) - w_self.w_body = None - w_self.w_decorator_list = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 5: - w_err = space.wrap("FunctionDef constructor takes either 0 or 5 positional arguments") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _FunctionDef_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -FunctionDef.typedef = typedef.TypeDef("FunctionDef", - stmt.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['name', 'args', 'body', 'decorator_list', 'returns']), - name=typedef.GetSetProperty(FunctionDef_get_name, FunctionDef_set_name, FunctionDef_del_name, cls=FunctionDef), - args=typedef.GetSetProperty(FunctionDef_get_args, FunctionDef_set_args, FunctionDef_del_args, cls=FunctionDef), - body=typedef.GetSetProperty(FunctionDef_get_body, FunctionDef_set_body, FunctionDef_del_body, cls=FunctionDef), - decorator_list=typedef.GetSetProperty(FunctionDef_get_decorator_list, FunctionDef_set_decorator_list, FunctionDef_del_decorator_list, cls=FunctionDef), - returns=typedef.GetSetProperty(FunctionDef_get_returns, FunctionDef_set_returns, FunctionDef_del_returns, cls=FunctionDef), - __new__=interp2app(get_AST_new(FunctionDef)), - __init__=interp2app(FunctionDef_init), -) -FunctionDef.typedef.heaptype = True - -def ClassDef_get_name(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'name') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'name') - if w_self.name is None: - return space.w_None - return space.wrap(w_self.name.decode('utf-8')) - -def ClassDef_set_name(space, w_self, w_new_value): - try: - w_self.name 
= space.identifier_w(w_new_value) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'name', w_new_value) - w_self.initialization_state &= ~4 - return - # need to save the original object too - w_self.setdictvalue(space, 'name', w_new_value) - w_self.initialization_state |= 4 - -def ClassDef_del_name(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_name(space, w_self) - w_self.deldictvalue(space, 'name') - w_self.initialization_state &= ~4 - -def ClassDef_get_bases(space, w_self): - if not w_self.initialization_state & 8: - raise_attriberr(space, w_self, 'bases') - if w_self.w_bases is None: - if w_self.bases is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.bases] - w_list = space.newlist(list_w) - w_self.w_bases = w_list - return w_self.w_bases - -def ClassDef_set_bases(space, w_self, w_new_value): - w_self.w_bases = w_new_value - w_self.initialization_state |= 8 - -def ClassDef_del_bases(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_bases(space, w_self) - w_self.deldictvalue(space, 'bases') - w_self.initialization_state &= ~8 - -def ClassDef_get_keywords(space, w_self): - if not w_self.initialization_state & 16: - raise_attriberr(space, w_self, 'keywords') - if w_self.w_keywords is None: - if w_self.keywords is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.keywords] - w_list = space.newlist(list_w) - w_self.w_keywords = w_list - return w_self.w_keywords - -def ClassDef_set_keywords(space, w_self, w_new_value): - w_self.w_keywords = w_new_value - w_self.initialization_state |= 16 - -def ClassDef_del_keywords(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_keywords(space, w_self) - w_self.deldictvalue(space, 'keywords') - w_self.initialization_state &= ~16 - -def ClassDef_get_starargs(space, w_self): - if 
w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'starargs') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 32: - raise_attriberr(space, w_self, 'starargs') - return space.wrap(w_self.starargs) - -def ClassDef_set_starargs(space, w_self, w_new_value): - try: - w_self.starargs = space.interp_w(expr, w_new_value, True) - if type(w_self.starargs) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'starargs', w_new_value) - w_self.initialization_state &= ~32 - return - w_self.deldictvalue(space, 'starargs') - w_self.initialization_state |= 32 - -def ClassDef_del_starargs(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_starargs(space, w_self) - w_self.deldictvalue(space, 'starargs') - w_self.initialization_state &= ~32 - -def ClassDef_get_kwargs(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'kwargs') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 64: - raise_attriberr(space, w_self, 'kwargs') - return space.wrap(w_self.kwargs) - -def ClassDef_set_kwargs(space, w_self, w_new_value): - try: - w_self.kwargs = space.interp_w(expr, w_new_value, True) - if type(w_self.kwargs) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'kwargs', w_new_value) - w_self.initialization_state &= ~64 - return - w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state |= 64 - -def ClassDef_del_kwargs(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_kwargs(space, w_self) - w_self.deldictvalue(space, 'kwargs') - w_self.initialization_state &= ~64 - -def ClassDef_get_body(space, w_self): - if not w_self.initialization_state & 128: - 
raise_attriberr(space, w_self, 'body') - if w_self.w_body is None: - if w_self.body is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.body] - w_list = space.newlist(list_w) - w_self.w_body = w_list - return w_self.w_body - -def ClassDef_set_body(space, w_self, w_new_value): - w_self.w_body = w_new_value - w_self.initialization_state |= 128 - -def ClassDef_del_body(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_body(space, w_self) - w_self.deldictvalue(space, 'body') - w_self.initialization_state &= ~128 - -def ClassDef_get_decorator_list(space, w_self): - if not w_self.initialization_state & 256: - raise_attriberr(space, w_self, 'decorator_list') - if w_self.w_decorator_list is None: - if w_self.decorator_list is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.decorator_list] - w_list = space.newlist(list_w) - w_self.w_decorator_list = w_list - return w_self.w_decorator_list - -def ClassDef_set_decorator_list(space, w_self, w_new_value): - w_self.w_decorator_list = w_new_value - w_self.initialization_state |= 256 - -def ClassDef_del_decorator_list(space, w_self): - # Check if the element exists, raise appropriate exceptions - ClassDef_get_decorator_list(space, w_self) - w_self.deldictvalue(space, 'decorator_list') - w_self.initialization_state &= ~256 - -_ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'keywords', 'starargs', 'kwargs', 'body', 'decorator_list']) -def ClassDef_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(ClassDef, w_self) - w_self.w_bases = None - w_self.w_keywords = None - w_self.w_body = None - w_self.w_decorator_list = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 7: - w_err = space.wrap("ClassDef constructor takes either 0 or 7 positional arguments") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _ClassDef_field_unroller: - space.setattr(w_self, 
space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -ClassDef.typedef = typedef.TypeDef("ClassDef", - stmt.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['name', 'bases', 'keywords', 'starargs', 'kwargs', 'body', 'decorator_list']), - name=typedef.GetSetProperty(ClassDef_get_name, ClassDef_set_name, ClassDef_del_name, cls=ClassDef), - bases=typedef.GetSetProperty(ClassDef_get_bases, ClassDef_set_bases, ClassDef_del_bases, cls=ClassDef), - keywords=typedef.GetSetProperty(ClassDef_get_keywords, ClassDef_set_keywords, ClassDef_del_keywords, cls=ClassDef), - starargs=typedef.GetSetProperty(ClassDef_get_starargs, ClassDef_set_starargs, ClassDef_del_starargs, cls=ClassDef), - kwargs=typedef.GetSetProperty(ClassDef_get_kwargs, ClassDef_set_kwargs, ClassDef_del_kwargs, cls=ClassDef), - body=typedef.GetSetProperty(ClassDef_get_body, ClassDef_set_body, ClassDef_del_body, cls=ClassDef), - decorator_list=typedef.GetSetProperty(ClassDef_get_decorator_list, ClassDef_set_decorator_list, ClassDef_del_decorator_list, cls=ClassDef), - __new__=interp2app(get_AST_new(ClassDef)), - __init__=interp2app(ClassDef_init), -) -ClassDef.typedef.heaptype = True - -def Return_get_value(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'value') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'value') - return space.wrap(w_self.value) - -def Return_set_value(space, w_self, w_new_value): - try: - w_self.value = space.interp_w(expr, w_new_value, True) - if type(w_self.value) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'value', w_new_value) - w_self.initialization_state &= ~4 - return - w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 4 - -def 
Return_del_value(space, w_self): - # Check if the element exists, raise appropriate exceptions - Return_get_value(space, w_self) - w_self.deldictvalue(space, 'value') - w_self.initialization_state &= ~4 - -_Return_field_unroller = unrolling_iterable(['value']) -def Return_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Return, w_self) - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Return constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Return_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Return.typedef = typedef.TypeDef("Return", - stmt.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['value']), - value=typedef.GetSetProperty(Return_get_value, Return_set_value, Return_del_value, cls=Return), - __new__=interp2app(get_AST_new(Return)), - __init__=interp2app(Return_init), -) -Return.typedef.heaptype = True - -def Delete_get_targets(space, w_self): - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'targets') - if w_self.w_targets is None: - if w_self.targets is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.targets] - w_list = space.newlist(list_w) - w_self.w_targets = w_list - return w_self.w_targets - -def Delete_set_targets(space, w_self, w_new_value): - w_self.w_targets = w_new_value - w_self.initialization_state |= 4 - -def Delete_del_targets(space, w_self): - # Check if the element exists, raise appropriate exceptions - Delete_get_targets(space, w_self) - w_self.deldictvalue(space, 'targets') - w_self.initialization_state &= ~4 - -_Delete_field_unroller = unrolling_iterable(['targets']) -def Delete_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Delete, w_self) - w_self.w_targets = None - args_w, kwargs_w = 
__args__.unpack() - if args_w: - if len(args_w) != 1: - w_err = space.wrap("Delete constructor takes either 0 or 1 positional argument") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Delete_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Delete.typedef = typedef.TypeDef("Delete", - stmt.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['targets']), - targets=typedef.GetSetProperty(Delete_get_targets, Delete_set_targets, Delete_del_targets, cls=Delete), - __new__=interp2app(get_AST_new(Delete)), - __init__=interp2app(Delete_init), -) -Delete.typedef.heaptype = True - -def Assign_get_targets(space, w_self): - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'targets') - if w_self.w_targets is None: - if w_self.targets is None: - list_w = [] - else: - list_w = [space.wrap(node) for node in w_self.targets] - w_list = space.newlist(list_w) - w_self.w_targets = w_list - return w_self.w_targets - -def Assign_set_targets(space, w_self, w_new_value): - w_self.w_targets = w_new_value - w_self.initialization_state |= 4 - -def Assign_del_targets(space, w_self): - # Check if the element exists, raise appropriate exceptions - Assign_get_targets(space, w_self) - w_self.deldictvalue(space, 'targets') - w_self.initialization_state &= ~4 - -def Assign_get_value(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'value') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 8: - raise_attriberr(space, w_self, 'value') - return space.wrap(w_self.value) - -def Assign_set_value(space, w_self, w_new_value): - try: - w_self.value = space.interp_w(expr, w_new_value, False) - if type(w_self.value) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - 
w_self.setdictvalue(space, 'value', w_new_value) - w_self.initialization_state &= ~8 - return - w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 8 - -def Assign_del_value(space, w_self): - # Check if the element exists, raise appropriate exceptions - Assign_get_value(space, w_self) - w_self.deldictvalue(space, 'value') - w_self.initialization_state &= ~8 - -_Assign_field_unroller = unrolling_iterable(['targets', 'value']) -def Assign_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(Assign, w_self) - w_self.w_targets = None - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 2: - w_err = space.wrap("Assign constructor takes either 0 or 2 positional arguments") - raise OperationError(space.w_TypeError, w_err) - i = 0 - for field in _Assign_field_unroller: - space.setattr(w_self, space.wrap(field), args_w[i]) - i += 1 - for field, w_value in kwargs_w.iteritems(): - space.setattr(w_self, space.wrap(field), w_value) - -Assign.typedef = typedef.TypeDef("Assign", - stmt.typedef, - __module__='_ast', - _fields=_FieldsWrapper(['targets', 'value']), - targets=typedef.GetSetProperty(Assign_get_targets, Assign_set_targets, Assign_del_targets, cls=Assign), - value=typedef.GetSetProperty(Assign_get_value, Assign_set_value, Assign_del_value, cls=Assign), - __new__=interp2app(get_AST_new(Assign)), - __init__=interp2app(Assign_init), -) -Assign.typedef.heaptype = True - -def AugAssign_get_target(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'target') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 4: - raise_attriberr(space, w_self, 'target') - return space.wrap(w_self.target) - -def AugAssign_set_target(space, w_self, w_new_value): - try: - w_self.target = space.interp_w(expr, w_new_value, False) - if type(w_self.target) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): 
- raise - w_self.setdictvalue(space, 'target', w_new_value) - w_self.initialization_state &= ~4 - return - w_self.deldictvalue(space, 'target') - w_self.initialization_state |= 4 - -def AugAssign_del_target(space, w_self): - # Check if the element exists, raise appropriate exceptions - AugAssign_get_target(space, w_self) - w_self.deldictvalue(space, 'target') - w_self.initialization_state &= ~4 - -def AugAssign_get_op(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'op') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 8: - raise_attriberr(space, w_self, 'op') - return operator_to_class[w_self.op - 1]() - -def AugAssign_set_op(space, w_self, w_new_value): - try: - obj = space.interp_w(operator, w_new_value) - w_self.op = obj.to_simple_int(space) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state &= ~8 - return - # need to save the original object too - w_self.setdictvalue(space, 'op', w_new_value) - w_self.initialization_state |= 8 - -def AugAssign_del_op(space, w_self): - # Check if the element exists, raise appropriate exceptions - AugAssign_get_op(space, w_self) - w_self.deldictvalue(space, 'op') - w_self.initialization_state &= ~8 - -def AugAssign_get_value(space, w_self): - if w_self.w_dict is not None: - w_obj = w_self.getdictvalue(space, 'value') - if w_obj is not None: - return w_obj - if not w_self.initialization_state & 16: - raise_attriberr(space, w_self, 'value') - return space.wrap(w_self.value) - -def AugAssign_set_value(space, w_self, w_new_value): - try: - w_self.value = space.interp_w(expr, w_new_value, False) - if type(w_self.value) is expr: - raise OperationError(space.w_TypeError, space.w_None) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - w_self.setdictvalue(space, 'value', w_new_value) - w_self.initialization_state &= ~16 - return - 
w_self.deldictvalue(space, 'value') - w_self.initialization_state |= 16 - -def AugAssign_del_value(space, w_self): - # Check if the element exists, raise appropriate exceptions - AugAssign_get_value(space, w_self) - w_self.deldictvalue(space, 'value') - w_self.initialization_state &= ~16 - -_AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value']) -def AugAssign_init(space, w_self, __args__): - w_self = space.descr_self_interp_w(AugAssign, w_self) - args_w, kwargs_w = __args__.unpack() - if args_w: - if len(args_w) != 3: - w_err = space.wrap("AugAssign constructor takes either 0 or 3 positional arguments") - raise OperationError(space.w_TypeError, w_err) - i = 0 From noreply at buildbot.pypy.org Tue Aug 26 19:58:25 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:25 +0200 (CEST) Subject: [pypy-commit] pypy py3k: py3 _fields Message-ID: <20140826175825.D358A1C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73068:5c7c51c7c4f7 Date: 2014-08-24 16:07 -0700 http://bitbucket.org/pypy/pypy/changeset/5c7c51c7c4f7/ Log: py3 _fields diff --git a/pypy/interpreter/astcompiler/test/test_ast.py b/pypy/interpreter/astcompiler/test/test_ast.py --- a/pypy/interpreter/astcompiler/test/test_ast.py +++ b/pypy/interpreter/astcompiler/test/test_ast.py @@ -41,11 +41,12 @@ w_fields = space.getattr(ast.get(space).w_FunctionDef, space.wrap("_fields")) assert space.eq_w(w_fields, space.wrap( - ('name', 'args', 'body', 'decorator_list'))) + ('name', 'args', 'body', 'decorator_list', 'returns'))) w_fields = space.getattr(ast.get(space).w_arguments, space.wrap("_fields")) assert space.eq_w(w_fields, space.wrap( - ('args', 'vararg', 'kwarg', 'defaults'))) + ('args', 'vararg', 'varargannotation', 'kwonlyargs', 'kwarg', + 'kwargannotation', 'defaults', 'kw_defaults'))) def test_attributes(self, space): w_attrs = space.getattr(ast.get(space).w_FunctionDef, From noreply at buildbot.pypy.org Tue Aug 26 19:58:27 2014 From: 
noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix handling of None values in kw_defaults again Message-ID: <20140826175827.152C21C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73069:f9d726dbb392 Date: 2014-08-24 16:31 -0700 http://bitbucket.org/pypy/pypy/changeset/f9d726dbb392/ Log: fix handling of None values in kw_defaults again diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -3291,7 +3291,7 @@ if self.kw_defaults is None: kw_defaults_w = [] else: - kw_defaults_w = [node.to_object(space) for node in self.kw_defaults] # expr + kw_defaults_w = [node.to_object(space) if node is not None else space.w_None for node in self.kw_defaults] # expr w_kw_defaults = space.newlist(kw_defaults_w) space.setattr(w_node, space.wrap('kw_defaults'), w_kw_defaults) return w_node diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -131,7 +131,9 @@ return "space.wrap(%s)" % (value,) else: wrapper = "%s.to_object(space)" % (value,) - if field.opt: + # XXX: kw_defaults, unlike other sequences, allows None + # values + if field.opt or field.name.value == 'kw_defaults': wrapper += " if %s is not None else space.w_None" % (value,) return wrapper From noreply at buildbot.pypy.org Tue Aug 26 19:58:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:28 +0200 (CEST) Subject: [pypy-commit] pypy py3k: (mattip) fix test_fillWithObject Message-ID: <20140826175828.3CF781C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73070:f7c113013604 Date: 2014-08-26 10:56 -0700 http://bitbucket.org/pypy/pypy/changeset/f7c113013604/ Log: (mattip) fix 
test_fillWithObject diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer -from rpython.rlib.objectmodel import import_from_mixin from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef @@ -13,7 +12,8 @@ # PyPy only supports contiguous Py_buffers for now. return 1 -class CBufferMixin(object): +class CBuffer(buffer.Buffer): + _immutable_ = True def __init__(self, space, c_buf, c_len, w_obj): self.space = space @@ -21,8 +21,7 @@ self.c_len = c_len self.w_obj = w_obj - def destructor(self): - assert isinstance(self, CBufferMixin) + def __del__(self): Py_DecRef(self.space, self.w_obj) def getlength(self): @@ -34,10 +33,3 @@ def as_str(self): return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf), self.c_len) - -class CBuffer(buffer.Buffer): - import_from_mixin(CBufferMixin) - _immutable_ = True - - def __del__(self): - CBufferMixin.destructor(self) From noreply at buildbot.pypy.org Tue Aug 26 19:58:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 19:58:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140826175829.69CF91C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73071:e15d0862520d Date: 2014-08-26 10:57 -0700 http://bitbucket.org/pypy/pypy/changeset/e15d0862520d/ Log: merge default diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -1,7 +1,6 @@ import sys from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.objectmodel import we_are_translated -from rpython.rtyper.lltypesystem import lltype, rffi from rpython.jit.metainterp.history import INT, FLOAT from 
rpython.jit.backend.x86.arch import (WORD, IS_X86_64, IS_X86_32, PASS_ON_MY_FRAME, FRAME_FIXED_SIZE) @@ -22,8 +21,6 @@ def align_stack_words(words): return (words + CALL_ALIGN - 1) & ~(CALL_ALIGN-1) -NO_ARG_FUNC_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void)) - class CallBuilderX86(AbstractCallBuilder): @@ -94,30 +91,9 @@ gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack and self.is_call_release_gil: - from rpython.jit.backend.x86.assembler import heap - from rpython.jit.backend.x86 import rx86 - from rpython.rtyper.lltypesystem.lloperation import llop - # - # When doing a call_release_gil with shadowstack, there - # is the risk that the 'rpy_fastgil' was free but the - # current shadowstack can be the one of a different - # thread. So here we check if the shadowstack pointer - # is still the same as before we released the GIL (saved - # in 'ebx'), and if not, we call 'thread_run'. - rst = gcrootmap.get_root_stack_top_addr() - mc = self.mc - mc.CMP(ebx, heap(rst)) - mc.J_il8(rx86.Conditions['E'], 0) - je_location = mc.get_relative_pos() - # call 'thread_run' - t_run = llop.gc_thread_run_ptr(NO_ARG_FUNC_PTR) - mc.CALL(imm(rffi.cast(lltype.Signed, t_run))) - # patch the JE above - offset = mc.get_relative_pos() - je_location - assert 0 < offset <= 127 - mc.overwrite(je_location-1, chr(offset)) + # in this mode, 'ebx' happens to contain the shadowstack + # top at this point, so reuse it instead of loading it again ssreg = ebx - # self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) if self.change_extra_stack_depth: self.asm.set_extra_stack_depth(self.mc, 0) @@ -206,8 +182,35 @@ mc.MOV_ri(X86_64_SCRATCH_REG.value, fastgil) mc.XCHG_rm(old_value.value, (X86_64_SCRATCH_REG.value, 0)) mc.CMP(old_value, css_value) - mc.J_il8(rx86.Conditions['E'], 0) - je_location = mc.get_relative_pos() + # + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if bool(gcrootmap) and gcrootmap.is_shadow_stack: + from 
rpython.jit.backend.x86.assembler import heap + # + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in 'ebx'), and if not, we fall back to 'reacqgil_addr'. + mc.J_il8(rx86.Conditions['NE'], 0) + jne_location = mc.get_relative_pos() + # here, ecx is zero (so rpy_fastgil was not acquired) + rst = gcrootmap.get_root_stack_top_addr() + mc = self.mc + mc.CMP(ebx, heap(rst)) + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() + # revert the rpy_fastgil acquired above, so that the + # general 'reacqgil_addr' below can acquire it again... + mc.MOV(heap(fastgil), ecx) + # patch the JNE above + offset = mc.get_relative_pos() - jne_location + assert 0 < offset <= 127 + mc.overwrite(jne_location-1, chr(offset)) + else: + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() # # Yes, we need to call the reacqgil() function self.save_result_value_reacq() diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -977,12 +977,6 @@ hop.genop("direct_call", [self.root_walker.thread_run_ptr]) self.pop_roots(hop, livevars) - def gct_gc_thread_run_ptr(self, hop): - assert self.translator.config.translation.thread - assert hasattr(self.root_walker, 'thread_run_ptr') - hop.genop("same_as", [self.root_walker.thread_run_ptr], - resultvar=hop.spaceop.result) - def gct_gc_thread_start(self, hop): assert self.translator.config.translation.thread if hasattr(self.root_walker, 'thread_start_ptr'): diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py 
@@ -473,7 +473,6 @@ 'gc_set_max_heap_size': LLOp(), 'gc_can_move' : LLOp(sideeffects=False), 'gc_thread_run' : LLOp(), - 'gc_thread_run_ptr' : LLOp(sideeffects=False), 'gc_thread_start' : LLOp(), 'gc_thread_die' : LLOp(), 'gc_thread_before_fork':LLOp(), # returns an opaque address From noreply at buildbot.pypy.org Tue Aug 26 20:06:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 20:06:00 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fix generic extraction of optional fields Message-ID: <20140826180601.137761C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73072:f82f65bb5aec Date: 2014-08-26 11:03 -0700 http://bitbucket.org/pypy/pypy/changeset/f82f65bb5aec/ Log: fix generic extraction of optional fields diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -153,7 +153,10 @@ elif field.type.value in ("bool",): return "space.bool_w(%s)" % (value,) else: - return "%s.from_object(space, %s)" % (field.type, value) + extractor = "%s.from_object(space, %s)" % (field.type, value) + if field.opt: + extractor += " if %s is not None else None" % (value,) + return extractor def get_field_converter(self, field): if field.seq: From noreply at buildbot.pypy.org Tue Aug 26 20:06:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 20:06:03 +0200 (CEST) Subject: [pypy-commit] pypy py3k: reapply py3k's special handling of identifiers Message-ID: <20140826180603.091501C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73073:1be2c936628c Date: 2014-08-26 11:03 -0700 http://bitbucket.org/pypy/pypy/changeset/1be2c936628c/ Log: reapply py3k's special handling of identifiers diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- 
a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -125,9 +125,14 @@ def get_value_converter(self, field, value): if field.type.value in self.data.simple_types: return "%s_to_class[%s - 1]().to_object(space)" % (field.type, value) + elif field.type.value == "identifier": + wrapper = "space.wrap(%s.decode('utf-8'))" % (value,) + if field.opt: + wrapper += " if %s is not None else space.w_None" % (value,) + return wrapper elif field.type.value in ("object", "string"): return value - elif field.type.value in ("identifier", "int", "bool"): + elif field.type.value in ("int", "bool"): return "space.wrap(%s)" % (value,) else: wrapper = "%s.to_object(space)" % (value,) @@ -147,7 +152,7 @@ elif field.type.value in ("identifier",): if field.opt: return "space.str_or_None_w(%s)" % (value,) - return "space.realstr_w(%s)" % (value,) + return "space.identifier_w(%s)" % (value,) elif field.type.value in ("int",): return "space.int_w(%s)" % (value,) elif field.type.value in ("bool",): From noreply at buildbot.pypy.org Tue Aug 26 20:06:04 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 20:06:04 +0200 (CEST) Subject: [pypy-commit] pypy py3k: regenerate ast.py Message-ID: <20140826180604.53BBF1C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73074:ce5d419573a8 Date: 2014-08-26 11:03 -0700 http://bitbucket.org/pypy/pypy/changeset/ce5d419573a8/ Log: regenerate ast.py diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -382,7 +382,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_FunctionDef) - w_name = space.wrap(self.name) # identifier + w_name = space.wrap(self.name.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('name'), w_name) w_args = self.args.to_object(space) # arguments space.setattr(w_node, 
space.wrap('args'), w_args) @@ -415,13 +415,13 @@ w_returns = get_field(space, w_node, 'returns', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _name = space.realstr_w(w_name) + _name = space.identifier_w(w_name) _args = arguments.from_object(space, w_args) body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] - _returns = expr.from_object(space, w_returns) + _returns = expr.from_object(space, w_returns) if w_returns is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return FunctionDef(_name, _args, _body, _decorator_list, _returns, _lineno, _col_offset) @@ -461,7 +461,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_ClassDef) - w_name = space.wrap(self.name) # identifier + w_name = space.wrap(self.name.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('name'), w_name) if self.bases is None: bases_w = [] @@ -508,13 +508,13 @@ w_decorator_list = get_field(space, w_node, 'decorator_list', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _name = space.realstr_w(w_name) + _name = space.identifier_w(w_name) bases_w = space.unpackiterable(w_bases) _bases = [expr.from_object(space, w_item) for w_item in bases_w] keywords_w = space.unpackiterable(w_keywords) _keywords = [keyword.from_object(space, w_item) for w_item in keywords_w] - _starargs = expr.from_object(space, w_starargs) - _kwargs = expr.from_object(space, w_kwargs) + _starargs = expr.from_object(space, w_starargs) if w_starargs is not None else None + _kwargs = expr.from_object(space, w_kwargs) if w_kwargs is not None else None body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, 
w_item) for w_item in body_w] decorator_list_w = space.unpackiterable(w_decorator_list) @@ -555,7 +555,7 @@ w_value = get_field(space, w_node, 'value', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value = expr.from_object(space, w_value) + _value = expr.from_object(space, w_value) if w_value is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Return(_value, _lineno, _col_offset) @@ -931,7 +931,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _context_expr = expr.from_object(space, w_context_expr) - _optional_vars = expr.from_object(space, w_optional_vars) + _optional_vars = expr.from_object(space, w_optional_vars) if w_optional_vars is not None else None body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] _lineno = space.int_w(w_lineno) @@ -976,8 +976,8 @@ w_cause = get_field(space, w_node, 'cause', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _exc = expr.from_object(space, w_exc) - _cause = expr.from_object(space, w_cause) + _exc = expr.from_object(space, w_exc) if w_exc is not None else None + _cause = expr.from_object(space, w_cause) if w_cause is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Raise(_exc, _cause, _lineno, _col_offset) @@ -1140,7 +1140,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _test = expr.from_object(space, w_test) - _msg = expr.from_object(space, w_msg) + _msg = expr.from_object(space, w_msg) if w_msg is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Assert(_test, _msg, _lineno, _col_offset) @@ -1208,7 +1208,7 @@ def to_object(self, space): w_node = 
space.call_function(get(space).w_ImportFrom) - w_module = space.wrap(self.module) # identifier + w_module = space.wrap(self.module.decode('utf-8')) if self.module is not None else space.w_None # identifier space.setattr(w_node, space.wrap('module'), w_module) if self.names is None: names_w = [] @@ -1259,7 +1259,7 @@ if self.names is None: names_w = [] else: - names_w = [space.wrap(node) for node in self.names] # identifier + names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier w_names = space.newlist(names_w) space.setattr(w_node, space.wrap('names'), w_names) w_lineno = space.wrap(self.lineno) # int @@ -1274,7 +1274,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) names_w = space.unpackiterable(w_names) - _names = [space.realstr_w(w_item) for w_item in names_w] + _names = [space.identifier_w(w_item) for w_item in names_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Global(_names, _lineno, _col_offset) @@ -1299,7 +1299,7 @@ if self.names is None: names_w = [] else: - names_w = [space.wrap(node) for node in self.names] # identifier + names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier w_names = space.newlist(names_w) space.setattr(w_node, space.wrap('names'), w_names) w_lineno = space.wrap(self.lineno) # int @@ -1314,7 +1314,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) names_w = space.unpackiterable(w_names) - _names = [space.realstr_w(w_item) for w_item in names_w] + _names = [space.identifier_w(w_item) for w_item in names_w] _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Nonlocal(_names, _lineno, _col_offset) @@ -2060,7 +2060,7 @@ w_value = get_field(space, w_node, 'value', True) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _value 
= expr.from_object(space, w_value) + _value = expr.from_object(space, w_value) if w_value is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Yield(_value, _lineno, _col_offset) @@ -2191,8 +2191,8 @@ _args = [expr.from_object(space, w_item) for w_item in args_w] keywords_w = space.unpackiterable(w_keywords) _keywords = [keyword.from_object(space, w_item) for w_item in keywords_w] - _starargs = expr.from_object(space, w_starargs) - _kwargs = expr.from_object(space, w_kwargs) + _starargs = expr.from_object(space, w_starargs) if w_starargs is not None else None + _kwargs = expr.from_object(space, w_kwargs) if w_kwargs is not None else None _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) return Call(_func, _args, _keywords, _starargs, _kwargs, _lineno, _col_offset) @@ -2354,7 +2354,7 @@ w_node = space.call_function(get(space).w_Attribute) w_value = self.value.to_object(space) # expr space.setattr(w_node, space.wrap('value'), w_value) - w_attr = space.wrap(self.attr) # identifier + w_attr = space.wrap(self.attr.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('attr'), w_attr) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context space.setattr(w_node, space.wrap('ctx'), w_ctx) @@ -2372,7 +2372,7 @@ w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) _value = expr.from_object(space, w_value) - _attr = space.realstr_w(w_attr) + _attr = space.identifier_w(w_attr) _ctx = expr_context.from_object(space, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) @@ -2484,7 +2484,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_Name) - w_id = space.wrap(self.id) # identifier + w_id = space.wrap(self.id.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('id'), w_id) w_ctx = expr_context_to_class[self.ctx - 1]().to_object(space) # expr_context 
space.setattr(w_node, space.wrap('ctx'), w_ctx) @@ -2500,7 +2500,7 @@ w_ctx = get_field(space, w_node, 'ctx', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _id = space.realstr_w(w_id) + _id = space.identifier_w(w_id) _ctx = expr_context.from_object(space, w_ctx) _lineno = space.int_w(w_lineno) _col_offset = space.int_w(w_col_offset) @@ -2752,9 +2752,9 @@ w_lower = get_field(space, w_node, 'lower', True) w_upper = get_field(space, w_node, 'upper', True) w_step = get_field(space, w_node, 'step', True) - _lower = expr.from_object(space, w_lower) - _upper = expr.from_object(space, w_upper) - _step = expr.from_object(space, w_step) + _lower = expr.from_object(space, w_lower) if w_lower is not None else None + _upper = expr.from_object(space, w_upper) if w_upper is not None else None + _step = expr.from_object(space, w_step) if w_step is not None else None return Slice(_lower, _upper, _step) State.ast_type('Slice', 'slice', ['lower', 'upper', 'step']) @@ -3198,7 +3198,7 @@ w_node = space.call_function(get(space).w_ExceptHandler) w_type = self.type.to_object(space) if self.type is not None else space.w_None # expr space.setattr(w_node, space.wrap('type'), w_type) - w_name = space.wrap(self.name) # identifier + w_name = space.wrap(self.name.decode('utf-8')) if self.name is not None else space.w_None # identifier space.setattr(w_node, space.wrap('name'), w_name) if self.body is None: body_w = [] @@ -3219,7 +3219,7 @@ w_body = get_field(space, w_node, 'body', False) w_lineno = get_field(space, w_node, 'lineno', False) w_col_offset = get_field(space, w_node, 'col_offset', False) - _type = expr.from_object(space, w_type) + _type = expr.from_object(space, w_type) if w_type is not None else None _name = space.str_or_None_w(w_name) body_w = space.unpackiterable(w_body) _body = [stmt.from_object(space, w_item) for w_item in body_w] @@ -3268,7 +3268,7 @@ args_w = [node.to_object(space) for node in 
self.args] # arg w_args = space.newlist(args_w) space.setattr(w_node, space.wrap('args'), w_args) - w_vararg = space.wrap(self.vararg) # identifier + w_vararg = space.wrap(self.vararg.decode('utf-8')) if self.vararg is not None else space.w_None # identifier space.setattr(w_node, space.wrap('vararg'), w_vararg) w_varargannotation = self.varargannotation.to_object(space) if self.varargannotation is not None else space.w_None # expr space.setattr(w_node, space.wrap('varargannotation'), w_varargannotation) @@ -3278,7 +3278,7 @@ kwonlyargs_w = [node.to_object(space) for node in self.kwonlyargs] # arg w_kwonlyargs = space.newlist(kwonlyargs_w) space.setattr(w_node, space.wrap('kwonlyargs'), w_kwonlyargs) - w_kwarg = space.wrap(self.kwarg) # identifier + w_kwarg = space.wrap(self.kwarg.decode('utf-8')) if self.kwarg is not None else space.w_None # identifier space.setattr(w_node, space.wrap('kwarg'), w_kwarg) w_kwargannotation = self.kwargannotation.to_object(space) if self.kwargannotation is not None else space.w_None # expr space.setattr(w_node, space.wrap('kwargannotation'), w_kwargannotation) @@ -3309,11 +3309,11 @@ args_w = space.unpackiterable(w_args) _args = [arg.from_object(space, w_item) for w_item in args_w] _vararg = space.str_or_None_w(w_vararg) - _varargannotation = expr.from_object(space, w_varargannotation) + _varargannotation = expr.from_object(space, w_varargannotation) if w_varargannotation is not None else None kwonlyargs_w = space.unpackiterable(w_kwonlyargs) _kwonlyargs = [arg.from_object(space, w_item) for w_item in kwonlyargs_w] _kwarg = space.str_or_None_w(w_kwarg) - _kwargannotation = expr.from_object(space, w_kwargannotation) + _kwargannotation = expr.from_object(space, w_kwargannotation) if w_kwargannotation is not None else None defaults_w = space.unpackiterable(w_defaults) _defaults = [expr.from_object(space, w_item) for w_item in defaults_w] kw_defaults_w = space.unpackiterable(w_kw_defaults) @@ -3338,7 +3338,7 @@ def to_object(self, space): 
w_node = space.call_function(get(space).w_arg) - w_arg = space.wrap(self.arg) # identifier + w_arg = space.wrap(self.arg.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('arg'), w_arg) w_annotation = self.annotation.to_object(space) if self.annotation is not None else space.w_None # expr space.setattr(w_node, space.wrap('annotation'), w_annotation) @@ -3348,8 +3348,8 @@ def from_object(space, w_node): w_arg = get_field(space, w_node, 'arg', False) w_annotation = get_field(space, w_node, 'annotation', True) - _arg = space.realstr_w(w_arg) - _annotation = expr.from_object(space, w_annotation) + _arg = space.identifier_w(w_arg) + _annotation = expr.from_object(space, w_annotation) if w_annotation is not None else None return arg(_arg, _annotation) State.ast_type('arg', 'AST', ['arg', 'annotation']) @@ -3369,7 +3369,7 @@ def to_object(self, space): w_node = space.call_function(get(space).w_keyword) - w_arg = space.wrap(self.arg) # identifier + w_arg = space.wrap(self.arg.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('arg'), w_arg) w_value = self.value.to_object(space) # expr space.setattr(w_node, space.wrap('value'), w_value) @@ -3379,7 +3379,7 @@ def from_object(space, w_node): w_arg = get_field(space, w_node, 'arg', False) w_value = get_field(space, w_node, 'value', False) - _arg = space.realstr_w(w_arg) + _arg = space.identifier_w(w_arg) _value = expr.from_object(space, w_value) return keyword(_arg, _value) @@ -3399,9 +3399,9 @@ def to_object(self, space): w_node = space.call_function(get(space).w_alias) - w_name = space.wrap(self.name) # identifier + w_name = space.wrap(self.name.decode('utf-8')) # identifier space.setattr(w_node, space.wrap('name'), w_name) - w_asname = space.wrap(self.asname) # identifier + w_asname = space.wrap(self.asname.decode('utf-8')) if self.asname is not None else space.w_None # identifier space.setattr(w_node, space.wrap('asname'), w_asname) return w_node @@ -3409,7 +3409,7 @@ def from_object(space, 
w_node): w_name = get_field(space, w_node, 'name', False) w_asname = get_field(space, w_node, 'asname', True) - _name = space.realstr_w(w_name) + _name = space.identifier_w(w_name) _asname = space.str_or_None_w(w_asname) return alias(_name, _asname) From noreply at buildbot.pypy.org Tue Aug 26 20:06:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 20:06:05 +0200 (CEST) Subject: [pypy-commit] pypy py3k: kill realstr_w which is no longer used nor makes much sense on py3k Message-ID: <20140826180605.93F551C02AF@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73075:d5ad5952d919 Date: 2014-08-26 11:04 -0700 http://bitbucket.org/pypy/pypy/changeset/d5ad5952d919/ Log: kill realstr_w which is no longer used nor makes much sense on py3k diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1529,13 +1529,6 @@ """ return w_obj.float_w(self, allow_conversion) - def realstr_w(self, w_obj): - # Like str_w, but only works if w_obj is really of type 'str'. 
- if not self.isinstance_w(w_obj, self.w_str): - raise OperationError(self.w_TypeError, - self.wrap('argument must be a string')) - return self.str_w(w_obj) - def unicode_w(self, w_obj): return w_obj.unicode_w(self) diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py --- a/pypy/module/mmap/interp_mmap.py +++ b/pypy/module/mmap/interp_mmap.py @@ -196,7 +196,7 @@ "mmap item value must be in range(0, 256)")) self.mmap.setitem(start, chr(value)) else: - value = space.realstr_w(w_value) + value = space.bytes_w(w_value) if len(value) != length: raise OperationError(space.w_ValueError, space.wrap("mmap slice assignment is wrong size")) From noreply at buildbot.pypy.org Tue Aug 26 22:10:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 26 Aug 2014 22:10:40 +0200 (CEST) Subject: [pypy-commit] pypy py3k: back out changeset: f7c113013604, breaks translation Message-ID: <20140826201040.50B4F1C063C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: py3k Changeset: r73076:b3072cefc086 Date: 2014-08-26 23:09 +0300 http://bitbucket.org/pypy/pypy/changeset/b3072cefc086/ Log: back out changeset: f7c113013604, breaks translation diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer +from rpython.rlib.objectmodel import import_from_mixin from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef @@ -12,8 +13,7 @@ # PyPy only supports contiguous Py_buffers for now. 
return 1 -class CBuffer(buffer.Buffer): - _immutable_ = True +class CBufferMixin(object): def __init__(self, space, c_buf, c_len, w_obj): self.space = space @@ -21,7 +21,8 @@ self.c_len = c_len self.w_obj = w_obj - def __del__(self): + def destructor(self): + assert isinstance(self, CBufferMixin) Py_DecRef(self.space, self.w_obj) def getlength(self): @@ -33,3 +34,10 @@ def as_str(self): return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf), self.c_len) + +class CBuffer(buffer.Buffer): + import_from_mixin(CBufferMixin) + _immutable_ = True + + def __del__(self): + CBufferMixin.destructor(self) From noreply at buildbot.pypy.org Tue Aug 26 23:22:41 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Tue, 26 Aug 2014 23:22:41 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: merge py3k Message-ID: <20140826212241.02BEF1C063C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r73077:664cef61a119 Date: 2014-08-26 14:22 -0700 http://bitbucket.org/pypy/pypy/changeset/664cef61a119/ Log: merge py3k diff too long, truncating to 2000 out of 11591 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -354,6 +354,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. 
diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -101,7 +101,7 @@ st = {} for c in map(unichr, range(256)): st[c] = SYNTAX_SYMBOL - for c in [a for a in map(unichr, range(256)) if a.isalpha()]: + for c in [a for a in map(unichr, range(256)) if a.isalnum()]: st[c] = SYNTAX_WORD st[unicode('\n')] = st[unicode(' ')] = SYNTAX_WHITESPACE return st diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -54,3 +54,6 @@ .. branch: pytest-25 Update our copies of py.test and pylib to versions 2.5.2 and 1.4.20, respectively. + +.. branch: split-ast-classes +Classes in the ast module are now distinct from structures used by the compiler. diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst --- a/pypy/doc/windows.rst +++ b/pypy/doc/windows.rst @@ -37,7 +37,7 @@ using a 32 bit Python and vice versa. By default pypy is built using the Multi-threaded DLL (/MD) runtime environment. -**Note:** PyPy is currently not supported for 64 bit Windows, and translation +**Note:** PyPy is currently not supported for 64 bit Python, and translation will fail in this case. Python and a C compiler are all you need to build pypy, but it will miss some @@ -136,7 +136,7 @@ cd zlib-1.2.3 nmake -f win32\Makefile.msc - copy zlib1.lib + copy zlib.lib copy zlib.h zconf.h The bz2 compression library @@ -165,27 +165,29 @@ directory. Version 2.1.0 is known to pass tests. Then open the project file ``expat.dsw`` with Visual Studio; follow the instruction for converting the project files, -switch to the "Release" configuration, reconfigure the runtime for -Multi-threaded DLL (/MD) and build the solution (the ``expat`` project -is actually enough for pypy). +switch to the "Release" configuration, use the ``expat_static`` project, +reconfigure the runtime for Multi-threaded DLL (/MD) and build. 
-Then, copy the file ``win32\bin\release\libexpat.dll`` somewhere in -your PATH, ``win32\bin\release\libexpat.lib`` somewhere in LIB, and -both ``lib\expat.h`` and ``lib\expat_external.h`` somewhere in INCLUDE. +Then, copy the file ``win32\bin\release\libexpat.lib`` somewhere in +somewhere in LIB, and both ``lib\expat.h`` and ``lib\expat_external.h`` +somewhere in INCLUDE. The OpenSSL library ~~~~~~~~~~~~~~~~~~~ OpenSSL needs a Perl interpreter to configure its makefile. You may -use the one distributed by ActiveState, or the one from cygwin. In -both case the perl interpreter must be found on the PATH. +use the one distributed by ActiveState, or the one from cygwin.:: - svn export http://svn.python.org/projects/external/openssl-0.9.8y - cd openssl-0.9.8y - perl Configure VC-WIN32 + svn export http://svn.python.org/projects/external/openssl-1.0.1i + cd openssl-1.0.1i + perl Configure VC-WIN32 no-idea no-mdc2 ms\do_ms.bat nmake -f ms\nt.mak install +Then, copy the files ``out32\*.lib`` somewhere in +somewhere in LIB, and the entire ``include\openssl`` directory as-is somewhere +in INCLUDE. 
+ TkInter module support ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,5 +1,4 @@ # Generated by tools/asdl_py.py -from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name @@ -21,11 +20,15 @@ 'AST string must be of type str or unicode')) return w_obj - -class AST(W_Root): - - w_dict = None - +def get_field(space, w_node, name, optional): + w_obj = w_node.getdictvalue(space, name) + if w_obj is None and not optional: + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %T", name, w_node) + return w_obj + + +class AST(object): __metaclass__ = extendabletype def walkabout(self, visitor): @@ -34,8 +37,23 @@ def mutate_over(self, visitor): raise AssertionError("mutate_over() implementation not provided") - def sync_app_attrs(self, space): - raise NotImplementedError + +class NodeVisitorNotImplemented(Exception): + pass + + +class _FieldsWrapper(W_Root): + "Hack around the fact we can't store tuples on a TypeDef." + + def __init__(self, fields): + self.fields = fields + + def __spacebind__(self, space): + return space.newtuple([space.wrap(field) for field in self.fields]) + + +class W_AST(W_Root): + w_dict = None def getdict(self, space): if self.w_dict is None: @@ -47,7 +65,7 @@ if w_dict is None: w_dict = space.newdict() w_type = space.type(self) - w_fields = w_type.getdictvalue(space, "_fields") + w_fields = space.getattr(w_type, space.wrap("_fields")) for w_name in space.fixedview(w_fields): try: space.setitem(w_dict, w_name, @@ -71,79 +89,94 @@ space.setattr(self, w_name, space.getitem(w_state, w_name)) - def missing_field(self, space, required, host): - "Find which required field is missing." 
- state = self.initialization_state - for i in range(len(required)): - if (state >> i) & 1: - continue # field is present - missing = required[i] - if missing is None: - continue # field is optional - w_obj = self.getdictvalue(space, missing) - if w_obj is None: - raise oefmt(space.w_TypeError, - "required field \"%s\" missing from %s", - missing, host) - else: - raise oefmt(space.w_TypeError, - "incorrect type for field \"%s\" in %s", - missing, host) - raise AssertionError("should not reach here") - - -class NodeVisitorNotImplemented(Exception): - pass - - -class _FieldsWrapper(W_Root): - "Hack around the fact we can't store tuples on a TypeDef." - - def __init__(self, fields): - self.fields = fields - - def __spacebind__(self, space): - return space.newtuple([space.wrap(field) for field in self.fields]) - - -def get_AST_new(node_class): - def generic_AST_new(space, w_type, __args__): - node = space.allocate_instance(node_class, w_type) - node.initialization_state = 0 - return space.wrap(node) - return func_with_new_name(generic_AST_new, "new_%s" % node_class.__name__) - -def AST_init(space, w_self, __args__): +def W_AST_new(space, w_type, __args__): + node = space.allocate_instance(W_AST, w_type) + return space.wrap(node) + +def W_AST_init(space, w_self, __args__): args_w, kwargs_w = __args__.unpack() - if args_w and len(args_w) != 0: - w_err = space.wrap("_ast.AST constructor takes 0 positional arguments") - raise OperationError(space.w_TypeError, w_err) + fields_w = space.fixedview(space.getattr(space.type(w_self), + space.wrap("_fields"))) + num_fields = len(fields_w) if fields_w else 0 + if args_w and len(args_w) != num_fields: + if num_fields == 0: + raise oefmt(space.w_TypeError, + "%T constructor takes 0 positional arguments", w_self) + elif num_fields == 1: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d positional argument", w_self, num_fields) + else: + raise oefmt(space.w_TypeError, + "%T constructor takes either 0 or %d 
positional arguments", w_self, num_fields) + if args_w: + for i, w_field in enumerate(fields_w): + space.setattr(w_self, w_field, args_w[i]) for field, w_value in kwargs_w.iteritems(): space.setattr(w_self, space.wrap(field), w_value) -AST.typedef = typedef.TypeDef("_ast.AST", + +W_AST.typedef = typedef.TypeDef("_ast.AST", _fields=_FieldsWrapper([]), _attributes=_FieldsWrapper([]), - __reduce__=interp2app(AST.reduce_w), - __setstate__=interp2app(AST.setstate_w), + __reduce__=interp2app(W_AST.reduce_w), + __setstate__=interp2app(W_AST.setstate_w), __dict__ = typedef.GetSetProperty(typedef.descr_get_dict, - typedef.descr_set_dict, cls=AST), - __new__=interp2app(get_AST_new(AST)), - __init__=interp2app(AST_init), + typedef.descr_set_dict, cls=W_AST), + __new__=interp2app(W_AST_new), + __init__=interp2app(W_AST_init), ) - - +class State: + AST_TYPES = [] + + @classmethod + def ast_type(cls, name, base, fields, attributes=None): + cls.AST_TYPES.append((name, base, fields, attributes)) + + def __init__(self, space): + self.w_AST = space.gettypeobject(W_AST.typedef) + for (name, base, fields, attributes) in self.AST_TYPES: + self.make_new_type(space, name, base, fields, attributes) + + def make_new_type(self, space, name, base, fields, attributes): + w_base = getattr(self, 'w_%s' % base) + w_dict = space.newdict() + space.setitem_str(w_dict, '__module__', space.wrap('_ast')) + if fields is not None: + space.setitem_str(w_dict, "_fields", + space.newtuple([space.wrap(f) for f in fields])) + if attributes is not None: + space.setitem_str(w_dict, "_attributes", + space.newtuple([space.wrap(a) for a in attributes])) + w_type = space.call_function( + space.w_type, + space.wrap(name), space.newtuple([w_base]), w_dict) + setattr(self, 'w_%s' % name, w_type) + +def get(space): + return space.fromcache(State) class mod(AST): - pass + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, 
get(space).w_Module): + return Module.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Interactive): + return Interactive.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expression): + return Expression.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Suite): + return Suite.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected mod node, got %T", w_node) +State.ast_type('mod', 'AST', None, []) class Module(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Module(self) @@ -153,29 +186,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Module(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Module') + def to_object(self, space): + w_node = space.call_function(get(space).w_Module) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Module(_body) + +State.ast_type('Module', 'mod', ['body']) class Interactive(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Interactive(self) @@ -185,28 +219,30 @@ visitor._mutate_sequence(self.body) return visitor.visit_Interactive(self) - 
def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Interactive') + def to_object(self, space): + w_node = space.call_function(get(space).w_Interactive) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Interactive(_body) + +State.ast_type('Interactive', 'mod', ['body']) class Expression(mod): def __init__(self, body): self.body = body - self.initialization_state = 1 def walkabout(self, visitor): visitor.visit_Expression(self) @@ -215,20 +251,25 @@ self.body = self.body.mutate_over(visitor) return visitor.visit_Expression(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Expression') - else: - pass - self.body.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expression) + w_body = self.body.to_object(space) # expr + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + _body = expr.from_object(space, w_body) + return Expression(_body) + +State.ast_type('Expression', 'mod', ['body']) class Suite(mod): def __init__(self, body): self.body = body - self.w_body = None - self.initialization_state = 1 def walkabout(self, visitor): 
visitor.visit_Suite(self) @@ -238,21 +279,24 @@ visitor._mutate_sequence(self.body) return visitor.visit_Suite(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 1: - self.missing_field(space, ['body'], 'Suite') + def to_object(self, space): + w_node = space.call_function(get(space).w_Suite) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + return Suite(_body) + +State.ast_type('Suite', 'mod', ['body']) class stmt(AST): @@ -261,18 +305,67 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_FunctionDef): + return FunctionDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ClassDef): + return ClassDef.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Return): + return Return.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Delete): + return Delete.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assign): + return Assign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_AugAssign): + return AugAssign.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_For): + return For.from_object(space, w_node) + if space.isinstance_w(w_node, 
get(space).w_While): + return While.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_If): + return If.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_With): + return With.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Raise): + return Raise.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryExcept): + return TryExcept.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_TryFinally): + return TryFinally.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Assert): + return Assert.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Import): + return Import.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ImportFrom): + return ImportFrom.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Global): + return Global.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Nonlocal): + return Nonlocal.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Expr): + return Expr.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Pass): + return Pass.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Break): + return Break.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Continue): + return Continue.from_object(space, w_node) + raise oefmt(space.w_TypeError, + "Expected stmt node, got %T", w_node) +State.ast_type('stmt', 'AST', None, ['lineno', 'col_offset']) + class FunctionDef(stmt): def __init__(self, name, args, body, decorator_list, returns, lineno, col_offset): self.name = name self.args = args self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None self.returns = returns stmt.__init__(self, lineno, col_offset) - self.initialization_state = 127 def walkabout(self, visitor): visitor.visit_FunctionDef(self) @@ -287,35 +380,53 @@ 
self.returns = self.returns.mutate_over(visitor) return visitor.visit_FunctionDef(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~64) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list', None], 'FunctionDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_FunctionDef) + w_name = space.wrap(self.name.decode('utf-8')) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + w_args = self.args.to_object(space) # arguments + space.setattr(w_node, space.wrap('args'), w_args) + if self.body is None: + body_w = [] else: - if not self.initialization_state & 64: - self.returns = None - self.args.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) - if self.returns: - self.returns.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_returns = self.returns.to_object(space) if self.returns is not None else space.w_None # expr + space.setattr(w_node, space.wrap('returns'), w_returns) + w_lineno = space.wrap(self.lineno) # int + 
space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_args = get_field(space, w_node, 'args', False) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_returns = get_field(space, w_node, 'returns', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.identifier_w(w_name) + _args = arguments.from_object(space, w_args) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _returns = expr.from_object(space, w_returns) if w_returns is not None else None + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return FunctionDef(_name, _args, _body, _decorator_list, _returns, _lineno, _col_offset) + +State.ast_type('FunctionDef', 'stmt', ['name', 'args', 'body', 'decorator_list', 'returns']) class ClassDef(stmt): @@ -323,17 +434,12 @@ def __init__(self, name, bases, keywords, starargs, kwargs, body, decorator_list, lineno, col_offset): self.name = name self.bases = bases - self.w_bases = None self.keywords = keywords - self.w_keywords = None self.starargs = starargs self.kwargs = kwargs self.body = body - self.w_body = None self.decorator_list = decorator_list - self.w_decorator_list = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 511 def walkabout(self, visitor): visitor.visit_ClassDef(self) @@ -353,58 +459,71 @@ visitor._mutate_sequence(self.decorator_list) return visitor.visit_ClassDef(self) - def sync_app_attrs(self, space): - if 
(self.initialization_state & ~96) ^ 415: - self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'keywords', None, None, 'body', 'decorator_list'], 'ClassDef') + def to_object(self, space): + w_node = space.call_function(get(space).w_ClassDef) + w_name = space.wrap(self.name.decode('utf-8')) # identifier + space.setattr(w_node, space.wrap('name'), w_name) + if self.bases is None: + bases_w = [] else: - if not self.initialization_state & 32: - self.starargs = None - if not self.initialization_state & 64: - self.kwargs = None - w_list = self.w_bases - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.bases = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.bases = None - if self.bases is not None: - for node in self.bases: - node.sync_app_attrs(space) - w_list = self.w_keywords - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.keywords = [space.interp_w(keyword, w_obj) for w_obj in list_w] - else: - self.keywords = None - if self.keywords is not None: - for node in self.keywords: - node.sync_app_attrs(space) - if self.starargs: - self.starargs.sync_app_attrs(space) - if self.kwargs: - self.kwargs.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_decorator_list - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.decorator_list = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.decorator_list = None - if self.decorator_list is not None: - for node in self.decorator_list: - node.sync_app_attrs(space) + bases_w = [node.to_object(space) for node in self.bases] # expr + w_bases = space.newlist(bases_w) + space.setattr(w_node, space.wrap('bases'), w_bases) + if self.keywords is None: + 
keywords_w = [] + else: + keywords_w = [node.to_object(space) for node in self.keywords] # keyword + w_keywords = space.newlist(keywords_w) + space.setattr(w_node, space.wrap('keywords'), w_keywords) + w_starargs = self.starargs.to_object(space) if self.starargs is not None else space.w_None # expr + space.setattr(w_node, space.wrap('starargs'), w_starargs) + w_kwargs = self.kwargs.to_object(space) if self.kwargs is not None else space.w_None # expr + space.setattr(w_node, space.wrap('kwargs'), w_kwargs) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.decorator_list is None: + decorator_list_w = [] + else: + decorator_list_w = [node.to_object(space) for node in self.decorator_list] # expr + w_decorator_list = space.newlist(decorator_list_w) + space.setattr(w_node, space.wrap('decorator_list'), w_decorator_list) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_name = get_field(space, w_node, 'name', False) + w_bases = get_field(space, w_node, 'bases', False) + w_keywords = get_field(space, w_node, 'keywords', False) + w_starargs = get_field(space, w_node, 'starargs', True) + w_kwargs = get_field(space, w_node, 'kwargs', True) + w_body = get_field(space, w_node, 'body', False) + w_decorator_list = get_field(space, w_node, 'decorator_list', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _name = space.identifier_w(w_name) + bases_w = space.unpackiterable(w_bases) + _bases = [expr.from_object(space, w_item) for w_item in bases_w] + keywords_w = space.unpackiterable(w_keywords) + _keywords = 
[keyword.from_object(space, w_item) for w_item in keywords_w] + _starargs = expr.from_object(space, w_starargs) if w_starargs is not None else None + _kwargs = expr.from_object(space, w_kwargs) if w_kwargs is not None else None + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + decorator_list_w = space.unpackiterable(w_decorator_list) + _decorator_list = [expr.from_object(space, w_item) for w_item in decorator_list_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ClassDef(_name, _bases, _keywords, _starargs, _kwargs, _body, _decorator_list, _lineno, _col_offset) + +State.ast_type('ClassDef', 'stmt', ['name', 'bases', 'keywords', 'starargs', 'kwargs', 'body', 'decorator_list']) class Return(stmt): @@ -412,7 +531,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Return(self) @@ -422,23 +540,34 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Return(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~4) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None], 'Return') - else: - if not self.initialization_state & 4: - self.value = None - if self.value: - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Return) + w_value = self.value.to_object(space) if self.value is not None else space.w_None # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', True) + w_lineno = get_field(space, w_node, 'lineno', 
False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) if w_value is not None else None + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Return(_value, _lineno, _col_offset) + +State.ast_type('Return', 'stmt', ['value']) class Delete(stmt): def __init__(self, targets, lineno, col_offset): self.targets = targets - self.w_targets = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Delete(self) @@ -448,31 +577,40 @@ visitor._mutate_sequence(self.targets) return visitor.visit_Delete(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete') + def to_object(self, space): + w_node = space.call_function(get(space).w_Delete) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = [expr.from_object(space, w_item) for w_item in targets_w] + _lineno = 
space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Delete(_targets, _lineno, _col_offset) + +State.ast_type('Delete', 'stmt', ['targets']) class Assign(stmt): def __init__(self, targets, value, lineno, col_offset): self.targets = targets - self.w_targets = None self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assign(self) @@ -483,22 +621,36 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Assign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign') + def to_object(self, space): + w_node = space.call_function(get(space).w_Assign) + if self.targets is None: + targets_w = [] else: - pass - w_list = self.w_targets - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.targets = [space.interp_w(expr, w_obj) for w_obj in list_w] - else: - self.targets = None - if self.targets is not None: - for node in self.targets: - node.sync_app_attrs(space) - self.value.sync_app_attrs(space) + targets_w = [node.to_object(space) for node in self.targets] # expr + w_targets = space.newlist(targets_w) + space.setattr(w_node, space.wrap('targets'), w_targets) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_targets = get_field(space, w_node, 'targets', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + targets_w = space.unpackiterable(w_targets) + _targets = 
[expr.from_object(space, w_item) for w_item in targets_w] + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assign(_targets, _value, _lineno, _col_offset) + +State.ast_type('Assign', 'stmt', ['targets', 'value']) class AugAssign(stmt): @@ -508,7 +660,6 @@ self.op = op self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_AugAssign(self) @@ -518,13 +669,35 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_AugAssign(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign') - else: - pass - self.target.sync_app_attrs(space) - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_AugAssign) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_op = operator_to_class[self.op - 1]().to_object(space) # operator + space.setattr(w_node, space.wrap('op'), w_op) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_op = get_field(space, w_node, 'op', False) + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _op = operator.from_object(space, w_op) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + 
_col_offset = space.int_w(w_col_offset) + return AugAssign(_target, _op, _value, _lineno, _col_offset) + +State.ast_type('AugAssign', 'stmt', ['target', 'op', 'value']) class For(stmt): @@ -533,11 +706,8 @@ self.target = target self.iter = iter self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 63 def walkabout(self, visitor): visitor.visit_For(self) @@ -551,33 +721,49 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_For(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 63: - self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For') + def to_object(self, space): + w_node = space.call_function(get(space).w_For) + w_target = self.target.to_object(space) # expr + space.setattr(w_node, space.wrap('target'), w_target) + w_iter = self.iter.to_object(space) # expr + space.setattr(w_node, space.wrap('iter'), w_iter) + if self.body is None: + body_w = [] else: - pass - self.target.sync_app_attrs(space) - self.iter.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, 
space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_target = get_field(space, w_node, 'target', False) + w_iter = get_field(space, w_node, 'iter', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _target = expr.from_object(space, w_target) + _iter = expr.from_object(space, w_iter) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return For(_target, _iter, _body, _orelse, _lineno, _col_offset) + +State.ast_type('For', 'stmt', ['target', 'iter', 'body', 'orelse']) class While(stmt): @@ -585,11 +771,8 @@ def __init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_While(self) @@ -602,32 +785,45 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_While(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While') + def to_object(self, space): + w_node = space.call_function(get(space).w_While) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - 
self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return While(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('While', 'stmt', ['test', 'body', 'orelse']) class If(stmt): @@ -635,11 +831,8 @@ def 
__init__(self, test, body, orelse, lineno, col_offset): self.test = test self.body = body - self.w_body = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_If(self) @@ -652,43 +845,53 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_If(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If') + def to_object(self, space): + w_node = space.call_function(get(space).w_If) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + if self.body is None: + body_w = [] else: - pass - self.test.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = 
get_field(space, w_node, 'test', False) + w_body = get_field(space, w_node, 'body', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return If(_test, _body, _orelse, _lineno, _col_offset) + +State.ast_type('If', 'stmt', ['test', 'body', 'orelse']) class With(stmt): def __init__(self, items, body, lineno, col_offset): self.items = items - self.w_items = None self.body = body - self.w_body = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_With(self) @@ -700,31 +903,41 @@ visitor._mutate_sequence(self.body) return visitor.visit_With(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'items', 'body'], 'With') + def to_object(self, space): + w_node = space.call_function(get(space).w_With) + if self.items is None: + items_w = [] else: - pass - w_list = self.w_items - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.items = [space.interp_w(withitem, w_obj) for w_obj in list_w] - else: - self.items = None - if self.items is not None: - for node in self.items: - node.sync_app_attrs(space) - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) + items_w = [node.to_object(space) for node in self.items] # withitem + 
w_items = space.newlist(items_w) + space.setattr(w_node, space.wrap('items'), w_items) + if self.body is None: + body_w = [] + else: + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_items = get_field(space, w_node, 'items', False) + w_body = get_field(space, w_node, 'body', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + items_w = space.unpackiterable(w_items) + _items = [withitem.from_object(space, w_item) for w_item in items_w] + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return With(_items, _body, _lineno, _col_offset) + +State.ast_type('With', 'stmt', ['items', 'body']) class Raise(stmt): @@ -733,7 +946,6 @@ self.exc = exc self.cause = cause stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Raise(self) @@ -745,31 +957,40 @@ self.cause = self.cause.mutate_over(visitor) return visitor.visit_Raise(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~12) ^ 3: - self.missing_field(space, ['lineno', 'col_offset', None, None], 'Raise') - else: - if not self.initialization_state & 4: - self.exc = None - if not self.initialization_state & 8: - self.cause = None - if self.exc: - self.exc.sync_app_attrs(space) - if self.cause: - self.cause.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Raise) + w_exc = self.exc.to_object(space) if self.exc is 
not None else space.w_None # expr + space.setattr(w_node, space.wrap('exc'), w_exc) + w_cause = self.cause.to_object(space) if self.cause is not None else space.w_None # expr + space.setattr(w_node, space.wrap('cause'), w_cause) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_exc = get_field(space, w_node, 'exc', True) + w_cause = get_field(space, w_node, 'cause', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _exc = expr.from_object(space, w_exc) if w_exc is not None else None + _cause = expr.from_object(space, w_cause) if w_cause is not None else None + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Raise(_exc, _cause, _lineno, _col_offset) + +State.ast_type('Raise', 'stmt', ['exc', 'cause']) class TryExcept(stmt): def __init__(self, body, handlers, orelse, lineno, col_offset): self.body = body - self.w_body = None self.handlers = handlers - self.w_handlers = None self.orelse = orelse - self.w_orelse = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_TryExcept(self) @@ -783,52 +1004,58 @@ visitor._mutate_sequence(self.orelse) return visitor.visit_TryExcept(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 31: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryExcept) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = 
None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_handlers - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.handlers = [space.interp_w(excepthandler, w_obj) for w_obj in list_w] - else: - self.handlers = None - if self.handlers is not None: - for node in self.handlers: - node.sync_app_attrs(space) - w_list = self.w_orelse - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.orelse = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.orelse = None - if self.orelse is not None: - for node in self.orelse: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.handlers is None: + handlers_w = [] + else: + handlers_w = [node.to_object(space) for node in self.handlers] # excepthandler + w_handlers = space.newlist(handlers_w) + space.setattr(w_node, space.wrap('handlers'), w_handlers) + if self.orelse is None: + orelse_w = [] + else: + orelse_w = [node.to_object(space) for node in self.orelse] # stmt + w_orelse = space.newlist(orelse_w) + space.setattr(w_node, space.wrap('orelse'), w_orelse) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_handlers = get_field(space, w_node, 'handlers', False) + w_orelse = get_field(space, w_node, 'orelse', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + handlers_w = space.unpackiterable(w_handlers) + _handlers = 
[excepthandler.from_object(space, w_item) for w_item in handlers_w] + orelse_w = space.unpackiterable(w_orelse) + _orelse = [stmt.from_object(space, w_item) for w_item in orelse_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryExcept(_body, _handlers, _orelse, _lineno, _col_offset) + +State.ast_type('TryExcept', 'stmt', ['body', 'handlers', 'orelse']) class TryFinally(stmt): def __init__(self, body, finalbody, lineno, col_offset): self.body = body - self.w_body = None self.finalbody = finalbody - self.w_finalbody = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_TryFinally(self) @@ -840,31 +1067,41 @@ visitor._mutate_sequence(self.finalbody) return visitor.visit_TryFinally(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 15: - self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally') + def to_object(self, space): + w_node = space.call_function(get(space).w_TryFinally) + if self.body is None: + body_w = [] else: - pass - w_list = self.w_body - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.body = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.body = None - if self.body is not None: - for node in self.body: - node.sync_app_attrs(space) - w_list = self.w_finalbody - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.finalbody = [space.interp_w(stmt, w_obj) for w_obj in list_w] - else: - self.finalbody = None - if self.finalbody is not None: - for node in self.finalbody: - node.sync_app_attrs(space) + body_w = [node.to_object(space) for node in self.body] # stmt + w_body = space.newlist(body_w) + space.setattr(w_node, space.wrap('body'), w_body) + if self.finalbody is None: + finalbody_w = [] + else: + finalbody_w = [node.to_object(space) for node in self.finalbody] # stmt + w_finalbody = space.newlist(finalbody_w) + 
space.setattr(w_node, space.wrap('finalbody'), w_finalbody) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_body = get_field(space, w_node, 'body', False) + w_finalbody = get_field(space, w_node, 'finalbody', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + body_w = space.unpackiterable(w_body) + _body = [stmt.from_object(space, w_item) for w_item in body_w] + finalbody_w = space.unpackiterable(w_finalbody) + _finalbody = [stmt.from_object(space, w_item) for w_item in finalbody_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return TryFinally(_body, _finalbody, _lineno, _col_offset) + +State.ast_type('TryFinally', 'stmt', ['body', 'finalbody']) class Assert(stmt): @@ -873,7 +1110,6 @@ self.test = test self.msg = msg stmt.__init__(self, lineno, col_offset) - self.initialization_state = 15 def walkabout(self, visitor): visitor.visit_Assert(self) @@ -884,24 +1120,38 @@ self.msg = self.msg.mutate_over(visitor) return visitor.visit_Assert(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~8) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert') - else: - if not self.initialization_state & 8: - self.msg = None - self.test.sync_app_attrs(space) - if self.msg: - self.msg.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Assert) + w_test = self.test.to_object(space) # expr + space.setattr(w_node, space.wrap('test'), w_test) + w_msg = self.msg.to_object(space) if self.msg is not None else space.w_None # expr + space.setattr(w_node, space.wrap('msg'), w_msg) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, 
space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_test = get_field(space, w_node, 'test', False) + w_msg = get_field(space, w_node, 'msg', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _test = expr.from_object(space, w_test) + _msg = expr.from_object(space, w_msg) if w_msg is not None else None + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Assert(_test, _msg, _lineno, _col_offset) + +State.ast_type('Assert', 'stmt', ['test', 'msg']) class Import(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Import(self) @@ -911,21 +1161,32 @@ visitor._mutate_sequence(self.names) return visitor.visit_Import(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import') + def to_object(self, space): + w_node = space.call_function(get(space).w_Import) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return 
w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Import(_names, _lineno, _col_offset) + +State.ast_type('Import', 'stmt', ['names']) class ImportFrom(stmt): @@ -933,10 +1194,8 @@ def __init__(self, module, names, level, lineno, col_offset): self.module = module self.names = names - self.w_names = None self.level = level stmt.__init__(self, lineno, col_offset) - self.initialization_state = 31 def walkabout(self, visitor): visitor.visit_ImportFrom(self) @@ -946,33 +1205,47 @@ visitor._mutate_sequence(self.names) return visitor.visit_ImportFrom(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~20) ^ 11: - self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom') + def to_object(self, space): + w_node = space.call_function(get(space).w_ImportFrom) + w_module = space.wrap(self.module.decode('utf-8')) if self.module is not None else space.w_None # identifier + space.setattr(w_node, space.wrap('module'), w_module) + if self.names is None: + names_w = [] else: - if not self.initialization_state & 4: - self.module = None - if not self.initialization_state & 16: - self.level = 0 - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.interp_w(alias, w_obj) for w_obj in list_w] - else: - self.names = None - if self.names is not None: - for node in self.names: - node.sync_app_attrs(space) + names_w = [node.to_object(space) for node in self.names] # alias + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_level = space.wrap(self.level) # int + 
space.setattr(w_node, space.wrap('level'), w_level) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_module = get_field(space, w_node, 'module', True) + w_names = get_field(space, w_node, 'names', False) + w_level = get_field(space, w_node, 'level', True) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _module = space.str_or_None_w(w_module) + names_w = space.unpackiterable(w_names) + _names = [alias.from_object(space, w_item) for w_item in names_w] + _level = space.int_w(w_level) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return ImportFrom(_module, _names, _level, _lineno, _col_offset) + +State.ast_type('ImportFrom', 'stmt', ['module', 'names', 'level']) class Global(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Global(self) @@ -980,27 +1253,39 @@ def mutate_over(self, visitor): return visitor.visit_Global(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global') + def to_object(self, space): + w_node = space.call_function(get(space).w_Global) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.identifier_w(w_obj) for w_obj in list_w] - else: - self.names = None + names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = 
space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.identifier_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Global(_names, _lineno, _col_offset) + +State.ast_type('Global', 'stmt', ['names']) class Nonlocal(stmt): def __init__(self, names, lineno, col_offset): self.names = names - self.w_names = None stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Nonlocal(self) @@ -1008,18 +1293,32 @@ def mutate_over(self, visitor): return visitor.visit_Nonlocal(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Nonlocal') + def to_object(self, space): + w_node = space.call_function(get(space).w_Nonlocal) + if self.names is None: + names_w = [] else: - pass - w_list = self.w_names - if w_list is not None: - list_w = space.listview(w_list) - if list_w: - self.names = [space.identifier_w(w_obj) for w_obj in list_w] - else: - self.names = None + names_w = [space.wrap(node.decode('utf-8')) for node in self.names] # identifier + w_names = space.newlist(names_w) + space.setattr(w_node, space.wrap('names'), w_names) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_names = 
get_field(space, w_node, 'names', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + names_w = space.unpackiterable(w_names) + _names = [space.identifier_w(w_item) for w_item in names_w] + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Nonlocal(_names, _lineno, _col_offset) + +State.ast_type('Nonlocal', 'stmt', ['names']) class Expr(stmt): @@ -1027,7 +1326,6 @@ def __init__(self, value, lineno, col_offset): self.value = value stmt.__init__(self, lineno, col_offset) - self.initialization_state = 7 def walkabout(self, visitor): visitor.visit_Expr(self) @@ -1036,19 +1334,33 @@ self.value = self.value.mutate_over(visitor) return visitor.visit_Expr(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 7: - self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr') - else: - pass - self.value.sync_app_attrs(space) + def to_object(self, space): + w_node = space.call_function(get(space).w_Expr) + w_value = self.value.to_object(space) # expr + space.setattr(w_node, space.wrap('value'), w_value) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_value = get_field(space, w_node, 'value', False) + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _value = expr.from_object(space, w_value) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Expr(_value, _lineno, _col_offset) + +State.ast_type('Expr', 'stmt', ['value']) class Pass(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Pass(self) @@ 
-1056,18 +1368,29 @@ def mutate_over(self, visitor): return visitor.visit_Pass(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Pass') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Pass) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Pass(_lineno, _col_offset) + +State.ast_type('Pass', 'stmt', []) class Break(stmt): def __init__(self, lineno, col_offset): stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Break(self) @@ -1075,18 +1398,29 @@ def mutate_over(self, visitor): return visitor.visit_Break(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Break') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Break) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Break(_lineno, _col_offset) + +State.ast_type('Break', 'stmt', []) class Continue(stmt): def __init__(self, lineno, col_offset): 
stmt.__init__(self, lineno, col_offset) - self.initialization_state = 3 def walkabout(self, visitor): visitor.visit_Continue(self) @@ -1094,11 +1428,23 @@ def mutate_over(self, visitor): return visitor.visit_Continue(self) - def sync_app_attrs(self, space): - if (self.initialization_state & ~0) ^ 3: - self.missing_field(space, ['lineno', 'col_offset'], 'Continue') - else: - pass + def to_object(self, space): + w_node = space.call_function(get(space).w_Continue) + w_lineno = space.wrap(self.lineno) # int + space.setattr(w_node, space.wrap('lineno'), w_lineno) + w_col_offset = space.wrap(self.col_offset) # int + space.setattr(w_node, space.wrap('col_offset'), w_col_offset) + return w_node + + @staticmethod + def from_object(space, w_node): + w_lineno = get_field(space, w_node, 'lineno', False) + w_col_offset = get_field(space, w_node, 'col_offset', False) + _lineno = space.int_w(w_lineno) + _col_offset = space.int_w(w_col_offset) + return Continue(_lineno, _col_offset) + +State.ast_type('Continue', 'stmt', []) class expr(AST): @@ -1107,14 +1453,72 @@ self.lineno = lineno self.col_offset = col_offset + @staticmethod + def from_object(space, w_node): + if space.is_w(w_node, space.w_None): + return None + if space.isinstance_w(w_node, get(space).w_BoolOp): + return BoolOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_BinOp): + return BinOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_UnaryOp): + return UnaryOp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Lambda): + return Lambda.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_IfExp): + return IfExp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Dict): + return Dict.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_Set): + return Set.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_ListComp): + return ListComp.from_object(space, w_node) + if 
space.isinstance_w(w_node, get(space).w_SetComp): + return SetComp.from_object(space, w_node) + if space.isinstance_w(w_node, get(space).w_DictComp): From noreply at buildbot.pypy.org Wed Aug 27 05:58:35 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 27 Aug 2014 05:58:35 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: fix per new ast.py Message-ID: <20140827035835.409F21C063C@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3.3 Changeset: r73078:1d260c164b38 Date: 2014-08-26 16:41 -0700 http://bitbucket.org/pypy/pypy/changeset/1d260c164b38/ Log: fix per new ast.py diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py --- a/pypy/interpreter/astcompiler/validate.py +++ b/pypy/interpreter/astcompiler/validate.py @@ -21,7 +21,7 @@ def expr_context_name(ctx): if not 1 <= ctx <= len(ast.expr_context_to_class): return '??' - return ast.expr_context_to_class[ctx - 1].typedef.name + return ast.expr_context_to_class[ctx - 1].__name__[1:] def _check_context(expected_ctx, actual_ctx): if expected_ctx != actual_ctx: From noreply at buildbot.pypy.org Wed Aug 27 10:06:40 2014 From: noreply at buildbot.pypy.org (amauryfa) Date: Wed, 27 Aug 2014 10:06:40 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Oops, add missing faulthandler.h Message-ID: <20140827080640.9732B1D2AB9@cobra.cs.uni-duesseldorf.de> Author: Amaury Forgeot d'Arc Branch: py3.3 Changeset: r73079:222c001e9305 Date: 2014-08-27 10:04 +0200 http://bitbucket.org/pypy/pypy/changeset/222c001e9305/ Log: Oops, add missing faulthandler.h diff --git a/pypy/module/faulthandler/faulthandler.c b/pypy/module/faulthandler/faulthandler.c --- a/pypy/module/faulthandler/faulthandler.c +++ b/pypy/module/faulthandler/faulthandler.c @@ -1,5 +1,5 @@ -#include #include +#include "faulthandler.h" int pypy_faulthandler_read_null(void) diff --git a/pypy/module/faulthandler/faulthandler.h b/pypy/module/faulthandler/faulthandler.h new file mode 100644 --- /dev/null +++ 
b/pypy/module/faulthandler/faulthandler.h @@ -0,0 +1,18 @@ +#ifndef PYPY_FAULTHANDLER_H +#define PYPY_FAULTHANDLER_H + +#include + +int pypy_faulthandler_read_null(void); +void pypy_faulthandler_sigsegv(void); +int pypy_faulthandler_sigfpe(void); +void pypy_faulthandler_sigabrt(); +#ifdef SIGBUS +void pypy_faulthandler_sigbus(void); +#endif + +#ifdef SIGILL +void pypy_faulthandler_sigill(void); +#endif + +#endif /* PYPY_FAULTHANDLER_H */ From noreply at buildbot.pypy.org Wed Aug 27 10:57:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Aug 2014 10:57:07 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Fix Message-ID: <20140827085707.DDCA01D3511@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73080:b590b6bbe094 Date: 2014-08-27 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/b590b6bbe094/ Log: Fix diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -715,8 +715,8 @@ def op_jit_assembler_call(funcptr, *args): return funcptr(*args) -def op_stm_rewind_jmp_frame(): - pass +def op_stm_rewind_jmp_frame(x=None): + return llmemory.NULL def op_stm_hint_commit_soon(): pass From noreply at buildbot.pypy.org Wed Aug 27 11:15:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 27 Aug 2014 11:15:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Port change from stmgc-c7 Message-ID: <20140827091555.C07281D2644@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73081:1977c04f2580 Date: 2014-08-27 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/1977c04f2580/ Log: Port change from stmgc-c7 diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -8,8 +8,9 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.tool import rffi_platform -class error(Exception): +class 
RThreadError(Exception): pass +error = RThreadError translator_c_dir = py.path.local(cdir) From noreply at buildbot.pypy.org Wed Aug 27 16:58:53 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 16:58:53 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Replicate CPython's strange behavior in a vafguely reasonable way Message-ID: <20140827145853.0BD7A1D36F6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73082:8b19227722ab Date: 2014-08-27 07:58 -0700 http://bitbucket.org/pypy/pypy/changeset/8b19227722ab/ Log: Replicate CPython's strange behavior in a vafguely reasonable way diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -452,12 +452,6 @@ # The real comparison if space.is_w(space.type(w_v), space.type(w_w)): - if (space.isinstance_w(w_v, space.w_set) or - space.isinstance_w(w_v, space.w_frozenset)): - raise OperationError( - space.w_TypeError, - space.wrap("cannot compare sets using cmp()") - ) # for object of the same type, prefer __cmp__ over rich comparison. 
w_cmp = space.lookup(w_v, '__cmp__') w_res = _invoke_binop(space, w_cmp, w_v, w_w) diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -168,6 +168,14 @@ w_currently_in_repr = ec._py_repr = space.newdict() return setrepr(space, w_currently_in_repr, self) + def descr_cmp(self, space, w_other): + if space.is_w(space.type(self), space.type(w_other)): + # hack hack until we get the expected result + raise OperationError(space.w_TypeError, + space.wrap('cannot compare sets using cmp()')) + else: + return space.w_NotImplemented + def descr_eq(self, space, w_other): if isinstance(w_other, W_BaseSetObject): return space.wrap(self.equals(w_other)) @@ -514,6 +522,7 @@ __init__ = gateway.interp2app(W_BaseSetObject.descr_init), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = None, + __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), @@ -613,6 +622,7 @@ __new__ = gateway.interp2app(W_FrozensetObject.descr_new2), __repr__ = gateway.interp2app(W_BaseSetObject.descr_repr), __hash__ = gateway.interp2app(W_FrozensetObject.descr_hash), + __cmp__ = gateway.interp2app(W_BaseSetObject.descr_cmp), # comparison operators __eq__ = gateway.interp2app(W_BaseSetObject.descr_eq), From noreply at buildbot.pypy.org Wed Aug 27 19:00:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 19:00:21 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: (Originally by pjenvey) Dont' call close here, if the handshake fails we still need to be able to call close on teh original socket Message-ID: <20140827170021.D144C1D36F4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73083:10154fa64f9d Date: 2014-08-27 09:59 -0700 http://bitbucket.org/pypy/pypy/changeset/10154fa64f9d/ Log: (Originally by pjenvey) Dont' call close here, if the handshake 
fails we still need to be able to call close on teh original socket diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py --- a/lib-python/2.7/ssl.py +++ b/lib-python/2.7/ssl.py @@ -115,11 +115,10 @@ raise NotImplementedError("only stream sockets are supported") socket.__init__(self, _sock=sock._sock) - # "close" the original socket: it is not usable any more. - # this only calls _drop(), which should not actually call - # the operating system's close() because the reference - # counter is greater than 1 (we hold one too). - sock.close() + # "close" the original socket: it is not usable any more. which should + # not actually call the operating system's close() because the + # reference counter is greater than 1 (we hold one too). + sock._sock._drop() if ciphers is None and ssl_version != _SSLv2_IF_EXISTS: ciphers = _DEFAULT_CIPHERS From noreply at buildbot.pypy.org Wed Aug 27 19:18:30 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 19:18:30 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: More methods for tkapp Message-ID: <20140827171830.191A71D366B@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73084:27bd5f479a55 Date: 2014-08-27 10:18 -0700 http://bitbucket.org/pypy/pypy/changeset/27bd5f479a55/ Log: More methods for tkapp diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -474,6 +474,41 @@ self.raiseTclError() return v[0] + def exprboolean(self, s): + if '\x00' in s: + raise TypeError + v = tkffi.new("int*") + res = tklib.Tcl_ExprBoolean(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return v[0] + + def exprlong(self, s): + if '\x00' in s: + raise TypeError + v = tkffi.new("long*") + res = tklib.Tcl_ExprLong(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return v[0] + + def exprdouble(self, s): + if '\x00' in s: + raise TypeError + v = tkffi.new("double*") + 
res = tklib.Tcl_ExprDouble(self.interp, s, v) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return v[0] + + def exprstring(self, s): + if '\x00' in s: + raise TypeError + res = tklib.Tcl_ExprString(self.interp, s) + if res == tklib.TCL_ERROR: + self.raiseTclError() + return tkffi.string(tklib.Tcl_GetStringResult(self.interp)) + def mainloop(self, threshold): self._check_tcl_appartment() self.dispatching = True diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -76,6 +76,11 @@ char *Tcl_GetStringFromObj(Tcl_Obj* objPtr, int* lengthPtr); unsigned char *Tcl_GetByteArrayFromObj(Tcl_Obj* objPtr, int* lengthPtr); +int Tcl_ExprBoolean(Tcl_Interp* interp, const char *expr, int *booleanPtr); +int Tcl_ExprLong(Tcl_Interp* interp, const char *expr, long* longPtr); +int Tcl_ExprDouble(Tcl_Interp* interp, const char *expr, double* doublePtr); +int Tcl_ExprString(Tcl_Interp* interp, const char *expr); + Tcl_UniChar *Tcl_GetUnicode(Tcl_Obj* objPtr); int Tcl_GetCharLength(Tcl_Obj* objPtr); From noreply at buildbot.pypy.org Wed Aug 27 19:19:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 19:19:30 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix behavior of flush/close after write errors in line buffered streamio Message-ID: <20140827171930.582BB1D366B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73085:d8cc30a1c503 Date: 2014-08-27 13:18 -0400 http://bitbucket.org/pypy/pypy/changeset/d8cc30a1c503/ Log: fix behavior of flush/close after write errors in line buffered streamio diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -269,6 +269,9 @@ try: f.write('hello') raises(IOError, f.write, '\n') + f.write('zzz') + raises(IOError, f.flush) + f.flush() finally: f.close() diff --git 
a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -731,16 +731,23 @@ def __init__(self, base, bufsize=-1): self.base = base - self.do_write = base.write # write more data self.do_tell = base.tell # return a byte offset if bufsize == -1: # Get default from the class bufsize = self.bufsize self.bufsize = bufsize # buffer size (hint only) self.buf = [] self.buflen = 0 + self.error = False + + def do_write(self, data): + try: + self.base.write(data) + except: + self.error = True + raise def flush_buffers(self): - if self.buf: + if self.buf and not self.error: self.do_write(''.join(self.buf)) self.buf = [] self.buflen = 0 @@ -749,6 +756,7 @@ return self.do_tell() + self.buflen def write(self, data): + self.error = False buflen = self.buflen datalen = len(data) if datalen + buflen < self.bufsize: @@ -783,6 +791,7 @@ """ def write(self, data): + self.error = False p = data.rfind('\n') + 1 assert p >= 0 if self.buflen + len(data) < self.bufsize: From noreply at buildbot.pypy.org Wed Aug 27 19:31:15 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:15 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Fix PPCGuardToken constructor Message-ID: <20140827173115.702A11D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73086:a2ae19d7f7bd Date: 2014-08-27 17:01 -0300 http://bitbucket.org/pypy/pypy/changeset/a2ae19d7f7bd/ Log: Fix PPCGuardToken constructor diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py --- a/rpython/jit/backend/ppc/codebuilder.py +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -6,6 +6,7 @@ from rpython.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64, LR_BC_OFFSET) import rpython.jit.backend.ppc.register as r +import rpython.jit.backend.ppc.condition as c from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from 
rpython.jit.backend.llsupport.assembler import GuardToken from rpython.rtyper.lltypesystem import lltype, rffi @@ -923,15 +924,15 @@ def flush_icache(x, y): pass class PPCGuardToken(GuardToken): - # Passing fcond may be needed here + # We may have to find a suitable default value for fcond def __init__(self, cpu, gcmap, descr, failargs, faillocs, offset, exc, frame_depth, is_guard_not_invalidated=False, - is_guard_not_forced=False): + is_guard_not_forced=False, fcond=c.EQ): GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, exc, frame_depth, is_guard_not_invalidated, is_guard_not_forced) + self.fcond = fcond self.offset = offset - #self.is_invalidate = is_invalidate class OverwritingBuilder(PPCAssembler): def __init__(self, cb, start, num_insts): From noreply at buildbot.pypy.org Wed Aug 27 19:31:16 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:16 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Add method is_float to locations Message-ID: <20140827173116.C09231D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73087:35d3261abaab Date: 2014-08-27 17:03 -0300 http://bitbucket.org/pypy/pypy/changeset/35d3261abaab/ Log: Add method is_float to locations diff --git a/rpython/jit/backend/ppc/locations.py b/rpython/jit/backend/ppc/locations.py --- a/rpython/jit/backend/ppc/locations.py +++ b/rpython/jit/backend/ppc/locations.py @@ -34,6 +34,9 @@ def is_imm_float(self): return False + def is_float(self): + return False + def as_key(self): raise NotImplementedError @@ -55,7 +58,7 @@ class FPRegisterLocation(RegisterLocation): _immutable_ = True - type = FLOAT + type = FLOAT width = FWORD def __repr__(self): @@ -67,6 +70,9 @@ def is_fp_reg(self): return True + def is_float(self): + return True + def as_key(self): return self.value + 100 From noreply at buildbot.pypy.org Wed Aug 27 19:31:17 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:17 
+0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Fix construction of PPCGuardTokens Message-ID: <20140827173117.E92511D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73088:618b8c6dc34a Date: 2014-08-27 17:06 -0300 http://bitbucket.org/pypy/pypy/changeset/618b8c6dc34a/ Log: Fix construction of PPCGuardTokens diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -1,4 +1,4 @@ -from rpython.jit.backend.ppc.helper.assembler import (gen_emit_cmp_op, +from rpython.jit.backend.ppc.helper.assembler import (gen_emit_cmp_op, gen_emit_unary_cmp_op) from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg import rpython.jit.backend.ppc.condition as c @@ -240,7 +240,7 @@ is_guard_not_forced=False): pos = self.mc.currpos() self.mc.nop() # has to be patched later on - token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], fcond, save_exc, is_guard_not_invalidated, is_guard_not_forced) self.pending_guards.append(token) @@ -249,8 +249,11 @@ is_guard_not_invalidated=False, is_guard_not_forced=False): descr = op.getdescr() + offset = self.mc.currpos() gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) - #token = PPCGuardToken() + token = PPCGuardToken(self.cpu, gcmap, descr, op.getfailargs(), + arglocs, save_exc, frame_depth, + is_guard_not_invalidated, is_guard_not_forced) return token def emit_guard_true(self, op, arglocs, regalloc): From noreply at buildbot.pypy.org Wed Aug 27 19:31:19 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:19 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Use correct names for new methods and attributes Message-ID: <20140827173119.2B7261D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: 
ppc-updated-backend Changeset: r73089:9182d976d739 Date: 2014-08-27 17:09 -0300 http://bitbucket.org/pypy/pypy/changeset/9182d976d739/ Log: Use correct names for new methods and attributes diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -32,7 +32,7 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.jit.backend.ppc.locations import StackLocation, get_spp_offset +from rpython.jit.backend.ppc.locations import StackLocation, get_spp_offset, imm from rpython.rlib.jit import AsmInfo from rpython.rlib.objectmodel import compute_unique_id @@ -196,6 +196,7 @@ regs, fpregs) self.failure_recovery_func = failure_recovery_func + self.failure_recovery_code = [0, 0, 0] recovery_func_sign = lltype.Ptr(lltype.FuncType([lltype.Signed] * 3, lltype.Signed)) @@ -1124,7 +1125,7 @@ ptr = rffi.cast(lltype.Signed, gcmap) if push: with scratch_reg(mc): - mc.load_imm(r.SCRATCH.value, ptr) + mc.load_imm(r.SCRATCH, ptr) mc.stdu(r.SCRATCH.value, r.SP.value, -WORD) elif store: assert False, "Not implemented" @@ -1132,7 +1133,7 @@ def generate_quick_failure(self, guardtok): startpos = self.mc.currpos() fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.regalloc_push(fail_descr) + self.regalloc_push(imm(fail_descr)) self.push_gcmap(self.mc, gcmap=guardtok.gcmap, push=True) self.mc.call(target) return startpos @@ -1144,13 +1145,13 @@ def process_pending_guards(self, block_start): clt = self.current_clt for tok in self.pending_guards: - descr = tok.descr + descr = tok.faildescr assert isinstance(descr, AbstractFailDescr) descr._ppc_block_start = block_start - if not tok.is_invalidate: + if not tok.is_guard_not_invalidated: mc = PPCBuilder() - offset = descr._ppc_guard_pos - tok.offset + offset = 
tok.pos_recovery_stub - tok.offset mc.b_cond_offset(offset, tok.fcond) mc.prepare_insts_blocks(True) mc.copy_to_raw_memory(block_start + tok.offset) From noreply at buildbot.pypy.org Wed Aug 27 19:31:20 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:20 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Implement regalloc_push for fixed and floating point immediates Message-ID: <20140827173120.4CA1C1D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73090:abc36e75739d Date: 2014-08-27 17:12 -0300 http://bitbucket.org/pypy/pypy/changeset/abc36e75739d/ Log: Implement regalloc_push for fixed and floating point immediates diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -1270,12 +1270,18 @@ """Pushes the value stored in loc to the stack Can trash the current value of SCRATCH when pushing a stack loc""" - if loc.is_imm() or loc.is_imm_float(): - assert 0, "not implemented yet" - self.mc.addi(r.SP.value, r.SP.value, -WORD) # decrease stack pointer assert IS_PPC_64, 'needs to updated for ppc 32' - if loc.is_stack(): + + if loc.is_imm(): + with scratch_reg(self.mc): + self.regalloc_mov(loc, r.SCRATCH) + self.mc.store(r.SCRATCH.value, r.SP.value, 0) + elif loc.is_imm_float(): + with scratch_reg(self.mc): + self.regalloc_mov(loc, r.FP_SCRATCH) + self.mc.store(r.FP_SCRATCH.value, r.SP.value, 0) + elif loc.is_stack(): # XXX this code has to be verified with scratch_reg(self.mc): self.regalloc_mov(loc, r.SCRATCH) From noreply at buildbot.pypy.org Wed Aug 27 19:31:21 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:21 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Avoid index error for operations with no arguments Message-ID: <20140827173121.684941D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann 
Freitas Branch: ppc-updated-backend Changeset: r73091:39a0b812af40 Date: 2014-08-27 17:15 -0300 http://bitbucket.org/pypy/pypy/changeset/39a0b812af40/ Log: Avoid index error for operations with no arguments diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -21,6 +21,7 @@ from rpython.jit.backend.ppc import locations from rpython.rtyper.lltypesystem import rffi, lltype, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.backend.llsupport import symbolic from rpython.jit.backend.llsupport.descr import ArrayDescr import rpython.jit.backend.ppc.register as r @@ -494,8 +495,14 @@ return [loc1, res] def prepare_finish(self, op): - loc = self.loc(op.getarg(0)) - self.possibly_free_var(op.getarg(0)) + if op.numargs() > 0: + loc = self.loc(op.getarg(0)) + self.possibly_free_var(op.getarg(0)) + else: + descr = op.getdescr() + fail_descr = cast_instance_to_gcref(descr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) + loc = imm(fail_descr) return [loc] def prepare_call_malloc_gc(self, op): From noreply at buildbot.pypy.org Wed Aug 27 19:31:22 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:22 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Add alias for the scratch register used with floating point operations Message-ID: <20140827173122.8326E1D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73092:155ab5ceedc9 Date: 2014-08-27 17:17 -0300 http://bitbucket.org/pypy/pypy/changeset/155ab5ceedc9/ Log: Add alias for the scratch register used with floating point operations diff --git a/rpython/jit/backend/ppc/register.py b/rpython/jit/backend/ppc/register.py --- a/rpython/jit/backend/ppc/register.py +++ b/rpython/jit/backend/ppc/register.py @@ -21,11 +21,12 @@ 
f24, f25, f26, f27, f28, f29, f30, f31] VOLATILES_FLOAT = [f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13] -SCRATCH = r0 -SP = r1 -TOC = r2 -RES = r3 -SPP = r31 +SCRATCH = r0 +FP_SCRATCH = f0 +SP = r1 +TOC = r2 +RES = r3 +SPP = r31 MANAGED_REGS = [r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r14, r15, r16, r17, r18, From noreply at buildbot.pypy.org Wed Aug 27 19:31:23 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:23 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Add missing attributes Message-ID: <20140827173123.9F6081D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73093:276cded8054f Date: 2014-08-27 17:19 -0300 http://bitbucket.org/pypy/pypy/changeset/276cded8054f/ Log: Add missing attributes diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -14,8 +14,13 @@ class PPC_CPU(AbstractLLCPU): + + IS_64_BIT = True BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) + frame_reg = r.SP + all_reg_indexes = range(len(r.ALL_REGS)) + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, gcdescr=None): if gcdescr is not None: @@ -39,10 +44,10 @@ self.assembler.finish_once() def compile_loop(self, inputargs, operations, looptoken, log=True, name=""): - return self.assembler.assemble_loop(name, inputargs, + return self.assembler.assemble_loop(name, inputargs, operations, looptoken, log) - def compile_bridge(self, faildescr, inputargs, operations, + def compile_bridge(self, faildescr, inputargs, operations, original_loop_token, log=False): clt = original_loop_token.compiled_loop_token clt.compiling_a_bridge() From noreply at buildbot.pypy.org Wed Aug 27 19:31:24 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:24 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Import and use BasicFinalDescr 
Message-ID: <20140827173124.B692F1D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73094:749844b18617 Date: 2014-08-27 17:21 -0300 http://bitbucket.org/pypy/pypy/changeset/749844b18617/ Log: Import and use BasicFinalDescr diff --git a/rpython/jit/backend/ppc/test/test_call_assembler.py b/rpython/jit/backend/ppc/test/test_call_assembler.py --- a/rpython/jit/backend/ppc/test/test_call_assembler.py +++ b/rpython/jit/backend/ppc/test/test_call_assembler.py @@ -1,6 +1,7 @@ import py from rpython.jit.metainterp.history import BoxInt, ConstInt -from rpython.jit.metainterp.history import BoxPtr, ConstPtr, BasicFailDescr +from rpython.jit.metainterp.history import (BoxPtr, ConstPtr, BasicFailDescr, + BasicFinalDescr) from rpython.jit.metainterp.history import JitCellToken from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.codewriter import heaptracker @@ -54,7 +55,7 @@ for i in range(numargs + 1): namespace["fdescr%d" % i] = BasicFailDescr(i) - namespace["finishdescr"] = BasicFailDescr(numargs + 1) + namespace["finishdescr"] = BasicFinalDescr(numargs + 1) for i in range(1, numargs + 1): arglist = [] From noreply at buildbot.pypy.org Wed Aug 27 19:31:25 2014 From: noreply at buildbot.pypy.org (ISF) Date: Wed, 27 Aug 2014 19:31:25 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Fix imports and make viewcode work again for ppc Message-ID: <20140827173125.CD30A1D36F2@cobra.cs.uni-duesseldorf.de> Author: Ivan Sichmann Freitas Branch: ppc-updated-backend Changeset: r73095:0b2bb05791eb Date: 2014-08-27 17:25 -0300 http://bitbucket.org/pypy/pypy/changeset/0b2bb05791eb/ Log: Fix imports and make viewcode work again for ppc diff --git a/rpython/jit/backend/ppc/tool/viewcode.py b/rpython/jit/backend/ppc/tool/viewcode.py --- a/rpython/jit/backend/ppc/tool/viewcode.py +++ b/rpython/jit/backend/ppc/tool/viewcode.py @@ -23,11 +23,11 @@ tmpfile = str(udir.join('dump.tmp')) # hack hack 
-import pypy.tool -mod = new.module('pypy.tool.udir') +import rpython.tool +mod = new.module('rpython.tool.udir') mod.udir = udir -sys.modules['pypy.tool.udir'] = mod -pypy.tool.udir = mod +sys.modules['rpython.tool.udir'] = mod +rpython.tool.udir = mod # ____________________________________________________________ # Some support code from Psyco. There is more over there, @@ -55,7 +55,7 @@ return format_code_dump_with_labels(originaddr, lines, label_list) def format_code_dump_with_labels(originaddr, lines, label_list): - from pypy.rlib.rarithmetic import r_uint + from rpython.rlib.rarithmetic import r_uint if not label_list: label_list = [] originaddr = r_uint(originaddr) @@ -344,7 +344,7 @@ # http://codespeak.net/svn/user/arigo/hack/misc/graphlib.py # but needs to be a bit more subtle later -from pypy.translator.tool.make_dot import DotGen +from rpython.translator.tool.make_dot import DotGen from dotviewer.graphclient import display_page class Graph(DotGen): @@ -413,7 +413,7 @@ sys.exit(2) # import cStringIO - from pypy.tool import logparser + from rpython.tool import logparser log1 = logparser.parse_log_file(sys.argv[1]) text1 = logparser.extract_category(log1, catprefix='jit-backend-dump') f = cStringIO.StringIO() From noreply at buildbot.pypy.org Wed Aug 27 19:35:18 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 19:35:18 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fix decoding of some values in tkapp, obscure rules Message-ID: <20140827173518.9AB411D36F2@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73096:e76da23f2351 Date: 2014-08-27 10:26 -0700 http://bitbucket.org/pypy/pypy/changeset/e76da23f2351/ Log: Fix decoding of some values in tkapp, obscure rules diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -11,7 +11,7 @@ self.ListType = tklib.Tcl_GetObjType("list") self.ProcBodyType = 
tklib.Tcl_GetObjType("procbody") self.StringType = tklib.Tcl_GetObjType("string") - + def FromObj(app, value): """Convert a TclObj pointer into a Python object.""" @@ -24,7 +24,14 @@ try: result.decode('ascii') except UnicodeDecodeError: - result = result.decode('utf8') + try: + result = result.decode('utf8') + except UnicodeDecodeError: + # Tcl encodes null character as \xc0\x80 + try: + result = result.replace('\xc0\x80', '\x00').decode('utf-8') + except UnicodeDecodeError: + pass return result elif value.typePtr == typeCache.BooleanType: From noreply at buildbot.pypy.org Wed Aug 27 19:35:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 19:35:19 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merged upstream Message-ID: <20140827173519.D822F1D36F2@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73097:138f485e13fe Date: 2014-08-27 10:34 -0700 http://bitbucket.org/pypy/pypy/changeset/138f485e13fe/ Log: merged upstream diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -269,6 +269,9 @@ try: f.write('hello') raises(IOError, f.write, '\n') + f.write('zzz') + raises(IOError, f.flush) + f.flush() finally: f.close() diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -731,16 +731,23 @@ def __init__(self, base, bufsize=-1): self.base = base - self.do_write = base.write # write more data self.do_tell = base.tell # return a byte offset if bufsize == -1: # Get default from the class bufsize = self.bufsize self.bufsize = bufsize # buffer size (hint only) self.buf = [] self.buflen = 0 + self.error = False + + def do_write(self, data): + try: + self.base.write(data) + except: + self.error = True + raise def flush_buffers(self): - if self.buf: + if self.buf and not self.error: self.do_write(''.join(self.buf)) 
self.buf = [] self.buflen = 0 @@ -749,6 +756,7 @@ return self.do_tell() + self.buflen def write(self, data): + self.error = False buflen = self.buflen datalen = len(data) if datalen + buflen < self.bufsize: @@ -783,6 +791,7 @@ """ def write(self, data): + self.error = False p = data.rfind('\n') + 1 assert p >= 0 if self.buflen + len(data) < self.bufsize: From noreply at buildbot.pypy.org Wed Aug 27 20:19:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 20:19:35 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: enhance this test Message-ID: <20140827181935.B9E391D354C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73098:358436365bce Date: 2014-08-27 13:59 -0400 http://bitbucket.org/pypy/pypy/changeset/358436365bce/ Log: enhance this test diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -320,7 +320,8 @@ path = self.path posix = self.posix fd = posix.open(path, posix.O_RDONLY) - raises(OSError, posix.fdopen, fd, 'w') + exc = raises(OSError, posix.fdopen, fd, 'w') + assert str(exc.value) == "[Errno 22] Invalid argument" posix.close(fd) # fd should not be closed def test_getcwd(self): From noreply at buildbot.pypy.org Wed Aug 27 20:19:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 20:19:37 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: check fd compatibility with mode in fdopen_as_stream Message-ID: <20140827181937.214E91D354C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73099:ef774aafe39d Date: 2014-08-27 14:18 -0400 http://bitbucket.org/pypy/pypy/changeset/ef774aafe39d/ Log: check fd compatibility with mode in fdopen_as_stream diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -8,7 +8,7 
@@ from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -299,8 +299,8 @@ def file_fdopen(self, fd, mode="r", buffering=-1): try: self.direct_fdopen(fd, mode, buffering) - except StreamErrors, e: - raise wrap_streamerror(self.space, e, self.w_name) + except OSError as e: + raise wrap_oserror(self.space, e) _exposed_method_names = [] diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -105,6 +105,9 @@ linux = platform.Defined('linux') WIN32 = platform.Defined('_WIN32') + O_RDONLY = platform.DefinedConstantInteger('O_RDONLY') + O_WRONLY = platform.DefinedConstantInteger('O_WRONLY') + O_RDWR = platform.DefinedConstantInteger('O_RDWR') O_NONBLOCK = platform.DefinedConstantInteger('O_NONBLOCK') F_GETFL = platform.DefinedConstantInteger('F_GETFL') F_SETFL = platform.DefinedConstantInteger('F_SETFL') @@ -406,6 +409,9 @@ locals().update(constants) +O_RDONLY = cConfig.O_RDONLY +O_WRONLY = cConfig.O_WRONLY +O_RDWR = cConfig.O_RDWR O_NONBLOCK = cConfig.O_NONBLOCK F_GETFL = cConfig.F_GETFL F_SETFL = cConfig.F_SETFL diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -37,7 +37,7 @@ import os, sys, errno from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.rarithmetic import r_longlong, intmask -from rpython.rlib import rposix, nonconst +from rpython.rlib import rposix, nonconst, _rsocket_rffi as _c from rpython.rlib.rstring import 
StringBuilder from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND @@ -85,10 +85,26 @@ def _setfd_binary(fd): pass +if hasattr(_c, 'fcntl'): + def _check_fd_mode(fd, reading, writing): + flags = intmask(_c.fcntl(fd, _c.F_GETFL, 0)) + if flags & _c.O_RDWR: + return + elif flags & _c.O_WRONLY: + if not reading: + return + else: # O_RDONLY + if not writing: + return + raise OSError(22, "Invalid argument") +else: + def _check_fd_mode(fd, reading, writing): + # XXX + pass + def fdopen_as_stream(fd, mode, buffering=-1, signal_checker=None): - # XXX XXX XXX you want do check whether the modes are compatible - # otherwise you get funny results os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) + _check_fd_mode(fd, reading, writing) _setfd_binary(fd) stream = DiskFile(fd, signal_checker) return construct_stream_tower(stream, buffering, universal, reading, From noreply at buildbot.pypy.org Wed Aug 27 20:19:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 20:19:38 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merge heads Message-ID: <20140827181938.67EF81D354C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73100:ae39690130e6 Date: 2014-08-27 14:19 -0400 http://bitbucket.org/pypy/pypy/changeset/ae39690130e6/ Log: merge heads diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -11,7 +11,7 @@ self.ListType = tklib.Tcl_GetObjType("list") self.ProcBodyType = tklib.Tcl_GetObjType("procbody") self.StringType = tklib.Tcl_GetObjType("string") - + def FromObj(app, value): """Convert a TclObj pointer into a Python object.""" @@ -24,7 +24,14 @@ try: result.decode('ascii') except UnicodeDecodeError: - result = result.decode('utf8') + try: + result = result.decode('utf8') + except UnicodeDecodeError: + # Tcl encodes null character as \xc0\x80 + try: + result = result.replace('\xc0\x80', 
'\x00').decode('utf-8') + except UnicodeDecodeError: + pass return result elif value.typePtr == typeCache.BooleanType: From noreply at buildbot.pypy.org Wed Aug 27 21:32:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 21:32:59 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: allow \x00 as fill char in __format__ Message-ID: <20140827193259.AACC51D366B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73101:c4dbc5c1e8d9 Date: 2014-08-27 15:32 -0400 http://bitbucket.org/pypy/pypy/changeset/c4dbc5c1e8d9/ Log: allow \x00 as fill char in __format__ diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -428,7 +428,7 @@ def _parse_spec(self, default_type, default_align): space = self.space - self._fill_char = self._lit("\0")[0] + self._fill_char = self._lit(" ")[0] self._align = default_align self._alternate = False self._sign = "\0" @@ -441,9 +441,11 @@ length = len(spec) i = 0 got_align = True + got_fill_char = False if length - i >= 2 and self._is_alignment(spec[i + 1]): self._align = spec[i + 1] self._fill_char = spec[i] + got_fill_char = True i += 2 elif length - i >= 1 and self._is_alignment(spec[i]): self._align = spec[i] @@ -456,7 +458,7 @@ if length - i >= 1 and spec[i] == "#": self._alternate = True i += 1 - if self._fill_char == "\0" and length - i >= 1 and spec[i] == "0": + if not got_fill_char and length - i >= 1 and spec[i] == "0": self._fill_char = self._lit("0")[0] if not got_align: self._align = "=" @@ -569,8 +571,6 @@ assert precision >= 0 length = precision string = string[:precision] - if self._fill_char == "\0": - self._fill_char = self._lit(" ")[0] self._calc_padding(string, length) return space.wrap(self._pad(string)) @@ -811,7 +811,7 @@ self._get_locale(tp) spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, n_remainder, False, result) - fill = self._lit(" ") if 
self._fill_char == "\0" else self._fill_char + fill = self._fill_char upper = self._type == "X" return self.space.wrap(self._fill_number(spec, result, to_numeric, to_prefix, fill, to_remainder, upper)) @@ -957,7 +957,7 @@ digits = result spec = self._calc_num_width(0, sign, to_number, n_digits, n_remainder, have_dec_point, digits) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + fill = self._fill_char return self.space.wrap(self._fill_number(spec, digits, to_number, 0, fill, to_remainder, False)) @@ -1092,8 +1092,6 @@ out = self._builder() fill = self._fill_char - if fill == "\0": - fill = self._lit(" ")[0] #compose the string #add left padding diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -184,6 +184,20 @@ format_string = self.s("{{{}:.6f}}").format(sys.maxsize + 1) raises(ValueError, "format(2.34, format_string)") + def test_format_null_fill_char(self): + assert self.s('{0:\x00<6s}').format('foo') == 'foo' + '\x00' * 3 + assert self.s('{0:\x01<6s}').format('foo') == 'foo' + '\x01' * 3 + assert self.s('{0:\x00^6s}').format('foo') == '\x00foo\x00\x00' + + assert self.s('{0:\x00<6}').format(3) == '3' + '\x00' * 5 + assert self.s('{0:\x01<6}').format(3) == '3' + '\x01' * 5 + + assert self.s('{0:\x00<6}').format(3.14) == '3.14' + '\x00' * 2 + assert self.s('{0:\x01<6}').format(3.14) == '3.14' + '\x01' * 2 + + assert self.s('{0:\x00<12}').format(3+2.0j) == '(3+2j)' + '\x00' * 6 + assert self.s('{0:\x01<12}').format(3+2.0j) == '(3+2j)' + '\x01' * 6 + class AppTestUnicodeFormat(BaseStringFormatTests): def setup_class(cls): From noreply at buildbot.pypy.org Wed Aug 27 22:39:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 27 Aug 2014 22:39:17 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix deadlock in thread test_fork Message-ID: 
<20140827203917.376FF1D366B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73102:c8d88835c602 Date: 2014-08-27 16:38 -0400 http://bitbucket.org/pypy/pypy/changeset/c8d88835c602/ Log: fix deadlock in thread test_fork diff --git a/pypy/module/thread/test/test_fork.py b/pypy/module/thread/test/test_fork.py --- a/pypy/module/thread/test/test_fork.py +++ b/pypy/module/thread/test/test_fork.py @@ -13,16 +13,17 @@ skip("No fork on this platform") def busy_thread(): + print 'sleep' while run: time.sleep(0) done.append(None) - for i in range(1): + for i in range(5): run = True done = [] try: + print 'sleep' thread.start_new(busy_thread, ()) - print 'sleep' pid = os.fork() if pid == 0: From noreply at buildbot.pypy.org Wed Aug 27 23:13:01 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 23:13:01 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fix one of the two remaining tcl bugs Message-ID: <20140827211301.85D3A1D36F4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73103:4cde5141989b Date: 2014-08-27 14:11 -0700 http://bitbucket.org/pypy/pypy/changeset/4cde5141989b/ Log: Fix one of the two remaining tcl bugs diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -2,7 +2,7 @@ from .tklib import tklib, tkffi from . 
import TclError -from .tclobj import TclObject, FromObj, AsObj, TypeCache +from .tclobj import TclObject, FromObj, FromTclString, AsObj, TypeCache import contextlib import sys @@ -55,7 +55,7 @@ assert self.app.interp == interp with self.app._tcl_lock_released(): try: - args = [tkffi.string(arg) for arg in argv[1:argc]] + args = [FromTclString(tkffi.string(arg)) for arg in argv[1:argc]] result = self.func(*args) obj = AsObj(result) tklib.Tcl_SetObjResult(interp, obj) diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -13,26 +13,29 @@ self.StringType = tklib.Tcl_GetObjType("string") +def FromTclString(s): + # If the result contains any bytes with the top bit set, it's + # UTF-8 and we should decode it to Unicode. + try: + s.decode('ascii') + except UnicodeDecodeError: + try: + return s.decode('utf8') + except UnicodeDecodeError: + # Tcl encodes null character as \xc0\x80 + try: + return s.replace('\xc0\x80', '\x00').decode('utf-8') + except UnicodeDecodeError: + pass + return s + + def FromObj(app, value): """Convert a TclObj pointer into a Python object.""" typeCache = app._typeCache if not value.typePtr: buf = tkffi.buffer(value.bytes, value.length) - result = buf[:] - # If the result contains any bytes with the top bit set, it's - # UTF-8 and we should decode it to Unicode. 
- try: - result.decode('ascii') - except UnicodeDecodeError: - try: - result = result.decode('utf8') - except UnicodeDecodeError: - # Tcl encodes null character as \xc0\x80 - try: - result = result.replace('\xc0\x80', '\x00').decode('utf-8') - except UnicodeDecodeError: - pass - return result + return FromTclString(buf[:]) elif value.typePtr == typeCache.BooleanType: return bool(value.internalRep.longValue) From noreply at buildbot.pypy.org Wed Aug 27 23:13:02 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 23:13:02 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: merged upstream Message-ID: <20140827211302.EE0171D36F4@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73104:96013c5bad15 Date: 2014-08-27 14:12 -0700 http://bitbucket.org/pypy/pypy/changeset/96013c5bad15/ Log: merged upstream diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -8,7 +8,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -299,8 +299,8 @@ def file_fdopen(self, fd, mode="r", buffering=-1): try: self.direct_fdopen(fd, mode, buffering) - except StreamErrors, e: - raise wrap_streamerror(self.space, e, self.w_name) + except OSError as e: + raise wrap_oserror(self.space, e) _exposed_method_names = [] diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ 
b/pypy/module/posix/test/test_posix2.py @@ -320,7 +320,8 @@ path = self.path posix = self.posix fd = posix.open(path, posix.O_RDONLY) - raises(OSError, posix.fdopen, fd, 'w') + exc = raises(OSError, posix.fdopen, fd, 'w') + assert str(exc.value) == "[Errno 22] Invalid argument" posix.close(fd) # fd should not be closed def test_getcwd(self): diff --git a/pypy/module/thread/test/test_fork.py b/pypy/module/thread/test/test_fork.py --- a/pypy/module/thread/test/test_fork.py +++ b/pypy/module/thread/test/test_fork.py @@ -13,16 +13,17 @@ skip("No fork on this platform") def busy_thread(): + print 'sleep' while run: time.sleep(0) done.append(None) - for i in range(1): + for i in range(5): run = True done = [] try: + print 'sleep' thread.start_new(busy_thread, ()) - print 'sleep' pid = os.fork() if pid == 0: diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -428,7 +428,7 @@ def _parse_spec(self, default_type, default_align): space = self.space - self._fill_char = self._lit("\0")[0] + self._fill_char = self._lit(" ")[0] self._align = default_align self._alternate = False self._sign = "\0" @@ -441,9 +441,11 @@ length = len(spec) i = 0 got_align = True + got_fill_char = False if length - i >= 2 and self._is_alignment(spec[i + 1]): self._align = spec[i + 1] self._fill_char = spec[i] + got_fill_char = True i += 2 elif length - i >= 1 and self._is_alignment(spec[i]): self._align = spec[i] @@ -456,7 +458,7 @@ if length - i >= 1 and spec[i] == "#": self._alternate = True i += 1 - if self._fill_char == "\0" and length - i >= 1 and spec[i] == "0": + if not got_fill_char and length - i >= 1 and spec[i] == "0": self._fill_char = self._lit("0")[0] if not got_align: self._align = "=" @@ -569,8 +571,6 @@ assert precision >= 0 length = precision string = string[:precision] - if self._fill_char == "\0": - self._fill_char = self._lit(" ")[0] self._calc_padding(string, length) return 
space.wrap(self._pad(string)) @@ -811,7 +811,7 @@ self._get_locale(tp) spec = self._calc_num_width(n_prefix, sign_char, to_numeric, n_digits, n_remainder, False, result) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + fill = self._fill_char upper = self._type == "X" return self.space.wrap(self._fill_number(spec, result, to_numeric, to_prefix, fill, to_remainder, upper)) @@ -957,7 +957,7 @@ digits = result spec = self._calc_num_width(0, sign, to_number, n_digits, n_remainder, have_dec_point, digits) - fill = self._lit(" ") if self._fill_char == "\0" else self._fill_char + fill = self._fill_char return self.space.wrap(self._fill_number(spec, digits, to_number, 0, fill, to_remainder, False)) @@ -1092,8 +1092,6 @@ out = self._builder() fill = self._fill_char - if fill == "\0": - fill = self._lit(" ")[0] #compose the string #add left padding diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py --- a/pypy/objspace/std/test/test_newformat.py +++ b/pypy/objspace/std/test/test_newformat.py @@ -184,6 +184,20 @@ format_string = self.s("{{{}:.6f}}").format(sys.maxsize + 1) raises(ValueError, "format(2.34, format_string)") + def test_format_null_fill_char(self): + assert self.s('{0:\x00<6s}').format('foo') == 'foo' + '\x00' * 3 + assert self.s('{0:\x01<6s}').format('foo') == 'foo' + '\x01' * 3 + assert self.s('{0:\x00^6s}').format('foo') == '\x00foo\x00\x00' + + assert self.s('{0:\x00<6}').format(3) == '3' + '\x00' * 5 + assert self.s('{0:\x01<6}').format(3) == '3' + '\x01' * 5 + + assert self.s('{0:\x00<6}').format(3.14) == '3.14' + '\x00' * 2 + assert self.s('{0:\x01<6}').format(3.14) == '3.14' + '\x01' * 2 + + assert self.s('{0:\x00<12}').format(3+2.0j) == '(3+2j)' + '\x00' * 6 + assert self.s('{0:\x01<12}').format(3+2.0j) == '(3+2j)' + '\x01' * 6 + class AppTestUnicodeFormat(BaseStringFormatTests): def setup_class(cls): diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- 
a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -105,6 +105,9 @@ linux = platform.Defined('linux') WIN32 = platform.Defined('_WIN32') + O_RDONLY = platform.DefinedConstantInteger('O_RDONLY') + O_WRONLY = platform.DefinedConstantInteger('O_WRONLY') + O_RDWR = platform.DefinedConstantInteger('O_RDWR') O_NONBLOCK = platform.DefinedConstantInteger('O_NONBLOCK') F_GETFL = platform.DefinedConstantInteger('F_GETFL') F_SETFL = platform.DefinedConstantInteger('F_SETFL') @@ -406,6 +409,9 @@ locals().update(constants) +O_RDONLY = cConfig.O_RDONLY +O_WRONLY = cConfig.O_WRONLY +O_RDWR = cConfig.O_RDWR O_NONBLOCK = cConfig.O_NONBLOCK F_GETFL = cConfig.F_GETFL F_SETFL = cConfig.F_SETFL diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -37,7 +37,7 @@ import os, sys, errno from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rlib.rarithmetic import r_longlong, intmask -from rpython.rlib import rposix, nonconst +from rpython.rlib import rposix, nonconst, _rsocket_rffi as _c from rpython.rlib.rstring import StringBuilder from os import O_RDONLY, O_WRONLY, O_RDWR, O_CREAT, O_TRUNC, O_APPEND @@ -85,10 +85,26 @@ def _setfd_binary(fd): pass +if hasattr(_c, 'fcntl'): + def _check_fd_mode(fd, reading, writing): + flags = intmask(_c.fcntl(fd, _c.F_GETFL, 0)) + if flags & _c.O_RDWR: + return + elif flags & _c.O_WRONLY: + if not reading: + return + else: # O_RDONLY + if not writing: + return + raise OSError(22, "Invalid argument") +else: + def _check_fd_mode(fd, reading, writing): + # XXX + pass + def fdopen_as_stream(fd, mode, buffering=-1, signal_checker=None): - # XXX XXX XXX you want do check whether the modes are compatible - # otherwise you get funny results os_flags, universal, reading, writing, basemode, binary = decode_mode(mode) + _check_fd_mode(fd, reading, writing) _setfd_binary(fd) stream = DiskFile(fd, signal_checker) return 
construct_stream_tower(stream, buffering, universal, reading, From noreply at buildbot.pypy.org Wed Aug 27 23:57:59 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Wed, 27 Aug 2014 23:57:59 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: (alex, dreid): fix the last failing tkinter test... this took a very long time to debug Message-ID: <20140827215759.2A61D1D36F6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73105:53c7f5cc4006 Date: 2014-08-27 14:57 -0700 http://bitbucket.org/pypy/pypy/changeset/53c7f5cc4006/ Log: (alex, dreid): fix the last failing tkinter test... this took a very long time to debug diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -5,6 +5,8 @@ # This version is based on cffi, and is a translation of _tkinter.c # from CPython, version 2.7.4. +import sys + class TclError(Exception): pass @@ -50,4 +52,6 @@ result = [] _flatten1(result, item, 0) return tuple(result) - + + +tklib.Tcl_FindExecutable(sys.executable) diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py --- a/lib_pypy/_tkinter/tklib.py +++ b/lib_pypy/_tkinter/tklib.py @@ -112,6 +112,7 @@ int Tcl_DoOneEvent(int flags); int Tk_GetNumMainWindows(); +void Tcl_FindExecutable(char *argv0); """) # XXX find a better way to detect paths From noreply at buildbot.pypy.org Thu Aug 28 01:18:41 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 01:18:41 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: (alex, dreid): fixed a bug in the utf7 decoder where unconsumed characters would still be returned Message-ID: <20140827231841.A81421D36F2@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73106:4f71852efa78 Date: 2014-08-27 16:18 -0700 http://bitbucket.org/pypy/pypy/changeset/4f71852efa78/ Log: (alex, dreid): fixed a bug in the utf7 decoder where unconsumed characters would 
still be returned diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -906,7 +906,7 @@ elif inShift: pos = shiftOutStartPos # back off output - return result.build(), pos + return result.build()[:pos], pos def unicode_encode_utf_7(s, size, errors, errorhandler=None): if size == 0: diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -231,6 +231,8 @@ assert decode(s, 4, None) == (u'a+-', 4) assert decode(s, 5, None) == (u'a+-b', 5) + assert decode((27 * u"\u3042" + "\n").encode('utf7')[:28], 28, None) == (u'', 0) + def test_utf7_surrogates(self): encode = self.getencoder('utf-7') u = u'\U000abcde' From noreply at buildbot.pypy.org Thu Aug 28 01:51:49 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 01:51:49 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fixed translation Message-ID: <20140827235149.6C0EE1D3551@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73107:b223e8747096 Date: 2014-08-27 16:51 -0700 http://bitbucket.org/pypy/pypy/changeset/b223e8747096/ Log: Fixed translation diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -906,6 +906,7 @@ elif inShift: pos = shiftOutStartPos # back off output + assert pos >= 0 return result.build()[:pos], pos def unicode_encode_utf_7(s, size, errors, errorhandler=None): From noreply at buildbot.pypy.org Thu Aug 28 09:00:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 28 Aug 2014 09:00:08 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20140828070008.0786F1C000D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r529:620cf455b509 Date: 2014-08-28 09:00 +0200 
http://bitbucket.org/pypy/pypy.org/changeset/620cf455b509/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $52313 of $105000 (49.8%) + $52380 of $105000 (49.9%)
          diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $48398 of $60000 (80.7%) + $48408 of $60000 (80.7%)
          diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $13914 of $80000 (17.4%) + $13939 of $80000 (17.4%)
          From noreply at buildbot.pypy.org Thu Aug 28 11:02:12 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 28 Aug 2014 11:02:12 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: opimpl impl for stm_transaction_break Message-ID: <20140828090213.25E721D2ACA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73108:9ba1d8b49ca3 Date: 2014-08-28 11:02 +0200 http://bitbucket.org/pypy/pypy/changeset/9ba1d8b49ca3/ Log: opimpl impl for stm_transaction_break diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -434,7 +434,7 @@ 'stm_abort_and_retry': LLOp(canmallocgc=True), 'stm_enter_callback_call': LLOp(canmallocgc=True), 'stm_leave_callback_call': LLOp(), - 'stm_transaction_break': LLOp(canmallocgc=True), + 'stm_transaction_break': LLOp(canmallocgc=True, canrun=True), 'stm_should_break_transaction': LLOp(sideeffects=False), 'stm_rewind_jmp_frame': LLOp(canrun=True), 'stm_set_transaction_length': LLOp(), diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -718,6 +718,9 @@ def op_stm_rewind_jmp_frame(x=None): return llmemory.NULL +def op_stm_transaction_break(): + pass + def op_stm_hint_commit_soon(): pass From noreply at buildbot.pypy.org Thu Aug 28 14:42:31 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Thu, 28 Aug 2014 14:42:31 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: make duhton translate without --stm on this branch Message-ID: <20140828124231.98CCA1D237F@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73109:98cf7234b83a Date: 2014-08-28 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/98cf7234b83a/ Log: make duhton translate without --stm on this branch diff --git a/rpython/memory/gc/base.py 
b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -344,6 +344,7 @@ break obj = self.run_finalizers.popleft() finalizer = self.getfinalizer(self.get_type_id(obj)) + assert finalizer finalizer(obj) finally: self.finalizer_lock_count -= 1 diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -48,7 +48,7 @@ LL_OPERATIONS[op.opname].canmallocgc) - + def find_initializing_stores(collect_analyzer, graph): from rpython.flowspace.model import mkentrymap entrymap = mkentrymap(graph) @@ -625,7 +625,7 @@ # causes it to return True raise Exception("'no_collect' function can trigger collection:" " %s\n%s" % (func, err.getvalue())) - + if self.write_barrier_ptr: self.clean_sets = ( find_initializing_stores(self.collect_analyzer, graph)) @@ -1284,15 +1284,15 @@ ll_call_destructor(destrptr, v, typename) fptr = self.transformer.annotate_finalizer(ll_finalizer, [llmemory.Address], lltype.Void) - g = destrptr._obj.graph - if self.translator.config.translation.stm: - light = False # XXX no working finalizers with STM so far - else: - try: + try: + g = destrptr._obj.graph + if self.translator.config.translation.stm: + light = False # XXX no working finalizers with STM so far + else: analyzer = FinalizerAnalyzer(self.translator) light = not analyzer.analyze_light_finalizer(g) - except lltype.DelayedPointer: - light = False # XXX bah, too bad + except lltype.DelayedPointer: + light = False # XXX bah, too bad return fptr, light def make_custom_trace_funcptr_for_type(self, TYPE): diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -315,10 +315,16 @@ source = py.code.Source(r""" def wrapper(%(args)s): # no *args - no GIL for mallocing the tuple token = 0 - rjbuf = 
llop.stm_rewind_jmp_frame(llmemory.Address, 1) + if rgc.stm_is_enabled(): + rjbuf = llop.stm_rewind_jmp_frame(llmemory.Address, 1) + else: + rjbuf = llmemory.NULL if aroundstate is not None: if aroundstate.enter_callback is not None: - token = aroundstate.enter_callback(rjbuf) + if rgc.stm_is_enabled(): + token = aroundstate.enter_callback(rjbuf) + else: + aroundstate.enter_callback() else: after = aroundstate.after if after is not None: @@ -339,7 +345,10 @@ stackcounter.stacks_counter -= 1 if aroundstate is not None: if aroundstate.leave_callback is not None: - aroundstate.leave_callback(rjbuf, token) + if rgc.stm_is_enabled(): + aroundstate.leave_callback(rjbuf, token) + else: + aroundstate.leave_callback() else: before = aroundstate.before if before is not None: @@ -352,6 +361,7 @@ miniglobals = locals().copy() miniglobals['Exception'] = Exception miniglobals['os'] = os + miniglobals['rgc'] = rgc miniglobals['we_are_translated'] = we_are_translated miniglobals['stackcounter'] = stackcounter miniglobals['llmemory'] = llmemory From noreply at buildbot.pypy.org Thu Aug 28 15:12:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 28 Aug 2014 15:12:16 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: Test and fix Message-ID: <20140828131216.39B0E1D38D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: stmgc-c7 Changeset: r73110:e8fa261dce03 Date: 2014-08-28 15:11 +0200 http://bitbucket.org/pypy/pypy/changeset/e8fa261dce03/ Log: Test and fix diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -435,11 +435,14 @@ if isinstance(ARGTYPE, ContainerType): args[-1] = '*%s' % (args[-1],) - line = '%s(%s);' % (fnexpr, ', '.join(args)) - if self.lltypemap(v_result) is not Void: - # skip assignment of 'void' return value - r = self.expr(v_result) - line = '%s = %s' % (r, line) + if fnexpr == 'NULL': + line = 'abort(); /* call to NULL */' + else: + line = 
'%s(%s);' % (fnexpr, ', '.join(args)) + if self.lltypemap(v_result) is not Void: + # skip assignment of 'void' return value + r = self.expr(v_result) + line = '%s = %s' % (r, line) if targets: for graph in targets: if getattr(graph, 'inhibit_tail_call', False): diff --git a/rpython/translator/c/test/test_lltyped.py b/rpython/translator/c/test/test_lltyped.py --- a/rpython/translator/c/test/test_lltyped.py +++ b/rpython/translator/c/test/test_lltyped.py @@ -956,3 +956,13 @@ fn = self.getcompiled(f, [int]) assert fn(0) == 9 + + def test_call_null_funcptr(self): + fnptr = nullptr(FuncType([], Void)) + def f(n): + if n > 10: + fnptr() # never reached, or so we hope + return n + + fn = self.getcompiled(f, [int]) + assert fn(6) == 6 From noreply at buildbot.pypy.org Thu Aug 28 18:48:09 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 18:48:09 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: (alex, dreid) Completely fix the utf7 decoder this time Message-ID: <20140828164809.A61321C000D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73111:687dd5c34b84 Date: 2014-08-28 09:47 -0700 http://bitbucket.org/pypy/pypy/changeset/687dd5c34b84/ Log: (alex, dreid) Completely fix the utf7 decoder this time diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -874,13 +874,14 @@ result.append(unichr(ord(ch))) elif ch == '+': + startingpos = pos pos += 1 # consume '+' if pos < size and s[pos] == '-': # '+-' encodes '+' pos += 1 result.append(u'+') else: # begin base64-encoded section inShift = 1 - shiftOutStartPos = pos - 1 + shiftOutStartPos = result.getlength() base64bits = 0 base64buffer = 0 @@ -888,13 +889,14 @@ result.append(unichr(oc)) pos += 1 else: + startingpos = pos pos += 1 msg = "unexpected special character" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) result.append(res) # end of string - + final_length = 
result.getlength() if inShift and final: # in shift sequence, no more to follow # if we're in an inconsistent state, that's an error if (surrogate or @@ -904,10 +906,11 @@ res, pos = errorhandler(errors, 'utf7', msg, s, shiftOutStartPos, pos) result.append(res) elif inShift: - pos = shiftOutStartPos # back off output + pos = startingpos + final_length = shiftOutStartPos # back off output - assert pos >= 0 - return result.build()[:pos], pos + assert final_length >= 0 + return result.build()[:final_length], pos def unicode_encode_utf_7(s, size, errors, errorhandler=None): if size == 0: diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -232,6 +232,7 @@ assert decode(s, 5, None) == (u'a+-b', 5) assert decode((27 * u"\u3042" + "\n").encode('utf7')[:28], 28, None) == (u'', 0) + assert decode('+MEI\n+MEIwQjBCMEIwQjBCMEIwQjBCMEIwQjBCMEIwQjBCMEIwQjBCMEIwQjBCMEIwQjBCME', 72, None) == (u'\u3042\n', 5) def test_utf7_surrogates(self): encode = self.getencoder('utf-7') From noreply at buildbot.pypy.org Thu Aug 28 18:59:16 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 18:59:16 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: Fixed translation and a bug with replace error handler Message-ID: <20140828165916.9529E1C000D@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73112:3f0c24c4ae4a Date: 2014-08-28 09:58 -0700 http://bitbucket.org/pypy/pypy/changeset/3f0c24c4ae4a/ Log: Fixed translation and a bug with replace error handler diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -800,6 +800,7 @@ result = UnicodeBuilder(size) pos = 0 shiftOutStartPos = 0 + startinpos = 0 while pos < size: ch = s[pos] oc = ord(ch) @@ -874,7 +875,7 @@ result.append(unichr(ord(ch))) elif ch == '+': - startingpos = pos + startinpos = pos 
pos += 1 # consume '+' if pos < size and s[pos] == '-': # '+-' encodes '+' pos += 1 @@ -889,7 +890,7 @@ result.append(unichr(oc)) pos += 1 else: - startingpos = pos + startinpos = pos pos += 1 msg = "unexpected special character" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) @@ -905,8 +906,9 @@ msg = "unterminated shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, shiftOutStartPos, pos) result.append(res) + final_length = result.getlength() elif inShift: - pos = startingpos + pos = startinpos final_length = shiftOutStartPos # back off output assert final_length >= 0 From noreply at buildbot.pypy.org Thu Aug 28 20:12:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 20:12:13 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: adjust this test for pypy Message-ID: <20140828181213.66A821D3551@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73113:0bdbb7cefe56 Date: 2014-08-28 14:11 -0400 http://bitbucket.org/pypy/pypy/changeset/0bdbb7cefe56/ Log: adjust this test for pypy diff --git a/lib-python/2.7/idlelib/idle_test/test_delegator.py b/lib-python/2.7/idlelib/idle_test/test_delegator.py --- a/lib-python/2.7/idlelib/idle_test/test_delegator.py +++ b/lib-python/2.7/idlelib/idle_test/test_delegator.py @@ -14,8 +14,8 @@ # add an attribute: self.assertRaises(AttributeError, mydel.__getattr__, 'xyz') bl = mydel.bit_length - self.assertIs(bl, int.bit_length) - self.assertIs(mydel.__dict__['bit_length'], int.bit_length) + self.assertEqual(bl, int.bit_length) + self.assertEqual(mydel.__dict__['bit_length'], int.bit_length) self.assertEqual(mydel._Delegator__cache, {'bit_length'}) # add a second attribute From noreply at buildbot.pypy.org Thu Aug 28 20:20:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 20:20:31 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: adjust this function to work on pypy? 
Message-ID: <20140828182031.136431D366B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73114:25424a2825f1 Date: 2014-08-28 14:19 -0400 http://bitbucket.org/pypy/pypy/changeset/25424a2825f1/ Log: adjust this function to work on pypy? diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -124,6 +124,7 @@ # Given a class object, return a function object used for the # constructor (ie, __init__() ) or None if we can't find one. try: + class_ob.__init__.im_func.func_code.co_code return class_ob.__init__.im_func except AttributeError: for base in class_ob.__bases__: From noreply at buildbot.pypy.org Thu Aug 28 20:32:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 20:32:52 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: further hacks for pypy compat Message-ID: <20140828183252.7127C1D36F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73115:bfc017df0694 Date: 2014-08-28 14:32 -0400 http://bitbucket.org/pypy/pypy/changeset/bfc017df0694/ Log: further hacks for pypy compat diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -170,7 +170,7 @@ fob = ob.im_func if ob.im_self is not None: arg_offset = 1 - elif type(ob_call) == types.MethodType: + elif type(ob_call) == types.MethodType and hasattr(ob_call.im_func.func_code, 'co_code'): # a callable class instance fob = ob_call.im_func arg_offset = 1 @@ -200,7 +200,7 @@ lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT) if len(argspec) > _MAX_COLS else [argspec] if argspec else []) - if isinstance(ob_call, types.MethodType): + if isinstance(ob_call, types.MethodType) and hasattr(ob_call.im_func.func_code, 'co_code'): doc = ob_call.__doc__ else: doc = getattr(ob, "__doc__", "") 
From noreply at buildbot.pypy.org Thu Aug 28 20:35:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 20:35:38 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: fix list's docstring Message-ID: <20140828183538.77CF41D36F4@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73116:50e98deea8f2 Date: 2014-08-28 14:35 -0400 http://bitbucket.org/pypy/pypy/changeset/50e98deea8f2/ Log: fix list's docstring diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1830,8 +1830,8 @@ W_ListObject.typedef = StdTypeDef("list", - __doc__ = """list() -> new list -list(sequence) -> new list initialized from sequence's items""", + __doc__ = """list() -> new empty list +list(iterable) -> new list initialized from iterable's items""", __new__ = interp2app(W_ListObject.descr_new), __init__ = interp2app(W_ListObject.descr_init), __repr__ = interp2app(W_ListObject.descr_repr), diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -441,6 +441,9 @@ cls.w_on_arm = cls.space.wrap(platform.machine().startswith('arm')) cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + def test_doc(self): + assert list.__doc__ == "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items" + def test_getstrategyfromlist_w(self): l0 = ["a", "2", "a", True] # this raised TypeError on ListStrategies From noreply at buildbot.pypy.org Thu Aug 28 20:45:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 20:45:48 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: same for list.__new__ Message-ID: <20140828184548.8EED41C3CCC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73117:6c8a4e26c1bc Date: 2014-08-28 14:45 -0400 
http://bitbucket.org/pypy/pypy/changeset/6c8a4e26c1bc/ Log: same for list.__new__ diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -358,6 +358,7 @@ @staticmethod def descr_new(space, w_listtype, __args__): + """T.__new__(S, ...) -> a new object with type S, a subtype of T""" w_obj = space.allocate_instance(W_ListObject, w_listtype) w_obj.clear(space) return w_obj diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -443,6 +443,7 @@ def test_doc(self): assert list.__doc__ == "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items" + assert list.__new__.__doc__ == "T.__new__(S, ...) -> a new object with type S, a subtype of T" def test_getstrategyfromlist_w(self): l0 = ["a", "2", "a", True] From noreply at buildbot.pypy.org Thu Aug 28 21:56:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 21:56:21 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid a tell here by using relative seek Message-ID: <20140828195621.380271D2D9C@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73118:796ebef4b291 Date: 2014-08-28 15:55 -0400 http://bitbucket.org/pypy/pypy/changeset/796ebef4b291/ Log: avoid a tell here by using relative seek diff --git a/rpython/rlib/streamio.py b/rpython/rlib/streamio.py --- a/rpython/rlib/streamio.py +++ b/rpython/rlib/streamio.py @@ -532,7 +532,7 @@ def flush_buffers(self): if self.buf: try: - self.do_seek(self.tell(), 0) + self.do_seek(self.pos - len(self.buf), 1) except (MyNotImplementedError, OSError): pass else: From noreply at buildbot.pypy.org Thu Aug 28 22:20:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 22:20:49 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: another fix for 
idlelib Message-ID: <20140828202049.C10B91C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73119:ac7806a1e7cd Date: 2014-08-28 16:20 -0400 http://bitbucket.org/pypy/pypy/changeset/ac7806a1e7cd/ Log: another fix for idlelib diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -177,7 +177,7 @@ else: fob = ob # Try to build one for Python defined functions - if type(fob) in [types.FunctionType, types.LambdaType]: + if type(fob) in [types.FunctionType, types.LambdaType] and hasattr(fob.func_code, 'co_code'): argcount = fob.func_code.co_argcount real_args = fob.func_code.co_varnames[arg_offset:argcount] defaults = fob.func_defaults or [] From noreply at buildbot.pypy.org Thu Aug 28 22:26:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 28 Aug 2014 22:26:03 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: line length Message-ID: <20140828202603.177971C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: stdlib-2.7.8 Changeset: r73120:3aa7ae874502 Date: 2014-08-28 16:25 -0400 http://bitbucket.org/pypy/pypy/changeset/3aa7ae874502/ Log: line length diff --git a/lib-python/2.7/idlelib/CallTips.py b/lib-python/2.7/idlelib/CallTips.py --- a/lib-python/2.7/idlelib/CallTips.py +++ b/lib-python/2.7/idlelib/CallTips.py @@ -170,14 +170,16 @@ fob = ob.im_func if ob.im_self is not None: arg_offset = 1 - elif type(ob_call) == types.MethodType and hasattr(ob_call.im_func.func_code, 'co_code'): + elif type(ob_call) == types.MethodType and \ + hasattr(ob_call.im_func.func_code, 'co_code'): # a callable class instance fob = ob_call.im_func arg_offset = 1 else: fob = ob # Try to build one for Python defined functions - if type(fob) in [types.FunctionType, types.LambdaType] and hasattr(fob.func_code, 'co_code'): + if type(fob) in [types.FunctionType, types.LambdaType] and \ + hasattr(fob.func_code, 
'co_code'): argcount = fob.func_code.co_argcount real_args = fob.func_code.co_varnames[arg_offset:argcount] defaults = fob.func_defaults or [] @@ -200,7 +202,8 @@ lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT) if len(argspec) > _MAX_COLS else [argspec] if argspec else []) - if isinstance(ob_call, types.MethodType) and hasattr(ob_call.im_func.func_code, 'co_code'): + if isinstance(ob_call, types.MethodType) and \ + hasattr(ob_call.im_func.func_code, 'co_code'): doc = ob_call.__doc__ else: doc = getattr(ob, "__doc__", "") From noreply at buildbot.pypy.org Thu Aug 28 23:46:59 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 23:46:59 +0200 (CEST) Subject: [pypy-commit] pypy stdlib-2.7.8: closing branch for merge Message-ID: <20140828214659.647401D237F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: stdlib-2.7.8 Changeset: r73121:0ad4fc224862 Date: 2014-08-28 14:45 -0700 http://bitbucket.org/pypy/pypy/changeset/0ad4fc224862/ Log: closing branch for merge From noreply at buildbot.pypy.org Thu Aug 28 23:47:05 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 28 Aug 2014 23:47:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in the stdlib-2.7.8 branch. Message-ID: <20140828214705.E20BA1D237F@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73122:eb8c4b7527dc Date: 2014-08-28 14:46 -0700 http://bitbucket.org/pypy/pypy/changeset/eb8c4b7527dc/ Log: Merged in the stdlib-2.7.8 branch. This upgrades our copy of the stdlib to be 2.7.8, and fixes all the resulting compatibility stuff. Thanks to everyone who contributed to get this done so quickly! 
diff too long, truncating to 2000 out of 37070 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: 
#bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent 
index.html). diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. 
There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. 
""" diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' 
in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise 
ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -319,6 +319,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -331,6 +332,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -2,6 +2,8 @@ from ctypes import * from test.test_support import impl_detail +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" # c_longdouble commented out for PyPy, look at the commend in test_longdouble @@ -98,8 +100,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -112,20 +114,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -139,7 +137,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git 
a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -131,15 +132,6 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result[0], TypeError) - self.assertIn('bit fields not allowed for type', result[1]) - class Dummy(Structure): _fields_ = [] @@ -147,6 +139,12 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, 
-1)) @@ -213,7 +211,7 @@ class X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -246,7 +244,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -256,7 +254,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 
32) + self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -15,7 +15,8 @@ # For Structures and Unions, 
these types are created on demand. class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -25,11 +26,11 @@ @xfail def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -53,11 +54,11 @@ @xfail def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -82,11 +83,11 @@ @xfail def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + 
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -111,11 +112,11 @@ @xfail def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -130,11 +131,11 @@ @xfail def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -146,14 +147,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + 
self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) @xfail def test_struct_fields_1(self): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol from ctypes.test import xfail import _ctypes_test @@ -95,9 +96,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") @xfail def test_pyobject(self): @@ -150,13 +152,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -186,7 +187,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -198,23 +199,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes 
import BOOL, HWND, LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, 
c_void_p), c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test from test.test_support import impl_detail @@ -196,7 +197,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -206,9 +207,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): 
-## return "" % self.value - -##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -25,7 +25,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest from ctypes.test import xfail from test.test_support import impl_detail @@ -65,22 +66,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -158,11 +153,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -299,6 +291,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -309,7 +302,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -351,16 +344,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def 
test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = [("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): @@ -379,23 +372,24 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) - if sys.platform == "win32": - def test_struct_return_8H_stdcall(self): - class S8I(Structure): - _fields_ = [("a", c_int), - ("b", c_int), - ("c", c_int), - ("d", c_int), - ("e", c_int), - ("f", c_int), - ("g", c_int), - ("h", c_int)] - windll.s_ret_8i_func.restype = S8I - windll.s_ret_8i_func.argtypes = [S8I] - inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) - s8i = windll.s_ret_8i_func(inp) - self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), - (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_8H_stdcall(self): + class S8I(Structure): + _fields_ = [("a", c_int), + ("b", c_int), + ("c", c_int), + ("d", c_int), + ("e", c_int), + ("f", c_int), + ("g", c_int), + ("h", c_int)] + windll.s_ret_8i_func.restype = S8I + windll.s_ret_8i_func.argtypes = [S8I] + inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) + s8i = windll.s_ret_8i_func(inp) + self.assertEqual( + (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), + (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) @xfail def test_sf1651235(self): diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_integers.py +++ /dev/null @@ -1,5 +0,0 @@ -# superseded by test_numbers.py -import unittest - -if 
__name__ == '__main__': - unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_keeprefs.py b/lib-python/2.7/ctypes/test/test_keeprefs.py --- a/lib-python/2.7/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7/ctypes/test/test_keeprefs.py @@ -94,7 +94,8 @@ self.assertEqual(x._objects, {'1': i}) class DeletePointerTestCase(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): class X(Structure): _fields_ = [("p", POINTER(c_char_p))] x = X() diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py --- a/lib-python/2.7/ctypes/test/test_loading.py +++ b/lib-python/2.7/ctypes/test/test_loading.py @@ -21,18 +21,21 @@ unknowndll = "xxrandomnamexx" - if libc_name is not None: - def test_load(self): - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) - self.assertRaises(OSError, CDLL, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + def test_load(self): + CDLL(libc_name) + CDLL(os.path.basename(libc_name)) + self.assertRaises(OSError, CDLL, self.unknowndll) - if libc_name is not None and os.path.basename(libc_name) == "libc.so.6": - def test_load_version(self): - cdll.LoadLibrary("libc.so.6") - # linux uses version, libc 9 should not exist - self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") - self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + @unittest.skipUnless(libc_name is not None and + os.path.basename(libc_name) == "libc.so.6", + 'wrong libc path for test') + def test_load_version(self): + cdll.LoadLibrary("libc.so.6") + # linux uses version, libc 9 should not exist + self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") + self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) def test_find(self): for name in ("c", "m"): @@ -41,68 +44,73 @@ cdll.LoadLibrary(lib) CDLL(lib) - if os.name in ("nt", "ce"): - def test_load_library(self): - 
self.assertFalse(libc_name is None) - if is_resource_enabled("printing"): - print find_library("kernel32") - print find_library("user32") + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_library(self): + self.assertIsNotNone(libc_name) + if is_resource_enabled("printing"): + print find_library("kernel32") + print find_library("user32") - if os.name == "nt": - windll.kernel32.GetModuleHandleW - windll["kernel32"].GetModuleHandleW - windll.LoadLibrary("kernel32").GetModuleHandleW - WinDLL("kernel32").GetModuleHandleW - elif os.name == "ce": - windll.coredll.GetModuleHandleW - windll["coredll"].GetModuleHandleW - windll.LoadLibrary("coredll").GetModuleHandleW - WinDLL("coredll").GetModuleHandleW + if os.name == "nt": + windll.kernel32.GetModuleHandleW + windll["kernel32"].GetModuleHandleW + windll.LoadLibrary("kernel32").GetModuleHandleW + WinDLL("kernel32").GetModuleHandleW + elif os.name == "ce": + windll.coredll.GetModuleHandleW + windll["coredll"].GetModuleHandleW + windll.LoadLibrary("coredll").GetModuleHandleW + WinDLL("coredll").GetModuleHandleW - def test_load_ordinal_functions(self): - import _ctypes_test - dll = WinDLL(_ctypes_test.__file__) - # We load the same function both via ordinal and name - func_ord = dll[2] - func_name = dll.GetString - # addressof gets the address where the function pointer is stored - a_ord = addressof(func_ord) - a_name = addressof(func_name) - f_ord_addr = c_void_p.from_address(a_ord).value - f_name_addr = c_void_p.from_address(a_name).value - self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_ordinal_functions(self): + import _ctypes_test + dll = WinDLL(_ctypes_test.__file__) + # We load the same function both via ordinal and name + func_ord = dll[2] + func_name = dll.GetString + # addressof gets the address where the function pointer is stored + a_ord = addressof(func_ord) 
+ a_name = addressof(func_name) + f_ord_addr = c_void_p.from_address(a_ord).value + f_name_addr = c_void_p.from_address(a_name).value + self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) - self.assertRaises(AttributeError, dll.__getitem__, 1234) + self.assertRaises(AttributeError, dll.__getitem__, 1234) - if os.name == "nt": - @xfail - def test_1703286_A(self): - from _ctypes import LoadLibrary, FreeLibrary - # On winXP 64-bit, advapi32 loads at an address that does - # NOT fit into a 32-bit integer. FreeLibrary must be able - # to accept this address. + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_A(self): + from _ctypes import LoadLibrary, FreeLibrary + # On winXP 64-bit, advapi32 loads at an address that does + # NOT fit into a 32-bit integer. FreeLibrary must be able + # to accept this address. - # These are tests for http://www.python.org/sf/1703286 - handle = LoadLibrary("advapi32") - FreeLibrary(handle) + # These are tests for http://www.python.org/sf/1703286 + handle = LoadLibrary("advapi32") + FreeLibrary(handle) - @xfail - def test_1703286_B(self): - # Since on winXP 64-bit advapi32 loads like described - # above, the (arbitrarily selected) CloseEventLog function - # also has a high address. 'call_function' should accept - # addresses so large. - from _ctypes import call_function - advapi32 = windll.advapi32 - # Calling CloseEventLog with a NULL argument should fail, - # but the call should not segfault or so. 
- self.assertEqual(0, advapi32.CloseEventLog(None)) - windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p - windll.kernel32.GetProcAddress.restype = c_void_p - proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog") - self.assertTrue(proc) - # This is the real test: call the function via 'call_function' - self.assertEqual(0, call_function(proc, (None,))) + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_B(self): + # Since on winXP 64-bit advapi32 loads like described + # above, the (arbitrarily selected) CloseEventLog function + # also has a high address. 'call_function' should accept + # addresses so large. + from _ctypes import call_function + advapi32 = windll.advapi32 + # Calling CloseEventLog with a NULL argument should fail, + # but the call should not segfault or so. + self.assertEqual(0, advapi32.CloseEventLog(None)) + windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p + windll.kernel32.GetProcAddress.restype = c_void_p + proc = windll.kernel32.GetProcAddress(advapi32._handle, + "CloseEventLog") + self.assertTrue(proc) + # This is the real test: call the function via 'call_function' + self.assertEqual(0, call_function(proc, (None,))) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -45,17 +45,21 @@ raise ValueError("%s not found" % (name,)) class MachOTest(unittest.TestCase): - if sys.platform == "darwin": - def test_find(self): + @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test') + def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + self.assertEqual(find_lib('pthread'), + '/usr/lib/libSystem.B.dylib') - result = find_lib('z') - self.assertTrue(result.endswith('.dylib')) + result = find_lib('z') + # Issue #21093: dyld 
default search path includes $HOME/lib and + # /usr/local/lib before /usr/lib, which caused test failures if + # a local copy of libz exists in one of them. Now ignore the head + # of the path. + self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertEqual(find_lib('IOKit'), + '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_memfunctions.py b/lib-python/2.7/ctypes/test/test_memfunctions.py --- a/lib-python/2.7/ctypes/test/test_memfunctions.py +++ b/lib-python/2.7/ctypes/test/test_memfunctions.py @@ -1,17 +1,19 @@ import sys import unittest from ctypes import * +from ctypes.test import need_symbol class MemFunctionsTest(unittest.TestCase): -## def test_overflow(self): -## # string_at and wstring_at must use the Python calling -## # convention (which acquires the GIL and checks the Python -## # error flag). Provoke an error and catch it; see also issue -## # #3554: -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: wstring_at(u"foo", sys.maxint - 1)) -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: string_at("foo", sys.maxint - 1)) + @unittest.skip('test disabled') + def test_overflow(self): + # string_at and wstring_at must use the Python calling + # convention (which acquires the GIL and checks the Python + # error flag). 
Provoke an error and catch it; see also issue + # #3554: + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: wstring_at(u"foo", sys.maxint - 1)) + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: string_at("foo", sys.maxint - 1)) def test_memmove(self): # large buffers apparently increase the chance that the memory @@ -60,21 +62,17 @@ self.assertEqual(string_at("foo bar", 8), "foo bar\0") self.assertEqual(string_at("foo bar", 3), "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_wstring_at(self): - p = create_unicode_buffer("Hello, World") - a = create_unicode_buffer(1000000) - result = memmove(a, p, len(p) * sizeof(c_wchar)) - self.assertEqual(a.value, "Hello, World") + @need_symbol('create_unicode_buffer') + def test_wstring_at(self): + p = create_unicode_buffer("Hello, World") + a = create_unicode_buffer(1000000) + result = memmove(a, p, len(p) * sizeof(c_wchar)) + self.assertEqual(a.value, "Hello, World") - self.assertEqual(wstring_at(a), "Hello, World") - self.assertEqual(wstring_at(a, 5), "Hello") - self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") - self.assertEqual(wstring_at(a, 0), "") + self.assertEqual(wstring_at(a), "Hello, World") + self.assertEqual(wstring_at(a, 5), "Hello") + self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") + self.assertEqual(wstring_at(a, 0), "") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -83,12 +83,13 @@ self.assertRaises(TypeError, t, "") self.assertRaises(TypeError, t, None) -## def test_valid_ranges(self): -## # invalid values of the correct type -## # raise ValueError (not OverflowError) -## for t, (l, h) in zip(unsigned_types, unsigned_ranges): -## self.assertRaises(ValueError, t, l-1) -## self.assertRaises(ValueError, t, h+1) + 
@unittest.skip('test disabled') + def test_valid_ranges(self): + # invalid values of the correct type + # raise ValueError (not OverflowError) + for t, (l, h) in zip(unsigned_types, unsigned_ranges): + self.assertRaises(ValueError, t, l-1) + self.assertRaises(ValueError, t, h+1) @xfail def test_from_param(self): @@ -185,10 +186,10 @@ a = array(t._type_, [3.14]) v = t.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) a[0] = 2.3456e17 self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) def test_char_from_address(self): from ctypes import c_char @@ -197,22 +198,23 @@ a = array('c', 'x') v = c_char.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is c_char) + self.assertIs(type(v), c_char) a[0] = '?' self.assertEqual(v.value, a[0]) # array does not support c_bool / 't' - # def test_bool_from_address(self): - # from ctypes import c_bool - # from array import array - # a = array(c_bool._type_, [True]) - # v = t.from_address(a.buffer_info()[0]) - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) - # a[0] = False - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) + @unittest.skip('test disabled') + def test_bool_from_address(self): + from ctypes import c_bool + from array import array + a = array(c_bool._type_, [True]) + v = t.from_address(a.buffer_info()[0]) + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) + a[0] = False + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
@@ -230,8 +232,9 @@ if (hasattr(t, "__ctype_le__")): self.assertRaises(OverflowError, t.__ctype_le__, big_int) -## def test_perf(self): -## check_perf() + @unittest.skip('test disabled') + def test_perf(self): + check_perf() from ctypes import _SimpleCData class c_int_S(_SimpleCData): diff --git a/lib-python/2.7/ctypes/test/test_objects.py b/lib-python/2.7/ctypes/test/test_objects.py --- a/lib-python/2.7/ctypes/test/test_objects.py +++ b/lib-python/2.7/ctypes/test/test_objects.py @@ -59,12 +59,9 @@ import ctypes.test.test_objects class TestCase(unittest.TestCase): - if sys.hexversion > 0x02040000: - # Python 2.3 has no ELLIPSIS flag, so we don't test with this - # version: - def test(self): - doctest.testmod(ctypes.test.test_objects) + def test(self): + failures, tests = doctest.testmod(ctypes.test.test_objects) + self.assertFalse(failures, 'doctests failed, see output above') if __name__ == '__main__': - if sys.hexversion > 0x02040000: - doctest.testmod(ctypes.test.test_objects) + doctest.testmod(ctypes.test.test_objects) diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py --- a/lib-python/2.7/ctypes/test/test_parameters.py +++ b/lib-python/2.7/ctypes/test/test_parameters.py @@ -1,4 +1,5 @@ import unittest, sys +from ctypes.test import need_symbol from ctypes.test import xfail @@ -38,10 +39,9 @@ self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") - try: - from ctypes import c_wchar_p - except ImportError: - return + @need_symbol('c_wchar_p') + def test_subclasses_c_wchar_p(self): + from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): @@ -58,7 +58,7 @@ # c_char_p.from_param on a Python String packs the string # into a cparam object s = "123" - self.assertTrue(c_char_p.from_param(s)._obj is s) + self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii 
self.assertEqual(c_char_p.from_param(u"123")._obj, "123") @@ -69,15 +69,11 @@ # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p("123") - self.assertTrue(c_char_p.from_param(a) is a) + self.assertIs(c_char_p.from_param(a), a) + @need_symbol('c_wchar_p') def test_cw_strings(self): - from ctypes import byref - try: - from ctypes import c_wchar_p - except ImportError: -## print "(No c_wchar_p)" - return + from ctypes import byref, c_wchar_p s = u"123" if sys.platform == "win32": self.assertTrue(c_wchar_p.from_param(s)._obj is s) @@ -150,9 +146,6 @@ self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) -## def test_performance(self): -## check_perf() - def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py --- a/lib-python/2.7/ctypes/test/test_pep3118.py +++ b/lib-python/2.7/ctypes/test/test_pep3118.py @@ -95,6 +95,10 @@ class aUnion(Union): _fields_ = [("a", c_int)] +class StructWithArrays(Structure): + _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)] + + class Incomplete(Structure): pass @@ -144,10 +148,10 @@ ## arrays and pointers - (c_double * 4, "(4) Author: Alex Gaynor Branch: Changeset: r73123:551a8a15b4a4 Date: 2014-08-28 14:49 -0700 http://bitbucket.org/pypy/pypy/changeset/551a8a15b4a4/ Log: Added docs for the branch to whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,3 +57,6 @@ .. branch: split-ast-classes Classes in the ast module are now distinct from structures used by the compiler. + +.. 
branch: stdlib-2.7.8 +Upgrades from 2.7.6 to 2.7.8 From noreply at buildbot.pypy.org Fri Aug 29 00:06:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 00:06:10 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: rename some functions for clarity Message-ID: <20140828220611.0318F1D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73124:b12884a291d1 Date: 2014-08-28 17:52 -0400 http://bitbucket.org/pypy/pypy/changeset/b12884a291d1/ Log: rename some functions for clarity diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -41,15 +41,15 @@ _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] -c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_close = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) +c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], rffi.SIZE_T) c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], rffi.SIZE_T) c_feof = llexternal('feof', [FILEP], rffi.INT) c_ferror = llexternal('ferror', [FILEP], rffi.INT) -c_clearerror = llexternal('clearerr', [FILEP], lltype.Void) +c_clearerr = llexternal('clearerr', [FILEP], lltype.Void) c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT) c_tmpfile = llexternal('tmpfile', [], FILEP) @@ -73,7 +73,7 @@ def _error(ll_file): errno = c_ferror(ll_file) - c_clearerror(ll_file) + c_clearerr(ll_file) raise OSError(errno, os.strerror(errno)) @@ -84,7 +84,7 @@ try: ll_mode = rffi.str2charp(mode) try: - ll_f = c_open(ll_name, ll_mode) + ll_f = c_fopen(ll_name, ll_mode) if not ll_f: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -173,7 +173,7 @@ raise OSError(errno, os.strerror(errno)) return res - _do_close = 
staticmethod(c_close) # overridden in RPopenFile + _do_close = staticmethod(c_fclose) # overridden in RPopenFile def read(self, size=-1): # XXX CPython uses a more delicate logic here From noreply at buildbot.pypy.org Fri Aug 29 00:06:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 00:06:12 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: test/fix rfile buffering Message-ID: <20140828220612.4A8441D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73125:be91fae57988 Date: 2014-08-28 18:05 -0400 http://bitbucket.org/pypy/pypy/changeset/be91fae57988/ Log: test/fix rfile buffering diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -32,6 +32,7 @@ _IONBF = platform.DefinedConstantInteger('_IONBF') _IOLBF = platform.DefinedConstantInteger('_IOLBF') _IOFBF = platform.DefinedConstantInteger('_IOFBF') + BUFSIZ = platform.DefinedConstantInteger('BUFSIZ') config = platform.configure(CConfig) @@ -40,6 +41,7 @@ _IONBF = config['_IONBF'] _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] +BUFSIZ = config['BUFSIZ'] c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) @@ -65,7 +67,7 @@ c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) -c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], lltype.Void) +c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -92,8 +94,13 @@ lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') - if buffering != -1: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) + if buffering >= 0: + if buffering == 0: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) + elif buffering == 
1: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) + else: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) return RFile(ll_f) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -20,18 +20,36 @@ self.interpret(f, []) assert open(fname, "r").read() == "dupa" - def test_open_buffering(self): + def test_open_buffering_line(self): fname = str(self.tmpdir.join('file_1a')) def f(): - f = open(fname, 'w', 3) + f = open(fname, 'w', 1) + f.write('dupa\ndupb') + f2 = open(fname, 'r') + assert f2.read() == 'dupa\n' + f.close() + assert f2.read() == 'dupb' + f2.close() + + f() + self.interpret(f, []) + + def test_open_buffering_full(self): + fname = str(self.tmpdir.join('file_1b')) + + def f(): + f = open(fname, 'w', 128) f.write('dupa') f2 = open(fname, 'r') assert f2.read() == '' + f.write('z' * 5000) + assert f2.read() != '' f.close() - f2 = open(fname, 'r') - assert f2.read() == 'dupa' + assert f2.read() != '' + f2.close() + f() self.interpret(f, []) def test_read_write(self): From noreply at buildbot.pypy.org Fri Aug 29 00:12:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 00:12:43 +0200 (CEST) Subject: [pypy-commit] pypy default: another docstring for listobject Message-ID: <20140828221243.072141C3CCC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73126:117bce1f2cea Date: 2014-08-28 18:12 -0400 http://bitbucket.org/pypy/pypy/changeset/117bce1f2cea/ Log: another docstring for listobject diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -364,6 +364,7 @@ return w_obj def descr_init(self, space, __args__): + """x.__init__(...) 
initializes x; see help(type(x)) for signature""" # this is on the silly side w_iterable, = __args__.parse_obj( None, 'list', init_signature, init_defaults) diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -444,6 +444,7 @@ def test_doc(self): assert list.__doc__ == "list() -> new empty list\nlist(iterable) -> new list initialized from iterable's items" assert list.__new__.__doc__ == "T.__new__(S, ...) -> a new object with type S, a subtype of T" + assert list.__init__.__doc__ == "x.__init__(...) initializes x; see help(type(x)) for signature" def test_getstrategyfromlist_w(self): l0 = ["a", "2", "a", True] From noreply at buildbot.pypy.org Fri Aug 29 00:52:55 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 00:52:55 +0200 (CEST) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140828225255.238301C000D@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73127:986e16e0e1fc Date: 2014-08-28 15:47 -0700 http://bitbucket.org/pypy/pypy/changeset/986e16e0e1fc/ Log: merge default diff too long, truncating to 2000 out of 34965 lines diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). 
""" - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: #bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: 
w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). 
diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. 
- for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. 
""" diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' 
in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise 
ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -319,6 +319,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -331,6 +332,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -2,6 +2,8 @@ from ctypes import * from test.test_support import impl_detail +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" # c_longdouble commented out for PyPy, look at the commend in test_longdouble @@ -98,8 +100,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -112,20 +114,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -139,7 +137,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git 
a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -131,15 +132,6 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result[0], TypeError) - self.assertIn('bit fields not allowed for type', result[1]) - class Dummy(Structure): _fields_ = [] @@ -147,6 +139,12 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, 
-1)) @@ -213,7 +211,7 @@ class X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -246,7 +244,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -256,7 +254,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 
32) + self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -15,7 +15,8 @@ # For Structures and Unions, 
these types are created on demand. class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -25,11 +26,11 @@ @xfail def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -53,11 +54,11 @@ @xfail def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -82,11 +83,11 @@ @xfail def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + 
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -111,11 +112,11 @@ @xfail def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -130,11 +131,11 @@ @xfail def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -146,14 +147,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + 
self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) @xfail def test_struct_fields_1(self): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol from ctypes.test import xfail import _ctypes_test @@ -95,9 +96,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") @xfail def test_pyobject(self): @@ -150,13 +152,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -186,7 +187,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -198,23 +199,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes 
import BOOL, HWND, LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, 
c_void_p), c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test from test.test_support import impl_detail @@ -196,7 +197,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -206,9 +207,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): 
-## return "" % self.value - -##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -25,7 +25,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest from ctypes.test import xfail from test.test_support import impl_detail @@ -65,22 +66,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -158,11 +153,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -299,6 +291,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -309,7 +302,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -351,16 +344,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def 
test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = [("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): @@ -379,23 +372,24 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) - if sys.platform == "win32": - def test_struct_return_8H_stdcall(self): - class S8I(Structure): - _fields_ = [("a", c_int), - ("b", c_int), - ("c", c_int), - ("d", c_int), - ("e", c_int), - ("f", c_int), - ("g", c_int), - ("h", c_int)] - windll.s_ret_8i_func.restype = S8I - windll.s_ret_8i_func.argtypes = [S8I] - inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) - s8i = windll.s_ret_8i_func(inp) - self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), - (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_8H_stdcall(self): + class S8I(Structure): + _fields_ = [("a", c_int), + ("b", c_int), + ("c", c_int), + ("d", c_int), + ("e", c_int), + ("f", c_int), + ("g", c_int), + ("h", c_int)] + windll.s_ret_8i_func.restype = S8I + windll.s_ret_8i_func.argtypes = [S8I] + inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) + s8i = windll.s_ret_8i_func(inp) + self.assertEqual( + (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), + (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) @xfail def test_sf1651235(self): diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_integers.py +++ /dev/null @@ -1,5 +0,0 @@ -# superseded by test_numbers.py -import unittest - -if 
__name__ == '__main__': - unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_keeprefs.py b/lib-python/2.7/ctypes/test/test_keeprefs.py --- a/lib-python/2.7/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7/ctypes/test/test_keeprefs.py @@ -94,7 +94,8 @@ self.assertEqual(x._objects, {'1': i}) class DeletePointerTestCase(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): class X(Structure): _fields_ = [("p", POINTER(c_char_p))] x = X() diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py --- a/lib-python/2.7/ctypes/test/test_loading.py +++ b/lib-python/2.7/ctypes/test/test_loading.py @@ -21,18 +21,21 @@ unknowndll = "xxrandomnamexx" - if libc_name is not None: - def test_load(self): - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) - self.assertRaises(OSError, CDLL, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + def test_load(self): + CDLL(libc_name) + CDLL(os.path.basename(libc_name)) + self.assertRaises(OSError, CDLL, self.unknowndll) - if libc_name is not None and os.path.basename(libc_name) == "libc.so.6": - def test_load_version(self): - cdll.LoadLibrary("libc.so.6") - # linux uses version, libc 9 should not exist - self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") - self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + @unittest.skipUnless(libc_name is not None and + os.path.basename(libc_name) == "libc.so.6", + 'wrong libc path for test') + def test_load_version(self): + cdll.LoadLibrary("libc.so.6") + # linux uses version, libc 9 should not exist + self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") + self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) def test_find(self): for name in ("c", "m"): @@ -41,68 +44,73 @@ cdll.LoadLibrary(lib) CDLL(lib) - if os.name in ("nt", "ce"): - def test_load_library(self): - 
self.assertFalse(libc_name is None) - if is_resource_enabled("printing"): - print find_library("kernel32") - print find_library("user32") + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_library(self): + self.assertIsNotNone(libc_name) + if is_resource_enabled("printing"): + print find_library("kernel32") + print find_library("user32") - if os.name == "nt": - windll.kernel32.GetModuleHandleW - windll["kernel32"].GetModuleHandleW - windll.LoadLibrary("kernel32").GetModuleHandleW - WinDLL("kernel32").GetModuleHandleW - elif os.name == "ce": - windll.coredll.GetModuleHandleW - windll["coredll"].GetModuleHandleW - windll.LoadLibrary("coredll").GetModuleHandleW - WinDLL("coredll").GetModuleHandleW + if os.name == "nt": + windll.kernel32.GetModuleHandleW + windll["kernel32"].GetModuleHandleW + windll.LoadLibrary("kernel32").GetModuleHandleW + WinDLL("kernel32").GetModuleHandleW + elif os.name == "ce": + windll.coredll.GetModuleHandleW + windll["coredll"].GetModuleHandleW + windll.LoadLibrary("coredll").GetModuleHandleW + WinDLL("coredll").GetModuleHandleW - def test_load_ordinal_functions(self): - import _ctypes_test - dll = WinDLL(_ctypes_test.__file__) - # We load the same function both via ordinal and name - func_ord = dll[2] - func_name = dll.GetString - # addressof gets the address where the function pointer is stored - a_ord = addressof(func_ord) - a_name = addressof(func_name) - f_ord_addr = c_void_p.from_address(a_ord).value - f_name_addr = c_void_p.from_address(a_name).value - self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_ordinal_functions(self): + import _ctypes_test + dll = WinDLL(_ctypes_test.__file__) + # We load the same function both via ordinal and name + func_ord = dll[2] + func_name = dll.GetString + # addressof gets the address where the function pointer is stored + a_ord = addressof(func_ord) 
+ a_name = addressof(func_name) + f_ord_addr = c_void_p.from_address(a_ord).value + f_name_addr = c_void_p.from_address(a_name).value + self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) - self.assertRaises(AttributeError, dll.__getitem__, 1234) + self.assertRaises(AttributeError, dll.__getitem__, 1234) - if os.name == "nt": - @xfail - def test_1703286_A(self): - from _ctypes import LoadLibrary, FreeLibrary - # On winXP 64-bit, advapi32 loads at an address that does - # NOT fit into a 32-bit integer. FreeLibrary must be able - # to accept this address. + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_A(self): + from _ctypes import LoadLibrary, FreeLibrary + # On winXP 64-bit, advapi32 loads at an address that does + # NOT fit into a 32-bit integer. FreeLibrary must be able + # to accept this address. - # These are tests for http://www.python.org/sf/1703286 - handle = LoadLibrary("advapi32") - FreeLibrary(handle) + # These are tests for http://www.python.org/sf/1703286 + handle = LoadLibrary("advapi32") + FreeLibrary(handle) - @xfail - def test_1703286_B(self): - # Since on winXP 64-bit advapi32 loads like described - # above, the (arbitrarily selected) CloseEventLog function - # also has a high address. 'call_function' should accept - # addresses so large. - from _ctypes import call_function - advapi32 = windll.advapi32 - # Calling CloseEventLog with a NULL argument should fail, - # but the call should not segfault or so. 
- self.assertEqual(0, advapi32.CloseEventLog(None)) - windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p - windll.kernel32.GetProcAddress.restype = c_void_p - proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog") - self.assertTrue(proc) - # This is the real test: call the function via 'call_function' - self.assertEqual(0, call_function(proc, (None,))) + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_B(self): + # Since on winXP 64-bit advapi32 loads like described + # above, the (arbitrarily selected) CloseEventLog function + # also has a high address. 'call_function' should accept + # addresses so large. + from _ctypes import call_function + advapi32 = windll.advapi32 + # Calling CloseEventLog with a NULL argument should fail, + # but the call should not segfault or so. + self.assertEqual(0, advapi32.CloseEventLog(None)) + windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p + windll.kernel32.GetProcAddress.restype = c_void_p + proc = windll.kernel32.GetProcAddress(advapi32._handle, + "CloseEventLog") + self.assertTrue(proc) + # This is the real test: call the function via 'call_function' + self.assertEqual(0, call_function(proc, (None,))) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -45,17 +45,21 @@ raise ValueError("%s not found" % (name,)) class MachOTest(unittest.TestCase): - if sys.platform == "darwin": - def test_find(self): + @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test') + def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + self.assertEqual(find_lib('pthread'), + '/usr/lib/libSystem.B.dylib') - result = find_lib('z') - self.assertTrue(result.endswith('.dylib')) + result = find_lib('z') + # Issue #21093: dyld 
default search path includes $HOME/lib and + # /usr/local/lib before /usr/lib, which caused test failures if + # a local copy of libz exists in one of them. Now ignore the head + # of the path. + self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertEqual(find_lib('IOKit'), + '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_memfunctions.py b/lib-python/2.7/ctypes/test/test_memfunctions.py --- a/lib-python/2.7/ctypes/test/test_memfunctions.py +++ b/lib-python/2.7/ctypes/test/test_memfunctions.py @@ -1,17 +1,19 @@ import sys import unittest from ctypes import * +from ctypes.test import need_symbol class MemFunctionsTest(unittest.TestCase): -## def test_overflow(self): -## # string_at and wstring_at must use the Python calling -## # convention (which acquires the GIL and checks the Python -## # error flag). Provoke an error and catch it; see also issue -## # #3554: -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: wstring_at(u"foo", sys.maxint - 1)) -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: string_at("foo", sys.maxint - 1)) + @unittest.skip('test disabled') + def test_overflow(self): + # string_at and wstring_at must use the Python calling + # convention (which acquires the GIL and checks the Python + # error flag). 
Provoke an error and catch it; see also issue + # #3554: + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: wstring_at(u"foo", sys.maxint - 1)) + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: string_at("foo", sys.maxint - 1)) def test_memmove(self): # large buffers apparently increase the chance that the memory @@ -60,21 +62,17 @@ self.assertEqual(string_at("foo bar", 8), "foo bar\0") self.assertEqual(string_at("foo bar", 3), "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_wstring_at(self): - p = create_unicode_buffer("Hello, World") - a = create_unicode_buffer(1000000) - result = memmove(a, p, len(p) * sizeof(c_wchar)) - self.assertEqual(a.value, "Hello, World") + @need_symbol('create_unicode_buffer') + def test_wstring_at(self): + p = create_unicode_buffer("Hello, World") + a = create_unicode_buffer(1000000) + result = memmove(a, p, len(p) * sizeof(c_wchar)) + self.assertEqual(a.value, "Hello, World") - self.assertEqual(wstring_at(a), "Hello, World") - self.assertEqual(wstring_at(a, 5), "Hello") - self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") - self.assertEqual(wstring_at(a, 0), "") + self.assertEqual(wstring_at(a), "Hello, World") + self.assertEqual(wstring_at(a, 5), "Hello") + self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") + self.assertEqual(wstring_at(a, 0), "") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -83,12 +83,13 @@ self.assertRaises(TypeError, t, "") self.assertRaises(TypeError, t, None) -## def test_valid_ranges(self): -## # invalid values of the correct type -## # raise ValueError (not OverflowError) -## for t, (l, h) in zip(unsigned_types, unsigned_ranges): -## self.assertRaises(ValueError, t, l-1) -## self.assertRaises(ValueError, t, h+1) + 
@unittest.skip('test disabled') + def test_valid_ranges(self): + # invalid values of the correct type + # raise ValueError (not OverflowError) + for t, (l, h) in zip(unsigned_types, unsigned_ranges): + self.assertRaises(ValueError, t, l-1) + self.assertRaises(ValueError, t, h+1) @xfail def test_from_param(self): @@ -185,10 +186,10 @@ a = array(t._type_, [3.14]) v = t.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) a[0] = 2.3456e17 self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) def test_char_from_address(self): from ctypes import c_char @@ -197,22 +198,23 @@ a = array('c', 'x') v = c_char.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is c_char) + self.assertIs(type(v), c_char) a[0] = '?' self.assertEqual(v.value, a[0]) # array does not support c_bool / 't' - # def test_bool_from_address(self): - # from ctypes import c_bool - # from array import array - # a = array(c_bool._type_, [True]) - # v = t.from_address(a.buffer_info()[0]) - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) - # a[0] = False - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) + @unittest.skip('test disabled') + def test_bool_from_address(self): + from ctypes import c_bool + from array import array + a = array(c_bool._type_, [True]) + v = t.from_address(a.buffer_info()[0]) + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) + a[0] = False + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
@@ -230,8 +232,9 @@ if (hasattr(t, "__ctype_le__")): self.assertRaises(OverflowError, t.__ctype_le__, big_int) -## def test_perf(self): -## check_perf() + @unittest.skip('test disabled') + def test_perf(self): + check_perf() from ctypes import _SimpleCData class c_int_S(_SimpleCData): diff --git a/lib-python/2.7/ctypes/test/test_objects.py b/lib-python/2.7/ctypes/test/test_objects.py --- a/lib-python/2.7/ctypes/test/test_objects.py +++ b/lib-python/2.7/ctypes/test/test_objects.py @@ -59,12 +59,9 @@ import ctypes.test.test_objects class TestCase(unittest.TestCase): - if sys.hexversion > 0x02040000: - # Python 2.3 has no ELLIPSIS flag, so we don't test with this - # version: - def test(self): - doctest.testmod(ctypes.test.test_objects) + def test(self): + failures, tests = doctest.testmod(ctypes.test.test_objects) + self.assertFalse(failures, 'doctests failed, see output above') if __name__ == '__main__': - if sys.hexversion > 0x02040000: - doctest.testmod(ctypes.test.test_objects) + doctest.testmod(ctypes.test.test_objects) diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py --- a/lib-python/2.7/ctypes/test/test_parameters.py +++ b/lib-python/2.7/ctypes/test/test_parameters.py @@ -1,4 +1,5 @@ import unittest, sys +from ctypes.test import need_symbol from ctypes.test import xfail @@ -38,10 +39,9 @@ self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") - try: - from ctypes import c_wchar_p - except ImportError: - return + @need_symbol('c_wchar_p') + def test_subclasses_c_wchar_p(self): + from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): @@ -58,7 +58,7 @@ # c_char_p.from_param on a Python String packs the string # into a cparam object s = "123" - self.assertTrue(c_char_p.from_param(s)._obj is s) + self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii 
self.assertEqual(c_char_p.from_param(u"123")._obj, "123") @@ -69,15 +69,11 @@ # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p("123") - self.assertTrue(c_char_p.from_param(a) is a) + self.assertIs(c_char_p.from_param(a), a) + @need_symbol('c_wchar_p') def test_cw_strings(self): - from ctypes import byref - try: - from ctypes import c_wchar_p - except ImportError: -## print "(No c_wchar_p)" - return + from ctypes import byref, c_wchar_p s = u"123" if sys.platform == "win32": self.assertTrue(c_wchar_p.from_param(s)._obj is s) @@ -150,9 +146,6 @@ self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) -## def test_performance(self): -## check_perf() - def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py --- a/lib-python/2.7/ctypes/test/test_pep3118.py +++ b/lib-python/2.7/ctypes/test/test_pep3118.py @@ -95,6 +95,10 @@ class aUnion(Union): _fields_ = [("a", c_int)] +class StructWithArrays(Structure): + _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)] + + class Incomplete(Structure): pass @@ -144,10 +148,10 @@ ## arrays and pointers - (c_double * 4, "(4) Author: Philip Jenvey Branch: py3k Changeset: r73128:e8ec916b9a16 Date: 2014-08-28 15:52 -0700 http://bitbucket.org/pypy/pypy/changeset/e8ec916b9a16/ Log: adapt to py3 diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py --- a/lib_pypy/_tkinter/app.py +++ b/lib_pypy/_tkinter/app.py @@ -450,8 +450,7 @@ def getint(self, s): if isinstance(s, int): return s - if isinstance(s, unicode): - s = str(s) + s = s.encode('utf-8') if '\x00' in s: raise TypeError v = tkffi.new("int*") @@ -463,8 +462,7 @@ def getdouble(self, s): if isinstance(s, float): return s - if isinstance(s, unicode): - s = str(s) + s = s.encode('utf-8') if '\x00' in s: raise TypeError v = 
tkffi.new("double*") diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py --- a/lib_pypy/_tkinter/tclobj.py +++ b/lib_pypy/_tkinter/tclobj.py @@ -14,19 +14,14 @@ def FromTclString(s): - # If the result contains any bytes with the top bit set, it's - # UTF-8 and we should decode it to Unicode. try: - s.decode('ascii') + return s.decode('utf8') except UnicodeDecodeError: + # Tcl encodes null character as \xc0\x80 try: - return s.decode('utf8') + return s.replace('\xc0\x80', '\x00').decode('utf-8') except UnicodeDecodeError: - # Tcl encodes null character as \xc0\x80 - try: - return s.replace('\xc0\x80', '\x00').decode('utf-8') - except UnicodeDecodeError: - pass + pass return s From noreply at buildbot.pypy.org Fri Aug 29 01:15:19 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 29 Aug 2014 01:15:19 +0200 (CEST) Subject: [pypy-commit] pypy default: More docstring Message-ID: <20140828231519.051B81C3CCC@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73129:14b8d76b9d53 Date: 2014-08-28 16:14 -0700 http://bitbucket.org/pypy/pypy/changeset/14b8d76b9d53/ Log: More docstring diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -818,6 +818,9 @@ Method.typedef = TypeDef( "method", + __doc__ = """instancemethod(function, instance, class) + +Create an instance method object.""", __new__ = interp2app(Method.descr_method__new__.im_func), __call__ = interp2app(Method.descr_method_call), __get__ = interp2app(Method.descr_method_get), From noreply at buildbot.pypy.org Fri Aug 29 01:46:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 01:46:00 +0200 (CEST) Subject: [pypy-commit] pypy default: docstring for Method Message-ID: <20140828234600.C0FFE1C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73130:45d3cb1cdc8c Date: 2014-08-28 19:44 -0400 
http://bitbucket.org/pypy/pypy/changeset/45d3cb1cdc8c/ Log: docstring for Method diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -379,6 +379,7 @@ assert bm.im_class is B assert bm.__doc__ == "aaa" assert bm.x == 3 + assert type(bm).__doc__ == "instancemethod(function, instance, class)\n\nCreate an instance method object." raises(AttributeError, setattr, bm, 'x', 15) l = [] assert l.append.__self__ is l diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -833,6 +833,7 @@ __repr__ = interp2app(Method.descr_method_repr), __reduce__ = interp2app(Method.descr_method__reduce__), __weakref__ = make_weakref_descr(Method), + __doc__ = """instancemethod(function, instance, class)\n\nCreate an instance method object.""" ) Method.typedef.acceptable_as_base_class = False From noreply at buildbot.pypy.org Fri Aug 29 01:46:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 01:46:02 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140828234602.011201C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73131:fc874a203461 Date: 2014-08-28 19:45 -0400 http://bitbucket.org/pypy/pypy/changeset/fc874a203461/ Log: merge heads diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -818,6 +818,9 @@ Method.typedef = TypeDef( "method", + __doc__ = """instancemethod(function, instance, class) + +Create an instance method object.""", __new__ = interp2app(Method.descr_method__new__.im_func), __call__ = interp2app(Method.descr_method_call), __get__ = interp2app(Method.descr_method_get), @@ -833,7 +836,6 @@ __repr__ = interp2app(Method.descr_method_repr), __reduce__ = interp2app(Method.descr_method__reduce__), 
__weakref__ = make_weakref_descr(Method), - __doc__ = """instancemethod(function, instance, class)\n\nCreate an instance method object.""" ) Method.typedef.acceptable_as_base_class = False From noreply at buildbot.pypy.org Fri Aug 29 03:21:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 03:21:43 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix rfile.truncate Message-ID: <20140829012143.A84FC1D3551@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73132:c337d9417d2e Date: 2014-08-28 21:20 -0400 http://bitbucket.org/pypy/pypy/changeset/c337d9417d2e/ Log: test/fix rfile.truncate diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -233,6 +233,7 @@ if self.ll_file: if arg == -1: arg = self.tell() + self.flush() res = c_ftruncate(self.fileno(), arg) if res == -1: errno = rposix.get_errno() diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -140,14 +140,14 @@ fname = str(self.tmpdir.join('file_trunc')) def f(): - f = open(fname, "w") - f.write("xyz") + f = open(fname, "w+b") + f.write("hello world") + f.seek(7) + f.truncate() f.seek(0) - f.truncate(2) + data = f.read() + assert data == "hello w" f.close() - f2 = open(fname) - assert f2.read() == "xy" - f2.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 03:21:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 03:21:51 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829012151.6C7041D3551@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73133:78976787103e Date: 2014-08-28 21:21 -0400 http://bitbucket.org/pypy/pypy/changeset/78976787103e/ Log: merge default diff too long, truncating to 2000 out of 48735 lines diff --git a/LICENSE b/LICENSE --- 
a/LICENSE +++ b/LICENSE @@ -354,6 +354,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name + <[a-zA-Z][^\t\n\r\f 
/>\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: #bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + self.send_header("Content-Length", 
str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. 
try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? 
HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. """ diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' 
for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' 
and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise 
ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -319,6 +319,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -331,6 +332,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -2,6 +2,8 @@ from ctypes import * from test.test_support import impl_detail +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" # c_longdouble commented out for PyPy, look at the commend in test_longdouble @@ -98,8 +100,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -112,20 +114,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -139,7 +137,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git 
a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -131,15 +132,6 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result[0], TypeError) - self.assertIn('bit fields not allowed for type', result[1]) - class Dummy(Structure): _fields_ = [] @@ -147,6 +139,12 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, 
-1)) @@ -213,7 +211,7 @@ class X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -246,7 +244,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -256,7 +254,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 
32) + self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -15,7 +15,8 @@ # For Structures and Unions, 
these types are created on demand. class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -25,11 +26,11 @@ @xfail def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -53,11 +54,11 @@ @xfail def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -82,11 +83,11 @@ @xfail def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + 
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -111,11 +112,11 @@ @xfail def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -130,11 +131,11 @@ @xfail def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -146,14 +147,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + 
self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) @xfail def test_struct_fields_1(self): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol from ctypes.test import xfail import _ctypes_test @@ -95,9 +96,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") @xfail def test_pyobject(self): @@ -150,13 +152,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -186,7 +187,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -198,23 +199,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes 
import BOOL, HWND, LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, 
c_void_p), c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test from test.test_support import impl_detail @@ -196,7 +197,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -206,9 +207,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): 
-## return "" % self.value - -##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -25,7 +25,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest from ctypes.test import xfail from test.test_support import impl_detail @@ -65,22 +66,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -158,11 +153,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -299,6 +291,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -309,7 +302,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -351,16 +344,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def 
test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = [("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): @@ -379,23 +372,24 @@ self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) - if sys.platform == "win32": - def test_struct_return_8H_stdcall(self): - class S8I(Structure): - _fields_ = [("a", c_int), - ("b", c_int), - ("c", c_int), - ("d", c_int), - ("e", c_int), - ("f", c_int), - ("g", c_int), - ("h", c_int)] - windll.s_ret_8i_func.restype = S8I - windll.s_ret_8i_func.argtypes = [S8I] - inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) - s8i = windll.s_ret_8i_func(inp) - self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), - (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def test_struct_return_8H_stdcall(self): + class S8I(Structure): + _fields_ = [("a", c_int), + ("b", c_int), + ("c", c_int), + ("d", c_int), + ("e", c_int), + ("f", c_int), + ("g", c_int), + ("h", c_int)] + windll.s_ret_8i_func.restype = S8I + windll.s_ret_8i_func.argtypes = [S8I] + inp = S8I(9, 8, 7, 6, 5, 4, 3, 2) + s8i = windll.s_ret_8i_func(inp) + self.assertEqual( + (s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h), + (9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9)) @xfail def test_sf1651235(self): diff --git a/lib-python/2.7/ctypes/test/test_integers.py b/lib-python/2.7/ctypes/test/test_integers.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_integers.py +++ /dev/null @@ -1,5 +0,0 @@ -# superseded by test_numbers.py -import unittest - -if 
__name__ == '__main__': - unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_keeprefs.py b/lib-python/2.7/ctypes/test/test_keeprefs.py --- a/lib-python/2.7/ctypes/test/test_keeprefs.py +++ b/lib-python/2.7/ctypes/test/test_keeprefs.py @@ -94,7 +94,8 @@ self.assertEqual(x._objects, {'1': i}) class DeletePointerTestCase(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): class X(Structure): _fields_ = [("p", POINTER(c_char_p))] x = X() diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py --- a/lib-python/2.7/ctypes/test/test_loading.py +++ b/lib-python/2.7/ctypes/test/test_loading.py @@ -21,18 +21,21 @@ unknowndll = "xxrandomnamexx" - if libc_name is not None: - def test_load(self): - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) - self.assertRaises(OSError, CDLL, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + def test_load(self): + CDLL(libc_name) + CDLL(os.path.basename(libc_name)) + self.assertRaises(OSError, CDLL, self.unknowndll) - if libc_name is not None and os.path.basename(libc_name) == "libc.so.6": - def test_load_version(self): - cdll.LoadLibrary("libc.so.6") - # linux uses version, libc 9 should not exist - self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") - self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) + @unittest.skipUnless(libc_name is not None, 'could not find libc') + @unittest.skipUnless(libc_name is not None and + os.path.basename(libc_name) == "libc.so.6", + 'wrong libc path for test') + def test_load_version(self): + cdll.LoadLibrary("libc.so.6") + # linux uses version, libc 9 should not exist + self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9") + self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll) def test_find(self): for name in ("c", "m"): @@ -41,68 +44,73 @@ cdll.LoadLibrary(lib) CDLL(lib) - if os.name in ("nt", "ce"): - def test_load_library(self): - 
self.assertFalse(libc_name is None) - if is_resource_enabled("printing"): - print find_library("kernel32") - print find_library("user32") + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_library(self): + self.assertIsNotNone(libc_name) + if is_resource_enabled("printing"): + print find_library("kernel32") + print find_library("user32") - if os.name == "nt": - windll.kernel32.GetModuleHandleW - windll["kernel32"].GetModuleHandleW - windll.LoadLibrary("kernel32").GetModuleHandleW - WinDLL("kernel32").GetModuleHandleW - elif os.name == "ce": - windll.coredll.GetModuleHandleW - windll["coredll"].GetModuleHandleW - windll.LoadLibrary("coredll").GetModuleHandleW - WinDLL("coredll").GetModuleHandleW + if os.name == "nt": + windll.kernel32.GetModuleHandleW + windll["kernel32"].GetModuleHandleW + windll.LoadLibrary("kernel32").GetModuleHandleW + WinDLL("kernel32").GetModuleHandleW + elif os.name == "ce": + windll.coredll.GetModuleHandleW + windll["coredll"].GetModuleHandleW + windll.LoadLibrary("coredll").GetModuleHandleW + WinDLL("coredll").GetModuleHandleW - def test_load_ordinal_functions(self): - import _ctypes_test - dll = WinDLL(_ctypes_test.__file__) - # We load the same function both via ordinal and name - func_ord = dll[2] - func_name = dll.GetString - # addressof gets the address where the function pointer is stored - a_ord = addressof(func_ord) - a_name = addressof(func_name) - f_ord_addr = c_void_p.from_address(a_ord).value - f_name_addr = c_void_p.from_address(a_name).value - self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) + @unittest.skipUnless(os.name in ("nt", "ce"), + 'test specific to Windows (NT/CE)') + def test_load_ordinal_functions(self): + import _ctypes_test + dll = WinDLL(_ctypes_test.__file__) + # We load the same function both via ordinal and name + func_ord = dll[2] + func_name = dll.GetString + # addressof gets the address where the function pointer is stored + a_ord = addressof(func_ord) 
+ a_name = addressof(func_name) + f_ord_addr = c_void_p.from_address(a_ord).value + f_name_addr = c_void_p.from_address(a_name).value + self.assertEqual(hex(f_ord_addr), hex(f_name_addr)) - self.assertRaises(AttributeError, dll.__getitem__, 1234) + self.assertRaises(AttributeError, dll.__getitem__, 1234) - if os.name == "nt": - @xfail - def test_1703286_A(self): - from _ctypes import LoadLibrary, FreeLibrary - # On winXP 64-bit, advapi32 loads at an address that does - # NOT fit into a 32-bit integer. FreeLibrary must be able - # to accept this address. + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_A(self): + from _ctypes import LoadLibrary, FreeLibrary + # On winXP 64-bit, advapi32 loads at an address that does + # NOT fit into a 32-bit integer. FreeLibrary must be able + # to accept this address. - # These are tests for http://www.python.org/sf/1703286 - handle = LoadLibrary("advapi32") - FreeLibrary(handle) + # These are tests for http://www.python.org/sf/1703286 + handle = LoadLibrary("advapi32") + FreeLibrary(handle) - @xfail - def test_1703286_B(self): - # Since on winXP 64-bit advapi32 loads like described - # above, the (arbitrarily selected) CloseEventLog function - # also has a high address. 'call_function' should accept - # addresses so large. - from _ctypes import call_function - advapi32 = windll.advapi32 - # Calling CloseEventLog with a NULL argument should fail, - # but the call should not segfault or so. 
- self.assertEqual(0, advapi32.CloseEventLog(None)) - windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p - windll.kernel32.GetProcAddress.restype = c_void_p - proc = windll.kernel32.GetProcAddress(advapi32._handle, "CloseEventLog") - self.assertTrue(proc) - # This is the real test: call the function via 'call_function' - self.assertEqual(0, call_function(proc, (None,))) + @xfail + @unittest.skipUnless(os.name == "nt", 'Windows-specific test') + def test_1703286_B(self): + # Since on winXP 64-bit advapi32 loads like described + # above, the (arbitrarily selected) CloseEventLog function + # also has a high address. 'call_function' should accept + # addresses so large. + from _ctypes import call_function + advapi32 = windll.advapi32 + # Calling CloseEventLog with a NULL argument should fail, + # but the call should not segfault or so. + self.assertEqual(0, advapi32.CloseEventLog(None)) + windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p + windll.kernel32.GetProcAddress.restype = c_void_p + proc = windll.kernel32.GetProcAddress(advapi32._handle, + "CloseEventLog") + self.assertTrue(proc) + # This is the real test: call the function via 'call_function' + self.assertEqual(0, call_function(proc, (None,))) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_macholib.py b/lib-python/2.7/ctypes/test/test_macholib.py --- a/lib-python/2.7/ctypes/test/test_macholib.py +++ b/lib-python/2.7/ctypes/test/test_macholib.py @@ -45,17 +45,21 @@ raise ValueError("%s not found" % (name,)) class MachOTest(unittest.TestCase): - if sys.platform == "darwin": - def test_find(self): + @unittest.skipUnless(sys.platform == "darwin", 'OSX-specific test') + def test_find(self): - self.assertEqual(find_lib('pthread'), - '/usr/lib/libSystem.B.dylib') + self.assertEqual(find_lib('pthread'), + '/usr/lib/libSystem.B.dylib') - result = find_lib('z') - self.assertTrue(result.endswith('.dylib')) + result = find_lib('z') + # Issue #21093: dyld 
default search path includes $HOME/lib and + # /usr/local/lib before /usr/lib, which caused test failures if + # a local copy of libz exists in one of them. Now ignore the head + # of the path. + self.assertRegexpMatches(result, r".*/lib/libz\..*.*\.dylib") - self.assertEqual(find_lib('IOKit'), - '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') + self.assertEqual(find_lib('IOKit'), + '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit') if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_memfunctions.py b/lib-python/2.7/ctypes/test/test_memfunctions.py --- a/lib-python/2.7/ctypes/test/test_memfunctions.py +++ b/lib-python/2.7/ctypes/test/test_memfunctions.py @@ -1,17 +1,19 @@ import sys import unittest from ctypes import * +from ctypes.test import need_symbol class MemFunctionsTest(unittest.TestCase): -## def test_overflow(self): -## # string_at and wstring_at must use the Python calling -## # convention (which acquires the GIL and checks the Python -## # error flag). Provoke an error and catch it; see also issue -## # #3554: -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: wstring_at(u"foo", sys.maxint - 1)) -## self.assertRaises((OverflowError, MemoryError, SystemError), -## lambda: string_at("foo", sys.maxint - 1)) + @unittest.skip('test disabled') + def test_overflow(self): + # string_at and wstring_at must use the Python calling + # convention (which acquires the GIL and checks the Python + # error flag). 
Provoke an error and catch it; see also issue + # #3554: + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: wstring_at(u"foo", sys.maxint - 1)) + self.assertRaises((OverflowError, MemoryError, SystemError), + lambda: string_at("foo", sys.maxint - 1)) def test_memmove(self): # large buffers apparently increase the chance that the memory @@ -60,21 +62,17 @@ self.assertEqual(string_at("foo bar", 8), "foo bar\0") self.assertEqual(string_at("foo bar", 3), "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_wstring_at(self): - p = create_unicode_buffer("Hello, World") - a = create_unicode_buffer(1000000) - result = memmove(a, p, len(p) * sizeof(c_wchar)) - self.assertEqual(a.value, "Hello, World") + @need_symbol('create_unicode_buffer') + def test_wstring_at(self): + p = create_unicode_buffer("Hello, World") + a = create_unicode_buffer(1000000) + result = memmove(a, p, len(p) * sizeof(c_wchar)) + self.assertEqual(a.value, "Hello, World") - self.assertEqual(wstring_at(a), "Hello, World") - self.assertEqual(wstring_at(a, 5), "Hello") - self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") - self.assertEqual(wstring_at(a, 0), "") + self.assertEqual(wstring_at(a), "Hello, World") + self.assertEqual(wstring_at(a, 5), "Hello") + self.assertEqual(wstring_at(a, 16), "Hello, World\0\0\0\0") + self.assertEqual(wstring_at(a, 0), "") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -83,12 +83,13 @@ self.assertRaises(TypeError, t, "") self.assertRaises(TypeError, t, None) -## def test_valid_ranges(self): -## # invalid values of the correct type -## # raise ValueError (not OverflowError) -## for t, (l, h) in zip(unsigned_types, unsigned_ranges): -## self.assertRaises(ValueError, t, l-1) -## self.assertRaises(ValueError, t, h+1) + 
@unittest.skip('test disabled') + def test_valid_ranges(self): + # invalid values of the correct type + # raise ValueError (not OverflowError) + for t, (l, h) in zip(unsigned_types, unsigned_ranges): + self.assertRaises(ValueError, t, l-1) + self.assertRaises(ValueError, t, h+1) @xfail def test_from_param(self): @@ -185,10 +186,10 @@ a = array(t._type_, [3.14]) v = t.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) a[0] = 2.3456e17 self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is t) + self.assertIs(type(v), t) def test_char_from_address(self): from ctypes import c_char @@ -197,22 +198,23 @@ a = array('c', 'x') v = c_char.from_address(a.buffer_info()[0]) self.assertEqual(v.value, a[0]) - self.assertTrue(type(v) is c_char) + self.assertIs(type(v), c_char) a[0] = '?' self.assertEqual(v.value, a[0]) # array does not support c_bool / 't' - # def test_bool_from_address(self): - # from ctypes import c_bool - # from array import array - # a = array(c_bool._type_, [True]) - # v = t.from_address(a.buffer_info()[0]) - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) - # a[0] = False - # self.assertEqual(v.value, a[0]) - # self.assertEqual(type(v) is t) + @unittest.skip('test disabled') + def test_bool_from_address(self): + from ctypes import c_bool + from array import array + a = array(c_bool._type_, [True]) + v = t.from_address(a.buffer_info()[0]) + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) + a[0] = False + self.assertEqual(v.value, a[0]) + self.assertEqual(type(v) is t) def test_init(self): # c_int() can be initialized from Python's int, and c_int. 
@@ -230,8 +232,9 @@ if (hasattr(t, "__ctype_le__")): self.assertRaises(OverflowError, t.__ctype_le__, big_int) -## def test_perf(self): -## check_perf() + @unittest.skip('test disabled') + def test_perf(self): + check_perf() from ctypes import _SimpleCData class c_int_S(_SimpleCData): diff --git a/lib-python/2.7/ctypes/test/test_objects.py b/lib-python/2.7/ctypes/test/test_objects.py --- a/lib-python/2.7/ctypes/test/test_objects.py +++ b/lib-python/2.7/ctypes/test/test_objects.py @@ -59,12 +59,9 @@ import ctypes.test.test_objects class TestCase(unittest.TestCase): - if sys.hexversion > 0x02040000: - # Python 2.3 has no ELLIPSIS flag, so we don't test with this - # version: - def test(self): - doctest.testmod(ctypes.test.test_objects) + def test(self): + failures, tests = doctest.testmod(ctypes.test.test_objects) + self.assertFalse(failures, 'doctests failed, see output above') if __name__ == '__main__': - if sys.hexversion > 0x02040000: - doctest.testmod(ctypes.test.test_objects) + doctest.testmod(ctypes.test.test_objects) diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py --- a/lib-python/2.7/ctypes/test/test_parameters.py +++ b/lib-python/2.7/ctypes/test/test_parameters.py @@ -1,4 +1,5 @@ import unittest, sys +from ctypes.test import need_symbol from ctypes.test import xfail @@ -38,10 +39,9 @@ self.assertEqual(CVOIDP.from_param("abc"), "abcabc") self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc") - try: - from ctypes import c_wchar_p - except ImportError: - return + @need_symbol('c_wchar_p') + def test_subclasses_c_wchar_p(self): + from ctypes import c_wchar_p class CWCHARP(c_wchar_p): def from_param(cls, value): @@ -58,7 +58,7 @@ # c_char_p.from_param on a Python String packs the string # into a cparam object s = "123" - self.assertTrue(c_char_p.from_param(s)._obj is s) + self.assertIs(c_char_p.from_param(s)._obj, s) # new in 0.9.1: convert (encode) unicode to ascii 
self.assertEqual(c_char_p.from_param(u"123")._obj, "123") @@ -69,15 +69,11 @@ # calling c_char_p.from_param with a c_char_p instance # returns the argument itself: a = c_char_p("123") - self.assertTrue(c_char_p.from_param(a) is a) + self.assertIs(c_char_p.from_param(a), a) + @need_symbol('c_wchar_p') def test_cw_strings(self): - from ctypes import byref - try: - from ctypes import c_wchar_p - except ImportError: -## print "(No c_wchar_p)" - return + from ctypes import byref, c_wchar_p s = u"123" if sys.platform == "win32": self.assertTrue(c_wchar_p.from_param(s)._obj is s) @@ -150,9 +146,6 @@ self.assertRaises(TypeError, LPINT.from_param, c_long*3) self.assertRaises(TypeError, LPINT.from_param, c_uint*3) -## def test_performance(self): -## check_perf() - def test_noctypes_argtype(self): import _ctypes_test from ctypes import CDLL, c_void_p, ArgumentError diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py --- a/lib-python/2.7/ctypes/test/test_pep3118.py +++ b/lib-python/2.7/ctypes/test/test_pep3118.py @@ -95,6 +95,10 @@ class aUnion(Union): _fields_ = [("a", c_int)] +class StructWithArrays(Structure): + _fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)] + + class Incomplete(Structure): pass @@ -144,10 +148,10 @@ ## arrays and pointers - (c_double * 4, "(4) Author: Brian Kearns Branch: Changeset: r73134:36e559a420ff Date: 2014-08-28 17:52 -0400 http://bitbucket.org/pypy/pypy/changeset/36e559a420ff/ Log: rename some functions for clarity diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -34,15 +34,15 @@ OFF_T = config['off_t'] FILEP = rffi.COpaquePtr("FILE") -c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_close = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) +c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) c_fwrite = 
llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], rffi.SIZE_T) c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], rffi.SIZE_T) c_feof = llexternal('feof', [FILEP], rffi.INT) c_ferror = llexternal('ferror', [FILEP], rffi.INT) -c_clearerror = llexternal('clearerr', [FILEP], lltype.Void) +c_clearerr = llexternal('clearerr', [FILEP], lltype.Void) c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT) c_tmpfile = llexternal('tmpfile', [], FILEP) @@ -65,7 +65,7 @@ def _error(ll_file): errno = c_ferror(ll_file) - c_clearerror(ll_file) + c_clearerr(ll_file) raise OSError(errno, os.strerror(errno)) @@ -77,7 +77,7 @@ try: ll_mode = rffi.str2charp(mode) try: - ll_f = c_open(ll_name, ll_mode) + ll_f = c_fopen(ll_name, ll_mode) if not ll_f: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -164,7 +164,7 @@ raise OSError(errno, os.strerror(errno)) return res - _do_close = staticmethod(c_close) # overridden in RPopenFile + _do_close = staticmethod(c_fclose) # overridden in RPopenFile def read(self, size=-1): # XXX CPython uses a more delicate logic here From noreply at buildbot.pypy.org Fri Aug 29 03:41:18 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 03:41:18 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix rfile buffering Message-ID: <20140829014118.382B61C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73135:7445b0d7890d Date: 2014-08-28 18:05 -0400 http://bitbucket.org/pypy/pypy/changeset/7445b0d7890d/ Log: test/fix rfile buffering diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -29,10 +29,19 @@ off_t = platform.SimpleType('off_t') + _IONBF = platform.DefinedConstantInteger('_IONBF') + _IOLBF = platform.DefinedConstantInteger('_IOLBF') + _IOFBF = platform.DefinedConstantInteger('_IOFBF') + BUFSIZ = platform.DefinedConstantInteger('BUFSIZ') + config = 
platform.configure(CConfig) OFF_T = config['off_t'] FILEP = rffi.COpaquePtr("FILE") +_IONBF = config['_IONBF'] +_IOLBF = config['_IOLBF'] +_IOFBF = config['_IOFBF'] +BUFSIZ = config['BUFSIZ'] c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) @@ -58,6 +67,7 @@ c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) +c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -70,7 +80,6 @@ def create_file(filename, mode="r", buffering=-1): - assert buffering == -1 assert filename is not None assert mode is not None ll_name = rffi.str2charp(filename) @@ -85,6 +94,13 @@ lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') + if buffering >= 0: + if buffering == 0: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) + elif buffering == 1: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) + else: + c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) return RFile(ll_f) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -20,6 +20,38 @@ self.interpret(f, []) assert open(fname, "r").read() == "dupa" + def test_open_buffering_line(self): + fname = str(self.tmpdir.join('file_1a')) + + def f(): + f = open(fname, 'w', 1) + f.write('dupa\ndupb') + f2 = open(fname, 'r') + assert f2.read() == 'dupa\n' + f.close() + assert f2.read() == 'dupb' + f2.close() + + f() + self.interpret(f, []) + + def test_open_buffering_full(self): + fname = str(self.tmpdir.join('file_1b')) + + def f(): + f = open(fname, 'w', 128) + f.write('dupa') + f2 = open(fname, 'r') + assert f2.read() == '' + f.write('z' * 5000) + assert f2.read() != '' + f.close() + assert f2.read() != '' + f2.close() + + f() + 
self.interpret(f, []) + def test_read_write(self): fname = str(self.tmpdir.join('file_2')) From noreply at buildbot.pypy.org Fri Aug 29 03:53:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 03:53:21 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: undo these rtyper changes, don't seem necessary Message-ID: <20140829015321.995221C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73136:65e70815a2db Date: 2014-08-28 21:47 -0400 http://bitbucket.org/pypy/pypy/changeset/65e70815a2db/ Log: undo these rtyper changes, don't seem necessary diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -421,16 +421,12 @@ return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): - from rpython.rtyper.lltypesystem.rstr import (string_repr, - unicode_repr) hop.exception_cannot_occur() - if strtype is str: - v_ll_str = hop.inputarg(string_repr, 0) - else: - v_ll_str = hop.inputarg(unicode_repr, 0) + assert hop.args_r[0].lowleveltype == hop.r_result.lowleveltype + v_ll_str, = hop.inputargs(*hop.args_r) return hop.genop('same_as', [v_ll_str], resulttype = hop.r_result.lowleveltype) - + return hlstr, llstr hlstr, llstr = make_string_entries(str) diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -34,14 +34,6 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 - def test_llstr_const_char(self): - def f(arg): - s = llstr(hlstr(arg)[0]) - return len(s.chars) - - res = self.interpret(f, [self.string_to_ll("abc")]) - assert res == 1 - def test_hlunicode(self): s = mallocunicode(3) s.chars[0] = u"a" From noreply at buildbot.pypy.org Fri Aug 29 03:53:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 03:53:22 
+0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: start to use rfile in pypy/module/_file Message-ID: <20140829015322.F1C991C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73137:201de1362418 Date: 2014-08-28 21:52 -0400 http://bitbucket.org/pypy/pypy/changeset/201de1362418/ Log: start to use rfile in pypy/module/_file diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -2,7 +2,7 @@ import os import stat import errno -from rpython.rlib import streamio +from rpython.rlib import streamio, rfile from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.rstring import StringBuilder @@ -97,9 +97,8 @@ def _when_reading_first_flush(self, otherfile): """Flush otherfile before reading from self.""" - xxx - self.stream = streamio.CallbackReadFilter(self.stream, - otherfile._try_to_flush) + #self.stream = streamio.CallbackReadFilter(self.stream, + # otherfile._try_to_flush) def _try_to_flush(self): stream = self.stream @@ -114,14 +113,11 @@ @unwrap_spec(mode=str, buffering=int) def direct___init__(self, w_name, mode='r', buffering=-1): self.direct_close() + self.check_mode_ok(mode) self.w_name = w_name - self.check_mode_ok(mode) - xxx - stream = dispatch_filename(streamio.open_file_as_stream)( - self.space, w_name, mode, buffering, signal_checker(self.space)) - fd = stream.try_to_find_file_descriptor() - self.check_not_dir(fd) - self.fdopenstream(stream, fd, mode) + self.stream = rfile.create_file(self.space.str_w(w_name), mode, buffering) + self.mode = mode + #self.binary = "b" in mode def direct___enter__(self): self.check_closed() @@ -135,37 +131,30 @@ def direct_fdopen(self, fd, mode='r', buffering=-1): self.direct_close() + self.check_mode_ok(mode) self.w_name = self.space.wrap('') - self.check_mode_ok(mode) - xxx - stream = 
streamio.fdopen_as_stream(fd, mode, buffering, - signal_checker(self.space)) - self.check_not_dir(fd) - self.fdopenstream(stream, fd, mode) + self.stream = rfile.create_fdopen_rfile(fd, mode) + self.mode = mode + #self.binary = "b" in mode def direct_close(self): stream = self.stream if stream is not None: - self.newlines = self.stream.getnewlines() + #self.newlines = self.stream.getnewlines() self.stream = None - self.fd = -1 - openstreams = getopenstreams(self.space) - try: - del openstreams[stream] - except KeyError: - pass + #openstreams = getopenstreams(self.space) + #try: + # del openstreams[stream] + #except KeyError: + # pass # close the stream. If cffi_fileobj is None, we close the # underlying fileno too. Otherwise, we leave that to # cffi_fileobj.close(). - cffifo = self.cffi_fileobj - self.cffi_fileobj = None - stream.close1(cffifo is None) - if cffifo is not None: - cffifo.close() + stream.close() def direct_fileno(self): - self.getstream() # check if the file is still open - return self.fd + stream = self.getstream() # check if the file is still open + return stream.fileno() def direct_flush(self): self.getstream().flush() @@ -180,7 +169,7 @@ def direct_read(self, n=-1): stream = self.getstream() if n < 0: - return stream.readall() + return stream.read() else: result = StringBuilder(n) while n > 0: @@ -204,6 +193,7 @@ @unwrap_spec(size=int) def direct_readline(self, size=-1): stream = self.getstream() + return stream.readline() if size < 0: return stream.readline() else: @@ -233,7 +223,7 @@ # this is implemented as: .read().split('\n') # except that it keeps the \n in the resulting strings if size <= 0: - data = stream.readall() + data = stream.read() else: data = stream.read(size) result = [] @@ -292,8 +282,8 @@ direct_xreadlines = direct___iter__ def direct_isatty(self): - self.getstream() # check if the file is still open - return os.isatty(self.fd) + stream = self.getstream() # check if the file is still open + return os.isatty(stream.fileno()) # 
____________________________________________________________ # @@ -339,8 +329,8 @@ try: try: result = self.direct_%(name)s(%(callsig)s) - except StreamErrors, e: - raise wrap_streamerror(space, e, self.w_name) + except OSError as e: + raise wrap_oserror_as_ioerror(self.space, e, self.w_name) finally: self.unlock() return %(wrapresult)s @@ -512,7 +502,8 @@ def descr_file_newlines(space, file): if file.stream: - newlines = file.stream.getnewlines() + #newlines = file.stream.getnewlines() + newlines = file.newlines else: newlines = file.newlines if newlines == 0: From noreply at buildbot.pypy.org Fri Aug 29 05:53:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:14 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup/pep8 Message-ID: <20140829035314.5DAA81D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73138:89c0b1e952c3 Date: 2014-08-28 22:14 -0400 http://bitbucket.org/pypy/pypy/changeset/89c0b1e952c3/ Log: cleanup/pep8 diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -21,8 +21,6 @@ fileno = '_fileno' eci = ExternalCompilationInfo(includes=includes) -def llexternal(*args, **kwargs): - return rffi.llexternal(*args, compilation_info=eci, **kwargs) class CConfig(object): _compilation_info_ = eci @@ -36,13 +34,20 @@ config = platform.configure(CConfig) +FILEP = rffi.COpaquePtr("FILE") OFF_T = config['off_t'] -FILEP = rffi.COpaquePtr("FILE") _IONBF = config['_IONBF'] _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] BUFSIZ = config['BUFSIZ'] +BASE_BUF_SIZE = 4096 +BASE_LINE_SIZE = 100 + + +def llexternal(*args, **kwargs): + return rffi.llexternal(*args, compilation_info=eci, **kwargs) + c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, @@ -69,9 +74,6 @@ c_pclose = llexternal('pclose', 
[FILEP], rffi.INT, releasegil=False) c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) -BASE_BUF_SIZE = 4096 -BASE_LINE_SIZE = 100 - def _error(ll_file): errno = c_ferror(ll_file) @@ -111,6 +113,7 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) + def create_fdopen_rfile(fd, mode="r"): assert mode is not None ll_mode = rffi.str2charp(mode) @@ -123,6 +126,7 @@ lltype.free(ll_mode, flavor='raw') return RFile(ll_f) + def create_popen_file(command, type): ll_command = rffi.str2charp(command) try: From noreply at buildbot.pypy.org Fri Aug 29 05:53:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:15 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: remove usage of StreamErrors Message-ID: <20140829035315.859471D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73139:2b155355a77a Date: 2014-08-28 23:03 -0400 http://bitbucket.org/pypy/pypy/changeset/2b155355a77a/ Log: remove usage of StreamErrors diff --git a/pypy/module/_file/__init__.py b/pypy/module/_file/__init__.py --- a/pypy/module/_file/__init__.py +++ b/pypy/module/_file/__init__.py @@ -13,7 +13,7 @@ def shutdown(self, space): # at shutdown, flush all open streams. Ignore I/O errors. 
- from pypy.module._file.interp_file import getopenstreams, StreamErrors + from pypy.module._file.interp_file import getopenstreams openstreams = getopenstreams(space) while openstreams: for stream in openstreams.keys(): @@ -24,7 +24,7 @@ else: try: stream.flush() - except StreamErrors: + except OSError: pass def setup_after_space_initialization(self): diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -6,7 +6,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.rstring import StringBuilder -from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors +from pypy.module._file.interp_stream import W_AbstractStream from pypy.module.posix.interp_posix import dispatch_filename from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.typedef import (TypeDef, GetSetProperty, @@ -53,9 +53,8 @@ assert isinstance(self, W_File) try: self.direct_close() - except StreamErrors, e: - operr = wrap_streamerror(self.space, e, self.w_name) - raise operr + except OSError as e: + raise wrap_oserror_as_ioerror(self.space, e, self.w_name) def fdopenstream(self, stream, fd, mode, w_name=None): self.fd = fd From noreply at buildbot.pypy.org Fri Aug 29 05:53:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:16 +0200 (CEST) Subject: [pypy-commit] pypy default: simplify rfile check if closed Message-ID: <20140829035316.BDF141D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73140:2b3c04eda6f2 Date: 2014-08-28 23:22 -0400 http://bitbucket.org/pypy/pypy/changeset/2b3c04eda6f2/ Log: simplify rfile check if closed diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -82,8 +82,6 @@ def create_file(filename, mode="r", buffering=-1): - assert filename is 
not None - assert mode is not None ll_name = rffi.str2charp(filename) try: ll_mode = rffi.str2charp(mode) @@ -115,7 +113,6 @@ def create_fdopen_rfile(fd, mode="r"): - assert mode is not None ll_mode = rffi.str2charp(mode) try: ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) @@ -147,18 +144,18 @@ def __init__(self, ll_file): self.ll_file = ll_file + def _check_closed(self): + if not self.ll_file: + raise ValueError("I/O operation on closed file") + def write(self, value): - assert value is not None - ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") - assert value is not None + self._check_closed() ll_value = rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine length = len(value) - bytes = c_fwrite(ll_value, 1, length, ll_file) + bytes = c_fwrite(ll_value, 1, length, self.ll_file) if bytes != length: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -174,11 +171,11 @@ The actual return value may be determined with os.WEXITSTATUS. 
""" res = 0 - ll_f = self.ll_file - if ll_f: + ll_file = self.ll_file + if ll_file: # double close is allowed self.ll_file = lltype.nullptr(FILEP.TO) - res = self._do_close(ll_f) + res = self._do_close(ll_file) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -188,9 +185,8 @@ def read(self, size=-1): # XXX CPython uses a more delicate logic here + self._check_closed() ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") if size < 0: # read the entire contents buf = lltype.malloc(rffi.CCHARP.TO, BASE_BUF_SIZE, flavor='raw') @@ -218,58 +214,51 @@ return s def seek(self, pos, whence=0): - ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") - res = c_fseek(ll_file, pos, whence) + self._check_closed() + res = c_fseek(self.ll_file, pos, whence) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) def fileno(self): - if self.ll_file: - return intmask(c_fileno(self.ll_file)) - raise ValueError("I/O operation on closed file") + self._check_closed() + return intmask(c_fileno(self.ll_file)) def tell(self): - if self.ll_file: - res = intmask(c_ftell(self.ll_file)) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return res - raise ValueError("I/O operation on closed file") + self._check_closed() + res = intmask(c_ftell(self.ll_file)) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + return res def flush(self): - if self.ll_file: - res = c_fflush(self.ll_file) - if res != 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return - raise ValueError("I/O operation on closed file") + self._check_closed() + res = c_fflush(self.ll_file) + if res != 0: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) def truncate(self, arg=-1): - if self.ll_file: - if arg == -1: - arg = self.tell() - self.flush() - res = 
c_ftruncate(self.fileno(), arg) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return - raise ValueError("I/O operation on closed file") + self._check_closed() + if arg == -1: + arg = self.tell() + self.flush() + res = c_ftruncate(self.fileno(), arg) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) def __del__(self): self.close() def _readline1(self, raw_buf): - result = c_fgets(raw_buf, BASE_LINE_SIZE, self.ll_file) + ll_file = self.ll_file + result = c_fgets(raw_buf, BASE_LINE_SIZE, ll_file) if not result: - if c_feof(self.ll_file): # ok + if c_feof(ll_file): # ok return 0 - raise _error(self.ll_file) + raise _error(ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -287,23 +276,22 @@ return strlen def readline(self): - if self.ll_file: - with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: + self._check_closed() + with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: + c = self._readline1(buf.raw) + if c >= 0: + return buf.str(c) + # + # this is the rare case: the line is longer than BASE_LINE_SIZE + s = StringBuilder() + while True: + s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) c = self._readline1(buf.raw) if c >= 0: - return buf.str(c) - # - # this is the rare case: the line is longer than BASE_LINE_SIZE - s = StringBuilder() - while True: - s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) - c = self._readline1(buf.raw) - if c >= 0: - break - # - s.append_charpsize(buf.raw, c) - return s.build() - raise ValueError("I/O operation on closed file") + break + # + s.append_charpsize(buf.raw, c) + return s.build() class RPopenFile(RFile): From noreply at buildbot.pypy.org Fri Aug 29 05:53:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:17 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: 
<20140829035317.E1CDA1D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73141:d882ed7c35d1 Date: 2014-08-28 23:27 -0400 http://bitbucket.org/pypy/pypy/changeset/d882ed7c35d1/ Log: merge default diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -21,8 +21,6 @@ fileno = '_fileno' eci = ExternalCompilationInfo(includes=includes) -def llexternal(*args, **kwargs): - return rffi.llexternal(*args, compilation_info=eci, **kwargs) class CConfig(object): _compilation_info_ = eci @@ -36,13 +34,20 @@ config = platform.configure(CConfig) +FILEP = rffi.COpaquePtr("FILE") OFF_T = config['off_t'] -FILEP = rffi.COpaquePtr("FILE") _IONBF = config['_IONBF'] _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] BUFSIZ = config['BUFSIZ'] +BASE_BUF_SIZE = 4096 +BASE_LINE_SIZE = 100 + + +def llexternal(*args, **kwargs): + return rffi.llexternal(*args, compilation_info=eci, **kwargs) + c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, @@ -69,9 +74,6 @@ c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) -BASE_BUF_SIZE = 4096 -BASE_LINE_SIZE = 100 - def _error(ll_file): errno = c_ferror(ll_file) @@ -80,8 +82,6 @@ def create_file(filename, mode="r", buffering=-1): - assert filename is not None - assert mode is not None ll_name = rffi.str2charp(filename) try: ll_mode = rffi.str2charp(mode) @@ -111,8 +111,8 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) + def create_fdopen_rfile(fd, mode="r"): - assert mode is not None ll_mode = rffi.str2charp(mode) try: ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) @@ -123,6 +123,7 @@ lltype.free(ll_mode, flavor='raw') return RFile(ll_f) + def create_popen_file(command, 
type): ll_command = rffi.str2charp(command) try: @@ -143,18 +144,18 @@ def __init__(self, ll_file): self.ll_file = ll_file + def _check_closed(self): + if not self.ll_file: + raise ValueError("I/O operation on closed file") + def write(self, value): - assert value is not None - ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") - assert value is not None + self._check_closed() ll_value = rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine length = len(value) - bytes = c_fwrite(ll_value, 1, length, ll_file) + bytes = c_fwrite(ll_value, 1, length, self.ll_file) if bytes != length: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -170,11 +171,11 @@ The actual return value may be determined with os.WEXITSTATUS. """ res = 0 - ll_f = self.ll_file - if ll_f: + ll_file = self.ll_file + if ll_file: # double close is allowed self.ll_file = lltype.nullptr(FILEP.TO) - res = self._do_close(ll_f) + res = self._do_close(ll_file) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) @@ -184,9 +185,8 @@ def read(self, size=-1): # XXX CPython uses a more delicate logic here + self._check_closed() ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") if size < 0: # read the entire contents buf = lltype.malloc(rffi.CCHARP.TO, BASE_BUF_SIZE, flavor='raw') @@ -214,58 +214,51 @@ return s def seek(self, pos, whence=0): - ll_file = self.ll_file - if not ll_file: - raise ValueError("I/O operation on closed file") - res = c_fseek(ll_file, pos, whence) + self._check_closed() + res = c_fseek(self.ll_file, pos, whence) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) def fileno(self): - if self.ll_file: - return intmask(c_fileno(self.ll_file)) - raise ValueError("I/O operation on closed file") + self._check_closed() + return 
intmask(c_fileno(self.ll_file)) def tell(self): - if self.ll_file: - res = intmask(c_ftell(self.ll_file)) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return res - raise ValueError("I/O operation on closed file") + self._check_closed() + res = intmask(c_ftell(self.ll_file)) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + return res def flush(self): - if self.ll_file: - res = c_fflush(self.ll_file) - if res != 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return - raise ValueError("I/O operation on closed file") + self._check_closed() + res = c_fflush(self.ll_file) + if res != 0: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) def truncate(self, arg=-1): - if self.ll_file: - if arg == -1: - arg = self.tell() - self.flush() - res = c_ftruncate(self.fileno(), arg) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return - raise ValueError("I/O operation on closed file") + self._check_closed() + if arg == -1: + arg = self.tell() + self.flush() + res = c_ftruncate(self.fileno(), arg) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) def __del__(self): self.close() def _readline1(self, raw_buf): - result = c_fgets(raw_buf, BASE_LINE_SIZE, self.ll_file) + ll_file = self.ll_file + result = c_fgets(raw_buf, BASE_LINE_SIZE, ll_file) if not result: - if c_feof(self.ll_file): # ok + if c_feof(ll_file): # ok return 0 - raise _error(self.ll_file) + raise _error(ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -283,23 +276,22 @@ return strlen def readline(self): - if self.ll_file: - with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: + self._check_closed() + with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: + c = self._readline1(buf.raw) + if c >= 0: + return buf.str(c) + # + # 
this is the rare case: the line is longer than BASE_LINE_SIZE + s = StringBuilder() + while True: + s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) c = self._readline1(buf.raw) if c >= 0: - return buf.str(c) - # - # this is the rare case: the line is longer than BASE_LINE_SIZE - s = StringBuilder() - while True: - s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) - c = self._readline1(buf.raw) - if c >= 0: - break - # - s.append_charpsize(buf.raw, c) - return s.build() - raise ValueError("I/O operation on closed file") + break + # + s.append_charpsize(buf.raw, c) + return s.build() class RPopenFile(RFile): From noreply at buildbot.pypy.org Fri Aug 29 05:53:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:19 +0200 (CEST) Subject: [pypy-commit] pypy default: fix rfile readline with null chars Message-ID: <20140829035319.448B41D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73142:1c38a12270c8 Date: 2014-08-28 23:51 -0400 http://bitbucket.org/pypy/pypy/changeset/1c38a12270c8/ Log: fix rfile readline with null chars diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -254,26 +254,37 @@ def _readline1(self, raw_buf): ll_file = self.ll_file + for i in range(BASE_LINE_SIZE): + raw_buf[i] = '\n' + result = c_fgets(raw_buf, BASE_LINE_SIZE, ll_file) if not result: if c_feof(ll_file): # ok return 0 raise _error(ll_file) - # + # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython # fileobject.c says appears to be the case everywhere. # The only case where the buffer was not big enough is the # case where the buffer is full, ends with \0, and doesn't # end with \n\0. - strlen = 0 - while raw_buf[strlen] != '\0': - strlen += 1 - if (strlen == BASE_LINE_SIZE - 1 and - raw_buf[BASE_LINE_SIZE - 2] != '\n'): - return -1 # overflow! 
- # common case - return strlen + + p = 0 + while raw_buf[p] != '\n': + p += 1 + if p == BASE_LINE_SIZE: + # fgets read whole buffer without finding newline + return -1 + # p points to first \n + + if p + 1 < BASE_LINE_SIZE and raw_buf[p + 1] == '\0': + # \n followed by \0, fgets read and found newline + return p + 1 + else: + # \n not followed by \0, fgets read but didnt find newline + assert p > 0 and raw_buf[p - 1] == '\0' + return p - 1 def readline(self): self._check_closed() diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -57,13 +57,17 @@ def f(): f = open(fname, "w") - f.write("dupa") + f.write("dupa\x00dupb") f.close() f2 = open(fname) dupa = f2.read() - assert dupa == "dupa" + assert dupa == "dupa\x00dupb" + f2.seek(0) + dupa = f2.readline() + assert dupa == "dupa\x00dupb" f2.close() + f() self.interpret(f, []) def test_read_sequentially(self): From noreply at buildbot.pypy.org Fri Aug 29 05:53:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 05:53:20 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829035320.6EBBD1D2AC1@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73143:49cb9ef9fde8 Date: 2014-08-28 23:52 -0400 http://bitbucket.org/pypy/pypy/changeset/49cb9ef9fde8/ Log: merge default diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -254,26 +254,37 @@ def _readline1(self, raw_buf): ll_file = self.ll_file + for i in range(BASE_LINE_SIZE): + raw_buf[i] = '\n' + result = c_fgets(raw_buf, BASE_LINE_SIZE, ll_file) if not result: if c_feof(ll_file): # ok return 0 raise _error(ll_file) - # + # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython # fileobject.c says appears to be the case everywhere. 
# The only case where the buffer was not big enough is the # case where the buffer is full, ends with \0, and doesn't # end with \n\0. - strlen = 0 - while raw_buf[strlen] != '\0': - strlen += 1 - if (strlen == BASE_LINE_SIZE - 1 and - raw_buf[BASE_LINE_SIZE - 2] != '\n'): - return -1 # overflow! - # common case - return strlen + + p = 0 + while raw_buf[p] != '\n': + p += 1 + if p == BASE_LINE_SIZE: + # fgets read whole buffer without finding newline + return -1 + # p points to first \n + + if p + 1 < BASE_LINE_SIZE and raw_buf[p + 1] == '\0': + # \n followed by \0, fgets read and found newline + return p + 1 + else: + # \n not followed by \0, fgets read but didnt find newline + assert p > 0 and raw_buf[p - 1] == '\0' + return p - 1 def readline(self): self._check_closed() diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -57,13 +57,17 @@ def f(): f = open(fname, "w") - f.write("dupa") + f.write("dupa\x00dupb") f.close() f2 = open(fname) dupa = f2.read() - assert dupa == "dupa" + assert dupa == "dupa\x00dupb" + f2.seek(0) + dupa = f2.readline() + assert dupa == "dupa\x00dupb" f2.close() + f() self.interpret(f, []) def test_read_sequentially(self): From noreply at buildbot.pypy.org Fri Aug 29 06:39:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 06:39:30 +0200 (CEST) Subject: [pypy-commit] pypy default: support size argument for rfile.readline Message-ID: <20140829043930.2F2171C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73144:bfcd7f844c8f Date: 2014-08-29 00:22 -0400 http://bitbucket.org/pypy/pypy/changeset/bfcd7f844c8f/ Log: support size argument for rfile.readline diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -31,6 +31,7 @@ _IOLBF = platform.DefinedConstantInteger('_IOLBF') _IOFBF = platform.DefinedConstantInteger('_IOFBF') 
BUFSIZ = platform.DefinedConstantInteger('BUFSIZ') + EOF = platform.DefinedConstantInteger('EOF') config = platform.configure(CConfig) @@ -40,6 +41,7 @@ _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] BUFSIZ = config['BUFSIZ'] +EOF = config['EOF'] BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -67,6 +69,7 @@ c_fflush = llexternal('fflush', [FILEP], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) +c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], rffi.CCHARP) @@ -286,22 +289,38 @@ assert p > 0 and raw_buf[p - 1] == '\0' return p - 1 - def readline(self): + def readline(self, size=-1): self._check_closed() - with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: - c = self._readline1(buf.raw) - if c >= 0: - return buf.str(c) - # - # this is the rare case: the line is longer than BASE_LINE_SIZE - s = StringBuilder() - while True: - s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) + if size == 0: + return "" + elif size < 0: + with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: c = self._readline1(buf.raw) if c >= 0: + return buf.str(c) + + # this is the rare case: the line is longer than BASE_LINE_SIZE + s = StringBuilder() + while True: + s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) + c = self._readline1(buf.raw) + if c >= 0: + break + s.append_charpsize(buf.raw, c) + return s.build() + else: # size > 0 + ll_file = self.ll_file + s = StringBuilder() + while s.getlength() < size: + c = c_getc(ll_file) + if c == EOF: + if c_ferror(ll_file): + raise _error(ll_file) break - # - s.append_charpsize(buf.raw, c) + c = chr(c) + s.append(c) + if c == '\n': + break return s.build() diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -63,6 +63,13 @@ dupa = f2.read() assert dupa == "dupa\x00dupb" f2.seek(0) + dupa = f2.readline(0) + assert dupa == "" + dupa = 
f2.readline(2) + assert dupa == "du" + dupa = f2.readline(100) + assert dupa == "pa\x00dupb" + f2.seek(0) dupa = f2.readline() assert dupa == "dupa\x00dupb" f2.close() From noreply at buildbot.pypy.org Fri Aug 29 06:39:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 06:39:31 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829043931.782071C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73145:3a1c0e548781 Date: 2014-08-29 00:30 -0400 http://bitbucket.org/pypy/pypy/changeset/3a1c0e548781/ Log: merge default diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -31,6 +31,7 @@ _IOLBF = platform.DefinedConstantInteger('_IOLBF') _IOFBF = platform.DefinedConstantInteger('_IOFBF') BUFSIZ = platform.DefinedConstantInteger('BUFSIZ') + EOF = platform.DefinedConstantInteger('EOF') config = platform.configure(CConfig) @@ -40,6 +41,7 @@ _IOLBF = config['_IOLBF'] _IOFBF = config['_IOFBF'] BUFSIZ = config['BUFSIZ'] +EOF = config['EOF'] BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -67,6 +69,7 @@ c_fflush = llexternal('fflush', [FILEP], rffi.INT) c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) +c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], rffi.CCHARP) @@ -286,22 +289,38 @@ assert p > 0 and raw_buf[p - 1] == '\0' return p - 1 - def readline(self): + def readline(self, size=-1): self._check_closed() - with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: - c = self._readline1(buf.raw) - if c >= 0: - return buf.str(c) - # - # this is the rare case: the line is longer than BASE_LINE_SIZE - s = StringBuilder() - while True: - s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) + if size == 0: + return "" + elif size < 0: + with rffi.scoped_alloc_buffer(BASE_LINE_SIZE) as buf: c = 
self._readline1(buf.raw) if c >= 0: + return buf.str(c) + + # this is the rare case: the line is longer than BASE_LINE_SIZE + s = StringBuilder() + while True: + s.append_charpsize(buf.raw, BASE_LINE_SIZE - 1) + c = self._readline1(buf.raw) + if c >= 0: + break + s.append_charpsize(buf.raw, c) + return s.build() + else: # size > 0 + ll_file = self.ll_file + s = StringBuilder() + while s.getlength() < size: + c = c_getc(ll_file) + if c == EOF: + if c_ferror(ll_file): + raise _error(ll_file) break - # - s.append_charpsize(buf.raw, c) + c = chr(c) + s.append(c) + if c == '\n': + break return s.build() diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -63,6 +63,13 @@ dupa = f2.read() assert dupa == "dupa\x00dupb" f2.seek(0) + dupa = f2.readline(0) + assert dupa == "" + dupa = f2.readline(2) + assert dupa == "du" + dupa = f2.readline(100) + assert dupa == "pa\x00dupb" + f2.seek(0) dupa = f2.readline() assert dupa == "dupa\x00dupb" f2.close() From noreply at buildbot.pypy.org Fri Aug 29 06:39:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 06:39:32 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: pass size to rfile.readline Message-ID: <20140829043932.B1E961C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73146:ae483af51c0d Date: 2014-08-29 00:31 -0400 http://bitbucket.org/pypy/pypy/changeset/ae483af51c0d/ Log: pass size to rfile.readline diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -192,29 +192,7 @@ @unwrap_spec(size=int) def direct_readline(self, size=-1): stream = self.getstream() - return stream.readline() - if size < 0: - return stream.readline() - else: - # very inefficient unless there is a peek() - result = StringBuilder() - while size > 0: - # 
"peeks" on the underlying stream to see how many chars - # we can safely read without reading past an end-of-line - startindex, peeked = stream.peek() - assert 0 <= startindex <= len(peeked) - endindex = startindex + size - pn = peeked.find("\n", startindex, endindex) - if pn < 0: - pn = min(endindex - 1, len(peeked)) - c = stream.read(pn - startindex + 1) - if not c: - break - result.append(c) - if c.endswith('\n'): - break - size -= len(c) - return result.build() + return stream.readline(size) @unwrap_spec(size=int) def direct_readlines(self, size=0): From noreply at buildbot.pypy.org Fri Aug 29 06:39:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 06:39:33 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: set binary flag from mode Message-ID: <20140829043933.E6C701C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73147:d36c42962914 Date: 2014-08-29 00:39 -0400 http://bitbucket.org/pypy/pypy/changeset/d36c42962914/ Log: set binary flag from mode diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -116,7 +116,7 @@ self.w_name = w_name self.stream = rfile.create_file(self.space.str_w(w_name), mode, buffering) self.mode = mode - #self.binary = "b" in mode + self.binary = "b" in mode def direct___enter__(self): self.check_closed() @@ -134,7 +134,7 @@ self.w_name = self.space.wrap('') self.stream = rfile.create_fdopen_rfile(fd, mode) self.mode = mode - #self.binary = "b" in mode + self.binary = "b" in mode def direct_close(self): stream = self.stream From noreply at buildbot.pypy.org Fri Aug 29 07:20:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:20:11 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: fix flush at exit Message-ID: <20140829052011.177751D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: 
use-file-star-for-file Changeset: r73148:f61ee40f042c Date: 2014-08-29 00:51 -0400 http://bitbucket.org/pypy/pypy/changeset/f61ee40f042c/ Log: fix flush at exit diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -56,15 +56,11 @@ except OSError as e: raise wrap_oserror_as_ioerror(self.space, e, self.w_name) - def fdopenstream(self, stream, fd, mode, w_name=None): - self.fd = fd + def fdopenstream(self, stream, mode): + self.stream = stream self.mode = mode self.binary = "b" in mode - if w_name is not None: - self.w_name = w_name - self.stream = stream - if stream.flushable(): - getopenstreams(self.space)[stream] = None + getopenstreams(self.space)[stream] = None def check_not_dir(self, fd): try: @@ -114,9 +110,8 @@ self.direct_close() self.check_mode_ok(mode) self.w_name = w_name - self.stream = rfile.create_file(self.space.str_w(w_name), mode, buffering) - self.mode = mode - self.binary = "b" in mode + stream = rfile.create_file(self.space.str_w(w_name), mode, buffering) + self.fdopenstream(stream, mode) def direct___enter__(self): self.check_closed() @@ -132,20 +127,19 @@ self.direct_close() self.check_mode_ok(mode) self.w_name = self.space.wrap('') - self.stream = rfile.create_fdopen_rfile(fd, mode) - self.mode = mode - self.binary = "b" in mode + stream = rfile.create_fdopen_rfile(fd, mode) + self.fdopenstream(stream, mode) def direct_close(self): stream = self.stream if stream is not None: #self.newlines = self.stream.getnewlines() self.stream = None - #openstreams = getopenstreams(self.space) - #try: - # del openstreams[stream] - #except KeyError: - # pass + openstreams = getopenstreams(self.space) + try: + del openstreams[stream] + except KeyError: + pass # close the stream. If cffi_fileobj is None, we close the # underlying fileno too. Otherwise, we leave that to # cffi_fileobj.close(). 
From noreply at buildbot.pypy.org Fri Aug 29 07:20:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:20:12 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: use rfile.read(n) logic instead Message-ID: <20140829052012.773581D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73149:35cbd34bdf5b Date: 2014-08-29 00:57 -0400 http://bitbucket.org/pypy/pypy/changeset/35cbd34bdf5b/ Log: use rfile.read(n) logic instead diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -161,27 +161,7 @@ @unwrap_spec(n=int) def direct_read(self, n=-1): stream = self.getstream() - if n < 0: - return stream.read() - else: - result = StringBuilder(n) - while n > 0: - try: - data = stream.read(n) - except OSError, e: - # a special-case only for read() (similar to CPython, which - # also loses partial data with other methods): if we get - # EAGAIN after already some data was received, return it. 
- if is_wouldblock_error(e): - got = result.build() - if len(got) > 0: - return got - raise - if not data: - break - n -= len(data) - result.append(data) - return result.build() + return stream.read(n) @unwrap_spec(size=int) def direct_readline(self, size=-1): From noreply at buildbot.pypy.org Fri Aug 29 07:20:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:20:13 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix rfile.read(0) Message-ID: <20140829052013.B4C5B1D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73150:26d170cd6c24 Date: 2014-08-29 01:01 -0400 http://bitbucket.org/pypy/pypy/changeset/26d170cd6c24/ Log: test/fix rfile.read(0) diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -190,7 +190,9 @@ # XXX CPython uses a more delicate logic here self._check_closed() ll_file = self.ll_file - if size < 0: + if size == 0: + return "" + elif size < 0: # read the entire contents buf = lltype.malloc(rffi.CCHARP.TO, BASE_BUF_SIZE, flavor='raw') try: @@ -206,7 +208,7 @@ s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') - else: + else: # size > 0 with rffi.scoped_alloc_buffer(size) as buf: returned_size = c_fread(buf.raw, 1, size, ll_file) returned_size = intmask(returned_size) # is between 0 and size diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -60,6 +60,8 @@ f.write("dupa\x00dupb") f.close() f2 = open(fname) + dupa = f2.read(0) + assert dupa == "" dupa = f2.read() assert dupa == "dupa\x00dupb" f2.seek(0) From noreply at buildbot.pypy.org Fri Aug 29 07:20:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:20:14 +0200 (CEST) Subject: [pypy-commit] pypy default: test/fix check for directory in rfile open Message-ID: <20140829052014.E8CD71D22E6@cobra.cs.uni-duesseldorf.de> 
Author: Brian Kearns Branch: Changeset: r73151:8986be62c395 Date: 2014-08-29 01:17 -0400 http://bitbucket.org/pypy/pypy/changeset/8986be62c395/ Log: test/fix check for directory in rfile open diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -3,7 +3,7 @@ python builtin open() """ -import os +import os, stat, errno from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder @@ -79,9 +79,20 @@ def _error(ll_file): - errno = c_ferror(ll_file) + err = c_ferror(ll_file) c_clearerr(ll_file) - raise OSError(errno, os.strerror(errno)) + raise OSError(err, os.strerror(err)) + + +def _dircheck(ll_file): + try: + st = os.fstat(c_fileno(ll_file)) + except OSError: + pass + else: + if stat.S_ISDIR(st[0]): + err = errno.EISDIR + raise OSError(err, os.strerror(err)) def create_file(filename, mode="r", buffering=-1): @@ -97,6 +108,7 @@ lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') + _dircheck(ll_f) if buffering >= 0: if buffering == 0: c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) @@ -124,6 +136,7 @@ raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') + _dircheck(ll_f) return RFile(ll_f) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,4 +1,4 @@ -import os, sys, py +import os, sys, py, errno from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -17,9 +17,37 @@ f.write("dupa") f.close() + f() self.interpret(f, []) assert open(fname, "r").read() == "dupa" + def test_open_errors(self): + def f(): + try: + open('zzz') + except OSError as e: + assert e.errno == errno.ENOENT + else: + assert False + + try: + open('.') + except OSError as e: + assert e.errno == errno.EISDIR + else: + assert False + + fd 
= os.open('.', os.O_RDONLY, 0777) + try: + os.fdopen(fd) + except OSError as e: + assert e.errno == errno.EISDIR + else: + assert False + os.close(fd) + + self.interpret(f, []) + def test_open_buffering_line(self): fname = str(self.tmpdir.join('file_1a')) From noreply at buildbot.pypy.org Fri Aug 29 07:20:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:20:16 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829052016.31ECE1D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73152:58d64536f9b4 Date: 2014-08-29 01:19 -0400 http://bitbucket.org/pypy/pypy/changeset/58d64536f9b4/ Log: merge default diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -3,7 +3,7 @@ python builtin open() """ -import os +import os, stat, errno from rpython.rlib import rposix from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder @@ -79,9 +79,20 @@ def _error(ll_file): - errno = c_ferror(ll_file) + err = c_ferror(ll_file) c_clearerr(ll_file) - raise OSError(errno, os.strerror(errno)) + raise OSError(err, os.strerror(err)) + + +def _dircheck(ll_file): + try: + st = os.fstat(c_fileno(ll_file)) + except OSError: + pass + else: + if stat.S_ISDIR(st[0]): + err = errno.EISDIR + raise OSError(err, os.strerror(err)) def create_file(filename, mode="r", buffering=-1): @@ -97,6 +108,7 @@ lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') + _dircheck(ll_f) if buffering >= 0: if buffering == 0: c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) @@ -124,6 +136,7 @@ raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') + _dircheck(ll_f) return RFile(ll_f) @@ -190,7 +203,9 @@ # XXX CPython uses a more delicate logic here self._check_closed() ll_file = self.ll_file - if size < 0: + if size == 0: + return "" + elif size 
< 0: # read the entire contents buf = lltype.malloc(rffi.CCHARP.TO, BASE_BUF_SIZE, flavor='raw') try: @@ -206,7 +221,7 @@ s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') - else: + else: # size > 0 with rffi.scoped_alloc_buffer(size) as buf: returned_size = c_fread(buf.raw, 1, size, ll_file) returned_size = intmask(returned_size) # is between 0 and size diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,4 +1,4 @@ -import os, sys, py +import os, sys, py, errno from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -17,9 +17,37 @@ f.write("dupa") f.close() + f() self.interpret(f, []) assert open(fname, "r").read() == "dupa" + def test_open_errors(self): + def f(): + try: + open('zzz') + except OSError as e: + assert e.errno == errno.ENOENT + else: + assert False + + try: + open('.') + except OSError as e: + assert e.errno == errno.EISDIR + else: + assert False + + fd = os.open('.', os.O_RDONLY, 0777) + try: + os.fdopen(fd) + except OSError as e: + assert e.errno == errno.EISDIR + else: + assert False + os.close(fd) + + self.interpret(f, []) + def test_open_buffering_line(self): fname = str(self.tmpdir.join('file_1a')) @@ -60,6 +88,8 @@ f.write("dupa\x00dupb") f.close() f2 = open(fname) + dupa = f2.read(0) + assert dupa == "" dupa = f2.read() assert dupa == "dupa\x00dupb" f2.seek(0) From noreply at buildbot.pypy.org Fri Aug 29 07:27:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:27:56 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: kill unused code Message-ID: <20140829052756.E45F81D27E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73153:c0e3a6041194 Date: 2014-08-29 01:23 -0400 http://bitbucket.org/pypy/pypy/changeset/c0e3a6041194/ Log: kill unused code diff --git 
a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -33,7 +33,6 @@ softspace= 0 # Required according to file object docs encoding = None errors = None - fd = -1 cffi_fileobj = None # pypy/module/_cffi_backend newlines = 0 # Updated when the stream is closed @@ -62,16 +61,6 @@ self.binary = "b" in mode getopenstreams(self.space)[stream] = None - def check_not_dir(self, fd): - try: - st = os.fstat(fd) - except OSError: - pass - else: - if (stat.S_ISDIR(st[0])): - ose = OSError(errno.EISDIR, '') - raise wrap_oserror_as_ioerror(self.space, ose, self.w_name) - def check_mode_ok(self, mode): if (not mode or mode[0] not in ['r', 'w', 'a', 'U'] or ('U' in mode and ('w' in mode or 'a' in mode))): From noreply at buildbot.pypy.org Fri Aug 29 07:41:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:41:33 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: fix handling of unicode filename Message-ID: <20140829054133.E056B1C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73154:a01f06512ec4 Date: 2014-08-29 01:40 -0400 http://bitbucket.org/pypy/pypy/changeset/a01f06512ec4/ Log: fix handling of unicode filename diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -7,7 +7,7 @@ from rpython.rlib.rarithmetic import r_longlong from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream -from pypy.module.posix.interp_posix import dispatch_filename +from pypy.module.posix.interp_posix import fsencode_w from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) @@ -99,7 +99,7 @@ self.direct_close() 
self.check_mode_ok(mode) self.w_name = w_name - stream = rfile.create_file(self.space.str_w(w_name), mode, buffering) + stream = rfile.create_file(fsencode_w(self.space, w_name), mode, buffering) self.fdopenstream(stream, mode) def direct___enter__(self): From noreply at buildbot.pypy.org Fri Aug 29 07:41:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 07:41:35 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: kill unused imports Message-ID: <20140829054135.11A881C000D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73155:ac18a16f281b Date: 2014-08-29 01:40 -0400 http://bitbucket.org/pypy/pypy/changeset/ac18a16f281b/ Log: kill unused imports diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -1,18 +1,16 @@ import py import os -import stat import errno -from rpython.rlib import streamio, rfile +from rpython.rlib import rfile from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong -from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream from pypy.module.posix.interp_posix import fsencode_w from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror +from pypy.interpreter.streamutil import wrap_oserror_as_ioerror class W_File(W_AbstractStream): From noreply at buildbot.pypy.org Fri Aug 29 09:05:14 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 29 Aug 2014 09:05:14 +0200 (CEST) Subject: [pypy-commit] pypy default: try harder to avoid windows-app crashes opening a dialog box under buildbots 
Message-ID: <20140829070514.8CA181C000D@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73156:716d587362cb Date: 2014-08-29 10:03 +0300 http://bitbucket.org/pypy/pypy/changeset/716d587362cb/ Log: try harder to avoid windows-app crashes opening a dialog box under buildbots diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -27,4 +27,21 @@ sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import pytest import pytest_cov + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + # We do this in runner.py, but buildbots run twisted which ruins inheritance + # in windows subprocesses. + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) + sys.exit(pytest.main(plugins=[pytest_cov])) diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -407,7 +407,8 @@ run_param.dry_run = opts.dry_run if run_param.dry_run: - print >>out, run_param.__dict__ + print >>out, '\n'.join([str((k, getattr(run_param, k))) \ + for k in dir(run_param) if k[:2] != '__']) res = execute_tests(run_param, testdirs, logfile, out) From noreply at buildbot.pypy.org Fri Aug 29 09:36:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 09:36:11 +0200 (CEST) Subject: [pypy-commit] pypy default: sanitize mode in rfile Message-ID: <20140829073611.46E921D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73157:3505ae408343 Date: 2014-08-29 03:25 -0400 http://bitbucket.org/pypy/pypy/changeset/3505ae408343/ 
Log: sanitize mode in rfile diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -95,7 +95,28 @@ raise OSError(err, os.strerror(err)) +def _sanitize_mode(mode): + if len(mode) == 0: + raise ValueError("empty mode string") + upos = mode.find('U') + if upos >= 0: + mode = mode[:upos] + mode[upos+1:] + first = mode[0:1] + if first == 'w' or first == 'a': + raise ValueError("universal newline mode can only be used with " + "modes starting with 'r'") + if first != 'r': + mode = 'r' + mode + if 'b' not in mode: + mode = mode[0] + 'b' + mode[1:] + elif mode[0] != 'r' and mode[0] != 'w' and mode[0] != 'a': + raise ValueError("mode string must begin with one of 'r', 'w', 'a' " + "or 'U', not '%s'" % mode) + return mode + + def create_file(filename, mode="r", buffering=-1): + mode = _sanitize_mode(mode) ll_name = rffi.str2charp(filename) try: ll_mode = rffi.str2charp(mode) @@ -128,6 +149,7 @@ def create_fdopen_rfile(fd, mode="r"): + mode = _sanitize_mode(mode) ll_mode = rffi.str2charp(mode) try: ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -24,6 +24,13 @@ def test_open_errors(self): def f(): try: + open('zzz', 'badmode') + except ValueError: + pass + else: + assert False + + try: open('zzz') except OSError as e: assert e.errno == errno.ENOENT @@ -37,6 +44,13 @@ else: assert False + try: + os.fdopen(42, "badmode") + except ValueError: + pass + else: + assert False + fd = os.open('.', os.O_RDONLY, 0777) try: os.fdopen(fd) @@ -87,22 +101,23 @@ f = open(fname, "w") f.write("dupa\x00dupb") f.close() - f2 = open(fname) - dupa = f2.read(0) - assert dupa == "" - dupa = f2.read() - assert dupa == "dupa\x00dupb" - f2.seek(0) - dupa = f2.readline(0) - assert dupa == "" - dupa = f2.readline(2) - assert dupa == "du" - dupa = f2.readline(100) - assert dupa == 
"pa\x00dupb" - f2.seek(0) - dupa = f2.readline() - assert dupa == "dupa\x00dupb" - f2.close() + for mode in ['r', 'U']: + f2 = open(fname, mode) + dupa = f2.read(0) + assert dupa == "" + dupa = f2.read() + assert dupa == "dupa\x00dupb" + f2.seek(0) + dupa = f2.readline(0) + assert dupa == "" + dupa = f2.readline(2) + assert dupa == "du" + dupa = f2.readline(100) + assert dupa == "pa\x00dupb" + f2.seek(0) + dupa = f2.readline() + assert dupa == "dupa\x00dupb" + f2.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 09:36:12 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 09:36:12 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140829073612.91D791D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73158:1d79593ea750 Date: 2014-08-29 03:35 -0400 http://bitbucket.org/pypy/pypy/changeset/1d79593ea750/ Log: merge heads diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -27,4 +27,21 @@ sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import pytest import pytest_cov + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + # We do this in runner.py, but buildbots run twisted which ruins inheritance + # in windows subprocesses. 
+ import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) + sys.exit(pytest.main(plugins=[pytest_cov])) diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -407,7 +407,8 @@ run_param.dry_run = opts.dry_run if run_param.dry_run: - print >>out, run_param.__dict__ + print >>out, '\n'.join([str((k, getattr(run_param, k))) \ + for k in dir(run_param) if k[:2] != '__']) res = execute_tests(run_param, testdirs, logfile, out) From noreply at buildbot.pypy.org Fri Aug 29 09:36:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 09:36:46 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829073646.C00E51D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73159:07e7602306fb Date: 2014-08-29 03:26 -0400 http://bitbucket.org/pypy/pypy/changeset/07e7602306fb/ Log: merge default diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -95,7 +95,28 @@ raise OSError(err, os.strerror(err)) +def _sanitize_mode(mode): + if len(mode) == 0: + raise ValueError("empty mode string") + upos = mode.find('U') + if upos >= 0: + mode = mode[:upos] + mode[upos+1:] + first = mode[0:1] + if first == 'w' or first == 'a': + raise ValueError("universal newline mode can only be used with " + "modes starting with 'r'") + if first != 'r': + mode = 'r' + mode + if 'b' not in mode: + mode = mode[0] + 'b' + mode[1:] + elif mode[0] != 'r' and mode[0] != 'w' and mode[0] != 'a': + raise ValueError("mode string must 
begin with one of 'r', 'w', 'a' " + "or 'U', not '%s'" % mode) + return mode + + def create_file(filename, mode="r", buffering=-1): + mode = _sanitize_mode(mode) ll_name = rffi.str2charp(filename) try: ll_mode = rffi.str2charp(mode) @@ -128,6 +149,7 @@ def create_fdopen_rfile(fd, mode="r"): + mode = _sanitize_mode(mode) ll_mode = rffi.str2charp(mode) try: ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -24,6 +24,13 @@ def test_open_errors(self): def f(): try: + open('zzz', 'badmode') + except ValueError: + pass + else: + assert False + + try: open('zzz') except OSError as e: assert e.errno == errno.ENOENT @@ -37,6 +44,13 @@ else: assert False + try: + os.fdopen(42, "badmode") + except ValueError: + pass + else: + assert False + fd = os.open('.', os.O_RDONLY, 0777) try: os.fdopen(fd) @@ -87,22 +101,23 @@ f = open(fname, "w") f.write("dupa\x00dupb") f.close() - f2 = open(fname) - dupa = f2.read(0) - assert dupa == "" - dupa = f2.read() - assert dupa == "dupa\x00dupb" - f2.seek(0) - dupa = f2.readline(0) - assert dupa == "" - dupa = f2.readline(2) - assert dupa == "du" - dupa = f2.readline(100) - assert dupa == "pa\x00dupb" - f2.seek(0) - dupa = f2.readline() - assert dupa == "dupa\x00dupb" - f2.close() + for mode in ['r', 'U']: + f2 = open(fname, mode) + dupa = f2.read(0) + assert dupa == "" + dupa = f2.read() + assert dupa == "dupa\x00dupb" + f2.seek(0) + dupa = f2.readline(0) + assert dupa == "" + dupa = f2.readline(2) + assert dupa == "du" + dupa = f2.readline(100) + assert dupa == "pa\x00dupb" + f2.seek(0) + dupa = f2.readline() + assert dupa == "dupa\x00dupb" + f2.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 09:36:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 09:36:48 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: use mode 
check from rfile Message-ID: <20140829073648.1DA7B1D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73160:98475f7b0785 Date: 2014-08-29 03:34 -0400 http://bitbucket.org/pypy/pypy/changeset/98475f7b0785/ Log: use mode check from rfile diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -59,12 +59,6 @@ self.binary = "b" in mode getopenstreams(self.space)[stream] = None - def check_mode_ok(self, mode): - if (not mode or mode[0] not in ['r', 'w', 'a', 'U'] or - ('U' in mode and ('w' in mode or 'a' in mode))): - space = self.space - raise oefmt(space.w_ValueError, "invalid mode: '%s'", mode) - def check_closed(self): if self.stream is None: raise OperationError(self.space.w_ValueError, @@ -95,7 +89,6 @@ @unwrap_spec(mode=str, buffering=int) def direct___init__(self, w_name, mode='r', buffering=-1): self.direct_close() - self.check_mode_ok(mode) self.w_name = w_name stream = rfile.create_file(fsencode_w(self.space, w_name), mode, buffering) self.fdopenstream(stream, mode) @@ -112,7 +105,6 @@ def direct_fdopen(self, fd, mode='r', buffering=-1): self.direct_close() - self.check_mode_ok(mode) self.w_name = self.space.wrap('') stream = rfile.create_fdopen_rfile(fd, mode) self.fdopenstream(stream, mode) @@ -230,6 +222,8 @@ def file_fdopen(self, fd, mode="r", buffering=-1): try: self.direct_fdopen(fd, mode, buffering) + except ValueError as e: + raise OperationError(self.space.w_ValueError, self.space.wrap(str(e))) except OSError as e: raise wrap_oserror(self.space, e) @@ -267,6 +261,8 @@ try: try: result = self.direct_%(name)s(%(callsig)s) + except ValueError as e: + raise OperationError(space.w_ValueError, space.wrap(str(e))) except OSError as e: raise wrap_oserror_as_ioerror(self.space, e, self.w_name) finally: diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- 
a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -72,6 +72,8 @@ except AttributeError: fdopen = os.fdopen # when running with -A fd = os.open(self.temppath, os.O_WRONLY | os.O_CREAT) + exc = raises(ValueError, fdopen, fd, "badmode") + assert str(exc.value) == "mode string must begin with one of 'r', 'w', 'a' or 'U', not 'badmode'" f2 = fdopen(fd, "a") f2.seek(0, 2) f2.write("bar\nboo") From noreply at buildbot.pypy.org Fri Aug 29 10:40:17 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 29 Aug 2014 10:40:17 +0200 (CEST) Subject: [pypy-commit] pypy default: avoid windows-app crashes opening a dialog box for single tests Message-ID: <20140829084017.009631D2339@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73161:807c7a1359e4 Date: 2014-08-29 11:40 +0300 http://bitbucket.org/pypy/pypy/changeset/807c7a1359e4/ Log: avoid windows-app crashes opening a dialog box for single tests diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -8,6 +8,21 @@ if __name__ == '__main__': # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest + import sys + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) raise SystemExit(pytest.main()) # else we are imported From noreply at buildbot.pypy.org Fri Aug 29 11:24:12 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 29 Aug 2014 11:24:12 +0200 (CEST) Subject: [pypy-commit] stmgc default: uhm, I guess we forgot to update this 
place when changing the indexing method in card marking Message-ID: <20140829092412.2B89C1D2AC1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1324:386289f8c74b Date: 2014-08-29 11:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/386289f8c74b/ Log: uhm, I guess we forgot to update this place when changing the indexing method in card marking diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -288,6 +288,9 @@ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size_t size = stmcb_size_rounded_up(realobj); + uintptr_t offset_itemsize[2]; + stmcb_get_card_base_itemsize(realobj, offset_itemsize); + size = (size - offset_itemsize[0]) / offset_itemsize[1]; uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; @@ -310,7 +313,6 @@ obj, start, stop)); stmcb_trace_cards(realobj, &minor_trace_if_young, start, stop); - } /* all cards should be cleared on overflow objs */ From noreply at buildbot.pypy.org Fri Aug 29 11:24:13 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 29 Aug 2014 11:24:13 +0200 (CEST) Subject: [pypy-commit] stmgc default: Merge Message-ID: <20140829092413.553C51D2AC1@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: Changeset: r1325:dbe9b14b252f Date: 2014-08-29 11:24 +0200 http://bitbucket.org/pypy/stmgc/changeset/dbe9b14b252f/ Log: Merge diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -144,24 +144,13 @@ DuObject *du_xor(DuObject *cons, DuObject *locals) { int result = 0; - /* _du_read1(cons); IMMUTABLE */ - DuObject *expr = _DuCons_CAR(cons); - DuObject *next = _DuCons_NEXT(cons); - - _du_save2(next, locals); - DuObject *obj = Du_Eval(expr, locals); - result = DuInt_AsInt(obj); - _du_restore2(next, locals); - - cons = next; - while (cons != Du_None) { /* _du_read1(cons); IMMUTABLE */ - expr = _DuCons_CAR(cons); - next = _DuCons_NEXT(cons); + DuObject *expr = 
_DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); _du_save2(next, locals); - obj = Du_Eval(expr, locals); + DuObject *obj = Du_Eval(expr, locals); result ^= DuInt_AsInt(obj); _du_restore2(next, locals); @@ -353,8 +342,6 @@ case 3: r = a != b; break; case 4: r = a > b; break; case 5: r = a >= b; break; - case 6: r = a && b; break; - case 7: r = a || b; break; } return DuInt_FromInt(r); } @@ -371,11 +358,48 @@ { return _du_intcmp(cons, locals, 4); } DuObject *du_ge(DuObject *cons, DuObject *locals) { return _du_intcmp(cons, locals, 5); } + DuObject *du_and(DuObject *cons, DuObject *locals) -{ return _du_intcmp(cons, locals, 6); } +{ + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + int result = DuObject_IsTrue(obj); + _du_restore2(next, locals); + + if (!result) + return DuInt_FromInt(0); + + cons = next; + } + + return DuInt_FromInt(1); +} + DuObject *du_or(DuObject *cons, DuObject *locals) -{ return _du_intcmp(cons, locals, 7); } +{ + while (cons != Du_None) { + /* _du_read1(cons); IMMUTABLE */ + DuObject *expr = _DuCons_CAR(cons); + DuObject *next = _DuCons_NEXT(cons); + _du_save2(next, locals); + DuObject *obj = Du_Eval(expr, locals); + int result = DuObject_IsTrue(obj); + _du_restore2(next, locals); + + if (result) + return DuInt_FromInt(1); + + cons = next; + } + + return DuInt_FromInt(0); +} DuObject *du_type(DuObject *cons, DuObject *locals) diff --git a/duhton/test/test_int.py b/duhton/test/test_int.py --- a/duhton/test/test_int.py +++ b/duhton/test/test_int.py @@ -20,9 +20,11 @@ assert evaluate("(* 2 3 7)") == 42 assert evaluate("(* (+ 5 1) (+ 6 1))") == 42 -def test_div(): +def test_div_mod(): assert evaluate("(/ 11 2)") == 5 assert evaluate("(/ 29 2 3)") == 4 + assert evaluate("(% 29 2)") == 1 + assert evaluate("(% 29 10 3)") == 0 def test_cmp(): assert evaluate("(< 6 6)") == 0 @@ -47,3 
+49,35 @@ assert evaluate("(>= 7 6)") == 1 # assert evaluate("(< (+ 10 2) (+ 4 5))") == 0 + +def test_and_or(): + assert evaluate("(&& 1 1 1)") == 1 + assert evaluate("(&& 1 0 1)") == 0 + assert evaluate("(&& 0 sdfdsfsd)") == 0 + assert evaluate("(&& None)") == 0 + assert evaluate("(&& (quote bla))") == 1 + assert evaluate("(&& )") == 1 + + assert evaluate("(|| 0 1)") == 1 + assert evaluate("(|| 0 0 0 1)") == 1 + assert evaluate("(|| 0 0 0)") == 0 + assert evaluate("(|| 1 sdfdsfafds)") == 1 + assert evaluate("(|| None)") == 0 + assert evaluate("(|| (quote bla))") == 1 + assert evaluate("(|| )") == 0 + + +def test_shifts_bitwise(): + assert evaluate("(<< 1 1)") == 2 + assert evaluate("(<< 12)") == 12 + assert evaluate("(<< 1 1 1)") == 4 + assert evaluate("(<< 0 1)") == 0 + + assert evaluate("(>> 4 1 1)") == 1 + assert evaluate("(>> 4 3)") == 0 + assert evaluate("(>> 4)") == 4 + + assert evaluate("(^ 1 4)") == 1 ^ 4 + assert evaluate("(^ 1 4 122)") == 1 ^ 4 ^ 122 + assert evaluate("(^ 1)") == 1 + assert evaluate("(^)") == 0 From noreply at buildbot.pypy.org Fri Aug 29 13:50:49 2014 From: noreply at buildbot.pypy.org (Raemi) Date: Fri, 29 Aug 2014 13:50:49 +0200 (CEST) Subject: [pypy-commit] pypy stmgc-c7: import stmgc Message-ID: <20140829115049.2D6711D2339@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: stmgc-c7 Changeset: r73162:c0a9332d75f7 Date: 2014-08-29 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/c0a9332d75f7/ Log: import stmgc diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision --- a/rpython/translator/stm/src_stm/revision +++ b/rpython/translator/stm/src_stm/revision @@ -1,1 +1,1 @@ -7949c54b03a5 +dbe9b14b252f diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c --- a/rpython/translator/stm/src_stm/stm/nursery.c +++ b/rpython/translator/stm/src_stm/stm/nursery.c @@ -289,6 +289,9 @@ struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size_t size = stmcb_size_rounded_up(realobj); + uintptr_t offset_itemsize[2]; + stmcb_get_card_base_itemsize(realobj, offset_itemsize); + size = (size - offset_itemsize[0]) / offset_itemsize[1]; uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj); uintptr_t card_index = 1; @@ -311,7 +314,6 @@ obj, start, stop)); stmcb_trace_cards(realobj, &minor_trace_if_young, start, stop); - } /* all cards should be cleared on overflow objs */ From noreply at buildbot.pypy.org Fri Aug 29 15:22:07 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 29 Aug 2014 15:22:07 +0200 (CEST) Subject: [pypy-commit] pypy default: update contributors, 12 new names Message-ID: <20140829132207.827E01D37BA@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73163:c775186e2e80 Date: 2014-08-29 16:21 +0300 http://bitbucket.org/pypy/pypy/changeset/c775186e2e80/ Log: update contributors, 12 new names diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -25,8 +25,8 @@ Manuel Jacob Anders Chrigstrom Eric van Riet Paap + Ronan Lamy Wim Lavrijsen - Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen @@ -35,10 +35,10 @@ Anders Lehmann Aurelien Campeas Niklaus Haldimann + Remi Meier Camillo Bruni Laura Creighton Toon Verwaest - Remi Meier Leonardo Santagada Seo Sanghyeon Romain Guillebert @@ -50,6 +50,7 @@ Guido Wesdorp Lawrence Oluyede Bartosz Skowron + Gregor Wegberg Daniel Roberts Niko Matsakis Adrien Di Mascio @@ -63,6 +64,7 @@ stian Michael Foord Stephan Diehl + Tyler Wade Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,43 +77,45 @@ Alexandre Fayolle Simon Burton Marius Gedminas + Martin Matusiak + Konstantin Lopuhin John Witulski - Konstantin Lopuhin + wenzhu man Greg Price Dario Bertini Mark Pearse Simon Cross + Ivan Sichmann Freitas Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Stefano Rivera Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Tobias Oberstein Adrian Kuhn Boris Feigin - Stefano Rivera tav Taavi Burns Georg Brandl + Laurence Tratt Bert Freudenberg Stian Andreassen - Laurence Tratt Wanja Saatkamp - Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller + Edd Barrett Jeremy Thurgood - Gregor Wegberg Rami Chowdhury Tobias Pape - Edd Barrett David Malcolm Eugene Oden Henry Mason + Vasily Kuznetsov Preston Timmons Jeff Terrace David Ripton @@ -128,12 +132,11 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira + Tim Felgentreff Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade Gabriel Lavoie Olivier Dormond Jared Grubb @@ -149,22 +152,22 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Gasper Zejn + anatoly techtonik + Neil Shepperd Mikael Schönenberg - Gasper Zejn - Neil Shepperd - Elmo Mäntynen + Elmo M?ntynen Jonathan David Riehl Stanislaw Halik Anders Qvist + Corbin Simpson Chirag Jadwani Beatrice During Alex Perry Vincent Legoll Alan McIntyre Alexander Sedov - Corbin Simpson Christopher Pope - wenzhuman Christian Tismer Marc Abramowitz Dan Stromberg @@ -174,29 +177,33 @@ Carl Meyer Karl Ramm Pieter Zieschang + Sebastian Pawluś Gabriel Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor Vladimir Kryachko + Arjun Naik + Attila Gobi Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies Ryan Gonzalez + Ian Foote Kristjan Valur Jonsson - Sebastian Pawluś Neil Blakey-Milner - anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann + Valentina Mukhamedzhanova Henrik Vendelbo Dan Buch Miguel de Val Borro Artur Lisiecki Sergey Kishchenko + Yichao Yu Ignas Mikalajunas Christoph Gerum Martin Blais @@ -209,6 +216,7 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson + Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -219,6 +227,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + Jason Michalski rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven @@ -226,7 +235,6 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński - Yusei Tahara Christian Muirhead James Lan shoma hosaka @@ -238,7 +246,6 @@ Chris Lambacher coolbutuseless at gmail.com Rodrigo Araújo - w31rd0 Jim Baker James Robert Armin Ronacher @@ -252,6 +259,7 @@ Asmo Soinio Stefan Marr jiaaro + Mads Kiilerich opassembler.py Antony Lee Jim Hunziker @@ -261,11 +269,13 @@ soareschen Kurt Griffiths Mike Bayer + Matthew Miller Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft + Dan Crosta Julien Phalip Dan Loewenherz diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -67,6 +67,8 @@ 'Edd Barrett': ['edd'], 'Manuel Jacob': ['mjacob'], 'Rami Chowdhury': ['necaris'], + 'Stanislaw Halik':['w31rd0'], + 'wenzhu man':['wenzhuman'], } alias_map = {} From noreply at buildbot.pypy.org Fri Aug 29 15:52:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 29 Aug 2014 15:52:44 +0200 (CEST) Subject: [pypy-commit] pypy default: start a release announcement, help is welcome Message-ID: <20140829135244.C9DFA1D37D5@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73164:e8039184f11e Date: 2014-08-29 16:52 +0300 http://bitbucket.org/pypy/pypy/changeset/e8039184f11e/ 
Log: start a release announcement, help is welcome diff --git a/pypy/doc/release-2.4.rst b/pypy/doc/release-2.4.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.4.rst @@ -0,0 +1,107 @@ +================================================= +PyPy 2.4 - ???????? +================================================= + +We're pleased to announce PyPy 2.4, a significant milestone on it's own right +and the proud parent of our recent PyPy3 and STM releases. + +This release contains several improvements and bugfixes. + +You can download the PyPy 2.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress +but we're slowly running out of funds. +Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatable version + we call PyPy3 2.3.1, and are working toward a Python 3.3 compatable version + +* `STM`_ (software transactional memory): We have release a first working version, and +continue to try out new promising paths of acheiving a fast multithreaded python + +* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) +due to its integrated tracing JIT compiler. 
+ +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Benchmarks improved after internal improvements in string and bytearray handling, +and a major rewrite of the GIL handling. Many of these improvements are offshoots +of the STM work. + +We merged in Python's 2.7.8 stdlib in a record time of one week, proving the +maturity of our underlying RPython code base and PyPy interpreter. + +We welcomed more than 12 new contributors, and conducted two Google Summer of Code +projects XXX details? + +Issues reported with our previous release were fixed after reports from users on +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* Reduced internal copying of bytearray operations + +* Tweak the internal structure of StringBuilder to speed up large string +handling, which becomes advantageous on large programs at the cost of slightly +slower small *benchmark* type programs. 
+ +* Boost performance of thread-local variables in both unjitted and jitted code + +* Move to a mixed polling and mutex GIL model that make mutli-threaded jitted + code run *much* faster + +* Optimize errno handling in linux + +* Remove ctypes pythonapi and ctypes.PyDLL, which never worked on PyPy + +* Fix performance regression on ufunc(, ) in numpy + +* Classes in the ast module are now distinct from structures used by the compiler, + which simplifies and speeds up translation of our source code to the PyPy binary + interpreter + +* Upgrade stdlib from 2.7.5 to 2.7.8 + +* + +* Many issues were resolved_ since the 2.3.1 release on June 8 + +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved + +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + From noreply at buildbot.pypy.org Fri Aug 29 16:38:45 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 29 Aug 2014 16:38:45 +0200 (CEST) Subject: [pypy-commit] pypy default: fix the case of llstr(char) Message-ID: <20140829143845.EE9231D3811@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r73165:5ca79366c56e Date: 2014-08-20 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/5ca79366c56e/ Log: fix the case of llstr(char) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -421,9 +421,13 @@ return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import (string_repr, + unicode_repr) hop.exception_cannot_occur() - assert hop.args_r[0].lowleveltype == hop.r_result.lowleveltype - v_ll_str, = hop.inputargs(*hop.args_r) + if strtype is str: + v_ll_str = hop.inputarg(string_repr, 0) + else: + v_ll_str = hop.inputarg(unicode_repr, 
0) return hop.genop('same_as', [v_ll_str], resulttype = hop.r_result.lowleveltype) diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -34,6 +34,14 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + def test_llstr_const_char(self): + def f(arg): + s = llstr(hlstr(arg)[0]) + return len(s.chars) + + res = self.interpret(f, [self.string_to_ll("abc")]) + assert res == 1 + def test_hlunicode(self): s = mallocunicode(3) s.chars[0] = u"a" From noreply at buildbot.pypy.org Fri Aug 29 16:38:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 16:38:47 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140829143847.51D8D1D3811@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73166:0628cfb76bd5 Date: 2014-08-29 10:38 -0400 http://bitbucket.org/pypy/pypy/changeset/0628cfb76bd5/ Log: merge heads diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -25,8 +25,8 @@ Manuel Jacob Anders Chrigstrom Eric van Riet Paap + Ronan Lamy Wim Lavrijsen - Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen @@ -35,10 +35,10 @@ Anders Lehmann Aurelien Campeas Niklaus Haldimann + Remi Meier Camillo Bruni Laura Creighton Toon Verwaest - Remi Meier Leonardo Santagada Seo Sanghyeon Romain Guillebert @@ -50,6 +50,7 @@ Guido Wesdorp Lawrence Oluyede Bartosz Skowron + Gregor Wegberg Daniel Roberts Niko Matsakis Adrien Di Mascio @@ -63,6 +64,7 @@ stian Michael Foord Stephan Diehl + Tyler Wade Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,43 +77,45 @@ Alexandre Fayolle Simon Burton Marius Gedminas + Martin Matusiak + Konstantin Lopuhin John Witulski - Konstantin Lopuhin + wenzhu man Greg Price Dario Bertini Mark Pearse Simon Cross + Ivan Sichmann Freitas Andreas 
Stührk Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Stefano Rivera Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Tobias Oberstein Adrian Kuhn Boris Feigin - Stefano Rivera tav Taavi Burns Georg Brandl + Laurence Tratt Bert Freudenberg Stian Andreassen - Laurence Tratt Wanja Saatkamp - Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller + Edd Barrett Jeremy Thurgood - Gregor Wegberg Rami Chowdhury Tobias Pape - Edd Barrett David Malcolm Eugene Oden Henry Mason + Vasily Kuznetsov Preston Timmons Jeff Terrace David Ripton @@ -128,12 +132,11 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira + Tim Felgentreff Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade Gabriel Lavoie Olivier Dormond Jared Grubb @@ -149,22 +152,22 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Gasper Zejn + anatoly techtonik + Neil Shepperd Mikael Schönenberg - Gasper Zejn - Neil Shepperd - Elmo Mäntynen + Elmo M?ntynen Jonathan David Riehl Stanislaw Halik Anders Qvist + Corbin Simpson Chirag Jadwani Beatrice During Alex Perry Vincent Legoll Alan McIntyre Alexander Sedov - Corbin Simpson Christopher Pope - wenzhuman Christian Tismer Marc Abramowitz Dan Stromberg @@ -174,29 +177,33 @@ Carl Meyer Karl Ramm Pieter Zieschang + Sebastian Pawluś Gabriel Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor Vladimir Kryachko + Arjun Naik + Attila Gobi Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies Ryan Gonzalez + Ian Foote Kristjan Valur Jonsson - Sebastian Pawluś Neil Blakey-Milner - anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann + Valentina Mukhamedzhanova Henrik Vendelbo Dan Buch Miguel de Val Borro Artur Lisiecki Sergey Kishchenko + Yichao Yu Ignas Mikalajunas Christoph Gerum Martin Blais @@ -209,6 +216,7 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson + Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -219,6 +227,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + Jason Michalski rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven @@ -226,7 +235,6 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński - Yusei Tahara Christian Muirhead James Lan shoma hosaka @@ -238,7 +246,6 @@ Chris Lambacher coolbutuseless at gmail.com Rodrigo Araújo - w31rd0 Jim Baker James Robert Armin Ronacher @@ -252,6 +259,7 @@ Asmo Soinio Stefan Marr jiaaro + Mads Kiilerich opassembler.py Antony Lee Jim Hunziker @@ -261,11 +269,13 @@ soareschen Kurt Griffiths Mike Bayer + Matthew Miller Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft + Dan Crosta Julien Phalip Dan Loewenherz diff --git a/pypy/doc/release-2.4.rst b/pypy/doc/release-2.4.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.4.rst @@ -0,0 +1,107 @@ +================================================= +PyPy 2.4 - ???????? +================================================= + +We're pleased to announce PyPy 2.4, a significant milestone on it's own right +and the proud parent of our recent PyPy3 and STM releases. + +This release contains several improvements and bugfixes. + +You can download the PyPy 2.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress +but we're slowly running out of funds. 
+Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatable version + we call PyPy3 2.3.1, and are working toward a Python 3.3 compatable version + +* `STM`_ (software transactional memory): We have release a first working version, and +continue to try out new promising paths of acheiving a fast multithreaded python + +* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Benchmarks improved after internal improvements in string and bytearray handling, +and a major rewrite of the GIL handling. Many of these improvements are offshoots +of the STM work. + +We merged in Python's 2.7.8 stdlib in a record time of one week, proving the +maturity of our underlying RPython code base and PyPy interpreter. 
+ +We welcomed more than 12 new contributors, and conducted two Google Summer of Code +projects XXX details? + +Issues reported with our previous release were fixed after reports from users on +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* Reduced internal copying of bytearray operations + +* Tweak the internal structure of StringBuilder to speed up large string +handling, which becomes advantageous on large programs at the cost of slightly +slower small *benchmark* type programs. + +* Boost performance of thread-local variables in both unjitted and jitted code + +* Move to a mixed polling and mutex GIL model that make mutli-threaded jitted + code run *much* faster + +* Optimize errno handling in linux + +* Remove ctypes pythonapi and ctypes.PyDLL, which never worked on PyPy + +* Fix performance regression on ufunc(, ) in numpy + +* Classes in the ast module are now distinct from structures used by the compiler, + which simplifies and speeds up translation of our source code to the PyPy binary + interpreter + +* Upgrade stdlib from 2.7.5 to 2.7.8 + +* + +* Many issues were resolved_ since the 2.3.1 release on June 8 + +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved + +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! 
+ +Cheers + +The PyPy Team + diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -67,6 +67,8 @@ 'Edd Barrett': ['edd'], 'Manuel Jacob': ['mjacob'], 'Rami Chowdhury': ['necaris'], + 'Stanislaw Halik':['w31rd0'], + 'wenzhu man':['wenzhuman'], } alias_map = {} diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -8,6 +8,21 @@ if __name__ == '__main__': # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest + import sys + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) raise SystemExit(pytest.main()) # else we are imported From noreply at buildbot.pypy.org Fri Aug 29 16:42:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 16:42:53 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: improve these tests Message-ID: <20140829144253.CCE161D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73167:00a18e4129fd Date: 2014-08-29 10:41 -0400 http://bitbucket.org/pypy/pypy/changeset/00a18e4129fd/ Log: improve these tests diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -87,10 +87,12 @@ f.close() def test_badmode(self): - raises(ValueError, self.file, "foo", "bar") + exc = raises(ValueError, self.file, "foo", 
"bar") + assert str(exc.value) == "mode string must begin with one of 'r', 'w', 'a' or 'U', not 'bar'" def test_wraposerror(self): - raises(IOError, self.file, "hopefully/not/existant.bar") + exc = raises(IOError, self.file, "hopefully/not/existant.bar") + assert str(exc.value) == "[Errno 2] No such file or directory: 'hopefully/not/existant.bar'" def test_correct_file_mode(self): import os From noreply at buildbot.pypy.org Fri Aug 29 16:42:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 16:42:55 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829144255.3B4041D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73168:407345c30fb6 Date: 2014-08-29 10:41 -0400 http://bitbucket.org/pypy/pypy/changeset/407345c30fb6/ Log: merge default diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -25,8 +25,8 @@ Manuel Jacob Anders Chrigstrom Eric van Riet Paap + Ronan Lamy Wim Lavrijsen - Ronan Lamy Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen @@ -35,10 +35,10 @@ Anders Lehmann Aurelien Campeas Niklaus Haldimann + Remi Meier Camillo Bruni Laura Creighton Toon Verwaest - Remi Meier Leonardo Santagada Seo Sanghyeon Romain Guillebert @@ -50,6 +50,7 @@ Guido Wesdorp Lawrence Oluyede Bartosz Skowron + Gregor Wegberg Daniel Roberts Niko Matsakis Adrien Di Mascio @@ -63,6 +64,7 @@ stian Michael Foord Stephan Diehl + Tyler Wade Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,43 +77,45 @@ Alexandre Fayolle Simon Burton Marius Gedminas + Martin Matusiak + Konstantin Lopuhin John Witulski - Konstantin Lopuhin + wenzhu man Greg Price Dario Bertini Mark Pearse Simon Cross + Ivan Sichmann Freitas Andreas Stührk Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Stefano Rivera Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy Tobias Oberstein Adrian Kuhn Boris Feigin - Stefano Rivera tav Taavi Burns Georg Brandl + Laurence Tratt Bert Freudenberg Stian Andreassen - Laurence Tratt Wanja Saatkamp - Ivan Sichmann Freitas Gerald Klix Mike Blume Oscar Nierstrasz Stefan H. Muller + Edd Barrett Jeremy Thurgood - Gregor Wegberg Rami Chowdhury Tobias Pape - Edd Barrett David Malcolm Eugene Oden Henry Mason + Vasily Kuznetsov Preston Timmons Jeff Terrace David Ripton @@ -128,12 +132,11 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira + Tim Felgentreff Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade Gabriel Lavoie Olivier Dormond Jared Grubb @@ -149,22 +152,22 @@ Aaron Iles Michael Cheng Justas Sadzevicius + Gasper Zejn + anatoly techtonik + Neil Shepperd Mikael Schönenberg - Gasper Zejn - Neil Shepperd - Elmo Mäntynen + Elmo M?ntynen Jonathan David Riehl Stanislaw Halik Anders Qvist + Corbin Simpson Chirag Jadwani Beatrice During Alex Perry Vincent Legoll Alan McIntyre Alexander Sedov - Corbin Simpson Christopher Pope - wenzhuman Christian Tismer Marc Abramowitz Dan Stromberg @@ -174,29 +177,33 @@ Carl Meyer Karl Ramm Pieter Zieschang + Sebastian Pawluś Gabriel Lukas Vacek Andrew Dalke Sylvain Thenault Nathan Taylor Vladimir Kryachko + Arjun Naik + Attila Gobi Jacek Generowicz Alejandro J. 
Cura Jacob Oscarson Travis Francis Athougies Ryan Gonzalez + Ian Foote Kristjan Valur Jonsson - Sebastian Pawluś Neil Blakey-Milner - anatoly techtonik Lutz Paelike Lucio Torre Lars Wassermann + Valentina Mukhamedzhanova Henrik Vendelbo Dan Buch Miguel de Val Borro Artur Lisiecki Sergey Kishchenko + Yichao Yu Ignas Mikalajunas Christoph Gerum Martin Blais @@ -209,6 +216,7 @@ Bobby Impollonia timo at eistee.fritz.box Andrew Thompson + Yusei Tahara Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -219,6 +227,7 @@ Michael Hudson-Doyle Anders Sigfridsson Yasir Suhail + Jason Michalski rafalgalczynski at gmail.com Floris Bruynooghe Laurens Van Houtven @@ -226,7 +235,6 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński - Yusei Tahara Christian Muirhead James Lan shoma hosaka @@ -238,7 +246,6 @@ Chris Lambacher coolbutuseless at gmail.com Rodrigo Araújo - w31rd0 Jim Baker James Robert Armin Ronacher @@ -252,6 +259,7 @@ Asmo Soinio Stefan Marr jiaaro + Mads Kiilerich opassembler.py Antony Lee Jim Hunziker @@ -261,11 +269,13 @@ soareschen Kurt Griffiths Mike Bayer + Matthew Miller Flavio Percoco Kristoffer Kleine yasirs Michael Chermside Anna Ravencroft + Dan Crosta Julien Phalip Dan Loewenherz diff --git a/pypy/doc/release-2.4.rst b/pypy/doc/release-2.4.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.4.rst @@ -0,0 +1,107 @@ +================================================= +PyPy 2.4 - ???????? +================================================= + +We're pleased to announce PyPy 2.4, a significant milestone on it's own right +and the proud parent of our recent PyPy3 and STM releases. + +This release contains several improvements and bugfixes. + +You can download the PyPy 2.4 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and for those who donate to our three sub-projects. +We've shown quite a bit of progress +but we're slowly running out of funds. 
+Please consider donating more, or even better convince your employer to donate, +so we can finish those projects! The three sub-projects are: + +* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatable version + we call PyPy3 2.3.1, and are working toward a Python 3.3 compatable version + +* `STM`_ (software transactional memory): We have release a first working version, and +continue to try out new promising paths of acheiving a fast multithreaded python + +* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_ + +.. _`Py3k`: http://pypy.org/py3donate.html +.. _`STM`: http://pypy.org/tmdonate2.html +.. _`NumPy`: http://pypy.org/numpydonate.html +.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison; +note that cpython's speed has not changed since 2.7.2) +due to its integrated tracing JIT compiler. + +This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows, +and OpenBSD, +as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +While we support 32 bit python on Windows, work on the native Windows 64 +bit python is still stalling, we would welcome a volunteer +to `handle that`_. + +.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org +.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation + +Highlights +========== + +Benchmarks improved after internal improvements in string and bytearray handling, +and a major rewrite of the GIL handling. Many of these improvements are offshoots +of the STM work. + +We merged in Python's 2.7.8 stdlib in a record time of one week, proving the +maturity of our underlying RPython code base and PyPy interpreter. 
+ +We welcomed more than 12 new contributors, and conducted two Google Summer of Code +projects XXX details? + +Issues reported with our previous release were fixed after reports from users on +our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at +#pypy. Here is a summary of the user-facing changes; +for more information see `whats-new`_: + +* Reduced internal copying of bytearray operations + +* Tweak the internal structure of StringBuilder to speed up large string +handling, which becomes advantageous on large programs at the cost of slightly +slower small *benchmark* type programs. + +* Boost performance of thread-local variables in both unjitted and jitted code + +* Move to a mixed polling and mutex GIL model that make mutli-threaded jitted + code run *much* faster + +* Optimize errno handling in linux + +* Remove ctypes pythonapi and ctypes.PyDLL, which never worked on PyPy + +* Fix performance regression on ufunc(, ) in numpy + +* Classes in the ast module are now distinct from structures used by the compiler, + which simplifies and speeds up translation of our source code to the PyPy binary + interpreter + +* Upgrade stdlib from 2.7.5 to 2.7.8 + +* + +* Many issues were resolved_ since the 2.3.1 release on June 8 + +.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html +.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved + +Please try it out and let us know what you think. We especially welcome +success stories, we know you are using PyPy, please tell us about it! 
+ +Cheers + +The PyPy Team + diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -67,6 +67,8 @@ 'Edd Barrett': ['edd'], 'Manuel Jacob': ['mjacob'], 'Rami Chowdhury': ['necaris'], + 'Stanislaw Halik':['w31rd0'], + 'wenzhu man':['wenzhuman'], } alias_map = {} diff --git a/pypy/test_all.py b/pypy/test_all.py --- a/pypy/test_all.py +++ b/pypy/test_all.py @@ -27,4 +27,21 @@ sys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) import pytest import pytest_cov + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + # We do this in runner.py, but buildbots run twisted which ruins inheritance + # in windows subprocesses. + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = SetErrorMode(flags) + SetErrorMode(old_mode | flags) + sys.exit(pytest.main(plugins=[pytest_cov])) diff --git a/pytest.py b/pytest.py --- a/pytest.py +++ b/pytest.py @@ -8,6 +8,21 @@ if __name__ == '__main__': # if run as a script or by 'python -m pytest' # we trigger the below "else" condition by the following import import pytest + import sys + if sys.platform == 'win32': + #Try to avoid opeing a dialog box if one of the tests causes a system error + import ctypes + winapi = ctypes.windll.kernel32 + SetErrorMode = winapi.SetErrorMode + SetErrorMode.argtypes=[ctypes.c_int] + + SEM_FAILCRITICALERRORS = 1 + SEM_NOGPFAULTERRORBOX = 2 + SEM_NOOPENFILEERRORBOX = 0x8000 + flags = SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX + #Since there is no GetErrorMode, do a double Set + old_mode = 
SetErrorMode(flags) + SetErrorMode(old_mode | flags) raise SystemExit(pytest.main()) # else we are imported diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -421,9 +421,13 @@ return lltype_to_annotation(lltype.Ptr(UNICODE)) def specialize_call(self, hop): + from rpython.rtyper.lltypesystem.rstr import (string_repr, + unicode_repr) hop.exception_cannot_occur() - assert hop.args_r[0].lowleveltype == hop.r_result.lowleveltype - v_ll_str, = hop.inputargs(*hop.args_r) + if strtype is str: + v_ll_str = hop.inputarg(string_repr, 0) + else: + v_ll_str = hop.inputarg(unicode_repr, 0) return hop.genop('same_as', [v_ll_str], resulttype = hop.r_result.lowleveltype) diff --git a/rpython/rtyper/test/test_annlowlevel.py b/rpython/rtyper/test/test_annlowlevel.py --- a/rpython/rtyper/test/test_annlowlevel.py +++ b/rpython/rtyper/test/test_annlowlevel.py @@ -34,6 +34,14 @@ res = self.interpret(f, [self.string_to_ll("abc")]) assert res == 3 + def test_llstr_const_char(self): + def f(arg): + s = llstr(hlstr(arg)[0]) + return len(s.chars) + + res = self.interpret(f, [self.string_to_ll("abc")]) + assert res == 1 + def test_hlunicode(self): s = mallocunicode(3) s.chars[0] = u"a" diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -407,7 +407,8 @@ run_param.dry_run = opts.dry_run if run_param.dry_run: - print >>out, run_param.__dict__ + print >>out, '\n'.join([str((k, getattr(run_param, k))) \ + for k in dir(run_param) if k[:2] != '__']) res = execute_tests(run_param, testdirs, logfile, out) From noreply at buildbot.pypy.org Fri Aug 29 17:12:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 17:12:32 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: test ioerror with unicode filename Message-ID: <20140829151232.9FF7C1D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: 
use-file-star-for-file Changeset: r73169:21addf9f84cc Date: 2014-08-29 11:12 -0400 http://bitbucket.org/pypy/pypy/changeset/21addf9f84cc/ Log: test ioerror with unicode filename diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -149,6 +149,8 @@ u'\xe9'.encode(sys.getfilesystemencoding()) except UnicodeEncodeError: skip("encoding not good enough") + exc = raises(IOError, self.file, 'zzz' + u'\xe9', 'r') + assert str(exc.value) == "[Errno 2] No such file or directory: u'zzz\\xe9'" f = self.file(self.temppath + u'\xe9', "w") f.close() From noreply at buildbot.pypy.org Fri Aug 29 17:32:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 17:32:23 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: skip this test on win32 Message-ID: <20140829153223.0508F1C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73170:f2ba31323129 Date: 2014-08-29 08:23 -0700 http://bitbucket.org/pypy/pypy/changeset/f2ba31323129/ Log: skip this test on win32 diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -62,6 +62,8 @@ self.interpret(f, []) + @py.test.mark.skipif("sys.platform == 'win32'") + # http://msdn.microsoft.com/en-us/library/86cebhfs.aspx def test_open_buffering_line(self): fname = str(self.tmpdir.join('file_1a')) From noreply at buildbot.pypy.org Fri Aug 29 17:32:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 17:32:24 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: fix this test for win32 Message-ID: <20140829153224.57F841C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73171:19dbd8189f60 Date: 2014-08-29 08:31 -0700 http://bitbucket.org/pypy/pypy/changeset/19dbd8189f60/ Log: 
fix this test for win32 diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -22,45 +22,56 @@ assert open(fname, "r").read() == "dupa" def test_open_errors(self): - def f(): - try: - open('zzz', 'badmode') - except ValueError: - pass - else: - assert False + def f(exc): + def g(): + try: + open('zzz', 'badmode') + except ValueError: + pass + else: + assert False - try: - open('zzz') - except OSError as e: - assert e.errno == errno.ENOENT - else: - assert False + try: + open('zzz') + except exc as e: + assert e.errno == errno.ENOENT + else: + assert False - try: - open('.') - except OSError as e: - assert e.errno == errno.EISDIR - else: - assert False + try: + open('.') + except exc as e: + if os.name == 'posix': + assert e.errno == errno.EISDIR + else: + assert e.errno == errno.EACCES + else: + assert False - try: - os.fdopen(42, "badmode") - except ValueError: - pass - else: - assert False + try: + os.fdopen(42, "badmode") + except ValueError: + pass + else: + assert False - fd = os.open('.', os.O_RDONLY, 0777) - try: - os.fdopen(fd) - except OSError as e: - assert e.errno == errno.EISDIR - else: - assert False - os.close(fd) + try: + fd = os.open('.', os.O_RDONLY, 0777) + except OSError as e: + assert os.name == 'nt' and e.errno == errno.EACCES + else: + assert os.name != 'nt' + try: + os.fdopen(fd) + except exc as e: + assert e.errno == errno.EISDIR + else: + assert False + os.close(fd) + return g - self.interpret(f, []) + f(IOError)() + self.interpret(f(OSError), []) @py.test.mark.skipif("sys.platform == 'win32'") # http://msdn.microsoft.com/en-us/library/86cebhfs.aspx From noreply at buildbot.pypy.org Fri Aug 29 17:36:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 17:36:51 +0200 (CEST) Subject: [pypy-commit] pypy default: fix rfile tests on win32 Message-ID: <20140829153651.113071C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian 
Kearns Branch: Changeset: r73172:38edb61347fa Date: 2014-08-29 08:31 -0700 http://bitbucket.org/pypy/pypy/changeset/38edb61347fa/ Log: fix rfile tests on win32 diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -22,46 +22,59 @@ assert open(fname, "r").read() == "dupa" def test_open_errors(self): - def f(): - try: - open('zzz', 'badmode') - except ValueError: - pass - else: - assert False + def f(exc): + def g(): + try: + open('zzz', 'badmode') + except ValueError: + pass + else: + assert False - try: - open('zzz') - except OSError as e: - assert e.errno == errno.ENOENT - else: - assert False + try: + open('zzz') + except exc as e: + assert e.errno == errno.ENOENT + else: + assert False - try: - open('.') - except OSError as e: - assert e.errno == errno.EISDIR - else: - assert False + try: + open('.') + except exc as e: + if os.name == 'posix': + assert e.errno == errno.EISDIR + else: + assert e.errno == errno.EACCES + else: + assert False - try: - os.fdopen(42, "badmode") - except ValueError: - pass - else: - assert False + try: + os.fdopen(42, "badmode") + except ValueError: + pass + else: + assert False - fd = os.open('.', os.O_RDONLY, 0777) - try: - os.fdopen(fd) - except OSError as e: - assert e.errno == errno.EISDIR - else: - assert False - os.close(fd) + try: + fd = os.open('.', os.O_RDONLY, 0777) + except OSError as e: + assert os.name == 'nt' and e.errno == errno.EACCES + else: + assert os.name != 'nt' + try: + os.fdopen(fd) + except exc as e: + assert e.errno == errno.EISDIR + else: + assert False + os.close(fd) + return g - self.interpret(f, []) + f(IOError)() + self.interpret(f(OSError), []) + @py.test.mark.skipif("sys.platform == 'win32'") + # http://msdn.microsoft.com/en-us/library/86cebhfs.aspx def test_open_buffering_line(self): fname = str(self.tmpdir.join('file_1a')) From noreply at buildbot.pypy.org Fri Aug 29 18:03:57 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Fri, 29 Aug 2014 18:03:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Add one specific function used during encoding to _pypyjson. Message-ID: <20140829160357.E0B861D22E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73173:e80c25f01061 Date: 2014-08-29 18:03 +0200 http://bitbucket.org/pypy/pypy/changeset/e80c25f01061/ Log: Add one specific function used during encoding to _pypyjson. It's a performance bottleneck in some cases. diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -529,3 +529,10 @@ _current_indent_level): yield chunk self.__remove_markers(markers, o) + + +# overwrite some helpers here with more efficient versions +try: + from _pypyjson import raw_encode_basestring_ascii +except ImportError: + pass diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py --- a/pypy/module/_pypyjson/__init__.py +++ b/pypy/module/_pypyjson/__init__.py @@ -7,4 +7,6 @@ interpleveldefs = { 'loads' : 'interp_decoder.loads', + 'raw_encode_basestring_ascii': + 'interp_encoder.raw_encode_basestring_ascii', } diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -0,0 +1,63 @@ +from rpython.rlib.rstring import StringBuilder + + +HEX = '0123456789abcdef' + +ESCAPE_DICT = { + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +ESCAPE_BEFORE_SPACE = [ESCAPE_DICT.get(chr(_i), '\\u%04x' % _i) + for _i in range(32)] + + +def raw_encode_basestring_ascii(space, w_string): + if space.isinstance_w(w_string, space.w_str): + s = space.str_w(w_string) + for c in s: + if c >= ' ' and c <= '~' and c != '"' and c != '\\': + pass + else: + break + else: + # the input is a string with only non-special ascii chars + return w_string + + w_string = 
space.call_method(w_string, 'decode', space.wrap('utf-8')) + + u = space.unicode_w(w_string) + sb = StringBuilder() + for c in u: + if c <= u'~': + if c == u'"' or c == u'\\': + sb.append('\\') + elif c < u' ': + sb.append(ESCAPE_BEFORE_SPACE[ord(c)]) + continue + sb.append(chr(ord(c))) + else: + if c <= u'\uffff': + sb.append('\\u') + sb.append(HEX[ord(c) >> 12]) + sb.append(HEX[(ord(c) >> 8) & 0x0f]) + sb.append(HEX[(ord(c) >> 4) & 0x0f]) + sb.append(HEX[ord(c) & 0x0f]) + else: + # surrogate pair + n = ord(c) - 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + sb.append('\\ud') + sb.append(HEX[(s1 >> 8) & 0x0f]) + sb.append(HEX[(s1 >> 4) & 0x0f]) + sb.append(HEX[s1 & 0x0f]) + s2 = 0xdc00 | (n & 0x3ff) + sb.append('\\ud') + sb.append(HEX[(s2 >> 8) & 0x0f]) + sb.append(HEX[(s2 >> 4) & 0x0f]) + sb.append(HEX[s2 & 0x0f]) + + res = sb.build() + return space.wrap(res) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -188,4 +188,23 @@ import _pypyjson # http://json.org/JSON_checker/test/fail25.json s = '["\ttab\tcharacter\tin\tstring\t"]' - raises(ValueError, "_pypyjson.loads(s)") \ No newline at end of file + raises(ValueError, "_pypyjson.loads(s)") + + def test_raw_encode_basestring_ascii(self): + import _pypyjson + def check(s): + s = _pypyjson.raw_encode_basestring_ascii(s) + assert type(s) is str + return s + assert check("") == "" + assert check(u"") == "" + assert check("abc ") == "abc " + assert check(u"abc ") == "abc " + raises(UnicodeDecodeError, check, "\xc0") + assert check("\xc2\x84") == "\\u0084" + assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45" + assert check(u"\ud808\udf45") == "\\ud808\\udf45" + assert check(u"\U00012345") == "\\ud808\\udf45" + assert check("a\"c") == "a\\\"c" + assert check("\\\"\b\f\n\r\t") == '\\\\\\"\\b\\f\\n\\r\\t' + assert check("\x07") == "\\u0007" From noreply at 
buildbot.pypy.org Fri Aug 29 18:08:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 18:08:46 +0200 (CEST) Subject: [pypy-commit] pypy default: skip these on windows, crash Message-ID: <20140829160846.A3E131C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73174:435d9a69a2f6 Date: 2014-08-29 08:55 -0700 http://bitbucket.org/pypy/pypy/changeset/435d9a69a2f6/ Log: skip these on windows, crash diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -232,12 +232,16 @@ def test_exception_from_close(self): import os + if os.name == 'nt': + skip("crashes on nt") f = self.file(self.temppath, 'w') os.close(f.fileno()) raises(IOError, f.close) # bad file descriptor def test_exception_from_del(self): import os, gc, sys, cStringIO + if os.name == 'nt': + skip("crashes on nt") f = self.file(self.temppath, 'w') g = cStringIO.StringIO() preverr = sys.stderr From noreply at buildbot.pypy.org Fri Aug 29 18:08:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 18:08:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fix this test against cpython on win32 Message-ID: <20140829160847.F1D221C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73175:dc7c03274026 Date: 2014-08-29 09:01 -0700 http://bitbucket.org/pypy/pypy/changeset/dc7c03274026/ Log: fix this test against cpython on win32 diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -554,14 +554,16 @@ import errno, sys f = open(fn) - exc = raises(EnvironmentError, f.truncate, 3) - if sys.platform == 'win32': - assert exc.value.errno == 5 # ERROR_ACCESS_DENIED + exc = raises(IOError, f.truncate, 3) + # CPython explicitly checks the file mode + # PyPy relies on the libc 
to raise the error + if '__pypy__' not in sys.builtin_module_names: + assert str(exc.value) == "File not open for writing" else: - # CPython explicitely checks the file mode - # PyPy relies on the libc to raise the error - assert (exc.value.message == "File not open for writing" or - exc.value.errno == errno.EINVAL) + if sys.platform == 'win32': + assert exc.value.errno == 5 # ERROR_ACCESS_DENIED + else: + assert exc.value.errno == errno.EINVAL f.close() def test_readinto(self): From noreply at buildbot.pypy.org Fri Aug 29 18:21:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 18:21:30 +0200 (CEST) Subject: [pypy-commit] pypy default: reenable these tests, work on buildbot? Message-ID: <20140829162130.51E2C1D22E6@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73176:9f0f00ddfc76 Date: 2014-08-29 09:21 -0700 http://bitbucket.org/pypy/pypy/changeset/9f0f00ddfc76/ Log: reenable these tests, work on buildbot? diff --git a/pypy/module/_file/test/test_file.py b/pypy/module/_file/test/test_file.py --- a/pypy/module/_file/test/test_file.py +++ b/pypy/module/_file/test/test_file.py @@ -232,16 +232,12 @@ def test_exception_from_close(self): import os - if os.name == 'nt': - skip("crashes on nt") f = self.file(self.temppath, 'w') os.close(f.fileno()) raises(IOError, f.close) # bad file descriptor def test_exception_from_del(self): import os, gc, sys, cStringIO - if os.name == 'nt': - skip("crashes on nt") f = self.file(self.temppath, 'w') g = cStringIO.StringIO() preverr = sys.stderr From noreply at buildbot.pypy.org Fri Aug 29 18:59:19 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 18:59:19 +0200 (CEST) Subject: [pypy-commit] pypy default: test file mode in rfile Message-ID: <20140829165919.BFF9A1D37F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73177:06ebd52f400f Date: 2014-08-29 12:54 -0400 http://bitbucket.org/pypy/pypy/changeset/06ebd52f400f/ Log: test file mode in 
rfile diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -137,7 +137,7 @@ c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) else: c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) - return RFile(ll_f) + return RFile(ll_f, mode) def create_temp_rfile(): @@ -159,7 +159,7 @@ finally: lltype.free(ll_mode, flavor='raw') _dircheck(ll_f) - return RFile(ll_f) + return RFile(ll_f, mode) def create_popen_file(command, type): @@ -179,15 +179,33 @@ class RFile(object): - def __init__(self, ll_file): + readable = False + writable = False + + def __init__(self, ll_file, mode='+'): self.ll_file = ll_file + if 'r' in mode: + self.readable = True + if 'w' in mode or 'a' in mode: + self.writable = True + if '+' in mode: + self.readable = self.writable = True def _check_closed(self): if not self.ll_file: raise ValueError("I/O operation on closed file") + def _check_reading(self): + if not self.readable: + raise OSError(0, "File not open for reading") + + def _check_writing(self): + if not self.writable: + raise OSError(0, "File not open for writing") + def write(self, value): self._check_closed() + self._check_writing() ll_value = rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw @@ -224,6 +242,7 @@ def read(self, size=-1): # XXX CPython uses a more delicate logic here self._check_closed() + self._check_reading() ll_file = self.ll_file if size == 0: return "" @@ -281,6 +300,7 @@ def truncate(self, arg=-1): self._check_closed() + self._check_writing() if arg == -1: arg = self.tell() self.flush() @@ -328,6 +348,7 @@ def readline(self, size=-1): self._check_closed() + self._check_reading() if size == 0: return "" elif size < 0: diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -112,10 +112,28 @@ def f(): f = open(fname, "w") + try: + 
f.read() + except exc as e: + assert e.errno == errno + else: + assert False + try: + f.readline() + except exc as e: + assert e.errno == errno + else: + assert False f.write("dupa\x00dupb") f.close() for mode in ['r', 'U']: f2 = open(fname, mode) + try: + f2.write('') + except exc as e: + assert e.errno == errno + else: + assert False dupa = f2.read(0) assert dupa == "" dupa = f2.read() @@ -132,7 +150,9 @@ assert dupa == "dupa\x00dupb" f2.close() + exc = IOError; errno = None f() + exc = OSError; errno = 0 self.interpret(f, []) def test_read_sequentially(self): @@ -188,11 +208,19 @@ new_fno = os.dup(f.fileno()) f2 = os.fdopen(new_fno, "w") f.close() + try: + f2.read() + except exc as e: + assert e.errno == errno + else: + assert False f2.write("xxx") f2.close() + exc = IOError; errno = None f() assert open(fname).read() == "xxx" + exc = OSError; errno = 0 self.interpret(f, []) assert open(fname).read() == "xxx" @@ -249,8 +277,18 @@ data = f.read() assert data == "hello w" f.close() + f = open(fname) + try: + f.truncate() + except exc as e: + assert e.errno == errno + else: + assert False + f.close() + exc = IOError; errno = None f() + exc = OSError; errno = 0 self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 18:59:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 18:59:21 +0200 (CEST) Subject: [pypy-commit] pypy default: fix translation Message-ID: <20140829165921.080C01D37F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73178:5ddeaa6b6665 Date: 2014-08-29 12:58 -0400 http://bitbucket.org/pypy/pypy/changeset/5ddeaa6b6665/ Log: fix translation diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -206,6 +206,7 @@ def write(self, value): self._check_closed() self._check_writing() + assert value is not None ll_value = rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw From noreply at 
buildbot.pypy.org Fri Aug 29 19:15:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 19:15:46 +0200 (CEST) Subject: [pypy-commit] pypy default: undo test mode in rfile Message-ID: <20140829171546.A28A21C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73179:f7221037bd92 Date: 2014-08-29 13:04 -0400 http://bitbucket.org/pypy/pypy/changeset/f7221037bd92/ Log: undo test mode in rfile diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -137,7 +137,7 @@ c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) else: c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) - return RFile(ll_f, mode) + return RFile(ll_f) def create_temp_rfile(): @@ -159,7 +159,7 @@ finally: lltype.free(ll_mode, flavor='raw') _dircheck(ll_f) - return RFile(ll_f, mode) + return RFile(ll_f) def create_popen_file(command, type): @@ -179,33 +179,15 @@ class RFile(object): - readable = False - writable = False - - def __init__(self, ll_file, mode='+'): + def __init__(self, ll_file): self.ll_file = ll_file - if 'r' in mode: - self.readable = True - if 'w' in mode or 'a' in mode: - self.writable = True - if '+' in mode: - self.readable = self.writable = True def _check_closed(self): if not self.ll_file: raise ValueError("I/O operation on closed file") - def _check_reading(self): - if not self.readable: - raise OSError(0, "File not open for reading") - - def _check_writing(self): - if not self.writable: - raise OSError(0, "File not open for writing") - def write(self, value): self._check_closed() - self._check_writing() assert value is not None ll_value = rffi.get_nonmovingbuffer(value) try: @@ -243,7 +225,6 @@ def read(self, size=-1): # XXX CPython uses a more delicate logic here self._check_closed() - self._check_reading() ll_file = self.ll_file if size == 0: return "" @@ -301,7 +282,6 @@ def truncate(self, arg=-1): self._check_closed() - self._check_writing() if 
arg == -1: arg = self.tell() self.flush() @@ -349,7 +329,6 @@ def readline(self, size=-1): self._check_closed() - self._check_reading() if size == 0: return "" elif size < 0: diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -112,28 +112,10 @@ def f(): f = open(fname, "w") - try: - f.read() - except exc as e: - assert e.errno == errno - else: - assert False - try: - f.readline() - except exc as e: - assert e.errno == errno - else: - assert False f.write("dupa\x00dupb") f.close() for mode in ['r', 'U']: f2 = open(fname, mode) - try: - f2.write('') - except exc as e: - assert e.errno == errno - else: - assert False dupa = f2.read(0) assert dupa == "" dupa = f2.read() @@ -150,9 +132,7 @@ assert dupa == "dupa\x00dupb" f2.close() - exc = IOError; errno = None f() - exc = OSError; errno = 0 self.interpret(f, []) def test_read_sequentially(self): @@ -208,19 +188,11 @@ new_fno = os.dup(f.fileno()) f2 = os.fdopen(new_fno, "w") f.close() - try: - f2.read() - except exc as e: - assert e.errno == errno - else: - assert False f2.write("xxx") f2.close() - exc = IOError; errno = None f() assert open(fname).read() == "xxx" - exc = OSError; errno = 0 self.interpret(f, []) assert open(fname).read() == "xxx" @@ -277,18 +249,8 @@ data = f.read() assert data == "hello w" f.close() - f = open(fname) - try: - f.truncate() - except exc as e: - assert e.errno == errno - else: - assert False - f.close() - exc = IOError; errno = None f() - exc = OSError; errno = 0 self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 19:15:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 19:15:47 +0200 (CEST) Subject: [pypy-commit] pypy default: fix this test on older cpythons Message-ID: <20140829171547.DDC361C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73180:48a6c8cd92f1 Date: 2014-08-29 13:15 -0400 
http://bitbucket.org/pypy/pypy/changeset/48a6c8cd92f1/ Log: fix this test on older cpythons diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -23,7 +23,7 @@ def test_open_errors(self): def f(exc): - def g(): + def g(run): try: open('zzz', 'badmode') except ValueError: @@ -61,17 +61,18 @@ assert os.name == 'nt' and e.errno == errno.EACCES else: assert os.name != 'nt' - try: - os.fdopen(fd) - except exc as e: - assert e.errno == errno.EISDIR - else: - assert False + if run: + try: + os.fdopen(fd) + except exc as e: + assert e.errno == errno.EISDIR + else: + assert False os.close(fd) return g - f(IOError)() - self.interpret(f(OSError), []) + f(IOError)(sys.version_info >= (2, 7, 9)) + self.interpret(f(OSError), [True]) @py.test.mark.skipif("sys.platform == 'win32'") # http://msdn.microsoft.com/en-us/library/86cebhfs.aspx From noreply at buildbot.pypy.org Fri Aug 29 19:31:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 19:31:38 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829173138.5B4191C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73181:513125b111be Date: 2014-08-29 12:21 -0400 http://bitbucket.org/pypy/pypy/changeset/513125b111be/ Log: merge default diff --git a/lib-python/2.7/json/encoder.py b/lib-python/2.7/json/encoder.py --- a/lib-python/2.7/json/encoder.py +++ b/lib-python/2.7/json/encoder.py @@ -529,3 +529,10 @@ _current_indent_level): yield chunk self.__remove_markers(markers, o) + + +# overwrite some helpers here with more efficient versions +try: + from _pypyjson import raw_encode_basestring_ascii +except ImportError: + pass diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ 
-554,14 +554,16 @@ import errno, sys f = open(fn) - exc = raises(EnvironmentError, f.truncate, 3) - if sys.platform == 'win32': - assert exc.value.errno == 5 # ERROR_ACCESS_DENIED + exc = raises(IOError, f.truncate, 3) + # CPython explicitly checks the file mode + # PyPy relies on the libc to raise the error + if '__pypy__' not in sys.builtin_module_names: + assert str(exc.value) == "File not open for writing" else: - # CPython explicitely checks the file mode - # PyPy relies on the libc to raise the error - assert (exc.value.message == "File not open for writing" or - exc.value.errno == errno.EINVAL) + if sys.platform == 'win32': + assert exc.value.errno == 5 # ERROR_ACCESS_DENIED + else: + assert exc.value.errno == errno.EINVAL f.close() def test_readinto(self): diff --git a/pypy/module/_pypyjson/__init__.py b/pypy/module/_pypyjson/__init__.py --- a/pypy/module/_pypyjson/__init__.py +++ b/pypy/module/_pypyjson/__init__.py @@ -7,4 +7,6 @@ interpleveldefs = { 'loads' : 'interp_decoder.loads', + 'raw_encode_basestring_ascii': + 'interp_encoder.raw_encode_basestring_ascii', } diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py new file mode 100644 --- /dev/null +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -0,0 +1,63 @@ +from rpython.rlib.rstring import StringBuilder + + +HEX = '0123456789abcdef' + +ESCAPE_DICT = { + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +ESCAPE_BEFORE_SPACE = [ESCAPE_DICT.get(chr(_i), '\\u%04x' % _i) + for _i in range(32)] + + +def raw_encode_basestring_ascii(space, w_string): + if space.isinstance_w(w_string, space.w_str): + s = space.str_w(w_string) + for c in s: + if c >= ' ' and c <= '~' and c != '"' and c != '\\': + pass + else: + break + else: + # the input is a string with only non-special ascii chars + return w_string + + w_string = space.call_method(w_string, 'decode', space.wrap('utf-8')) + + u = space.unicode_w(w_string) + sb = StringBuilder() + for c in 
u: + if c <= u'~': + if c == u'"' or c == u'\\': + sb.append('\\') + elif c < u' ': + sb.append(ESCAPE_BEFORE_SPACE[ord(c)]) + continue + sb.append(chr(ord(c))) + else: + if c <= u'\uffff': + sb.append('\\u') + sb.append(HEX[ord(c) >> 12]) + sb.append(HEX[(ord(c) >> 8) & 0x0f]) + sb.append(HEX[(ord(c) >> 4) & 0x0f]) + sb.append(HEX[ord(c) & 0x0f]) + else: + # surrogate pair + n = ord(c) - 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + sb.append('\\ud') + sb.append(HEX[(s1 >> 8) & 0x0f]) + sb.append(HEX[(s1 >> 4) & 0x0f]) + sb.append(HEX[s1 & 0x0f]) + s2 = 0xdc00 | (n & 0x3ff) + sb.append('\\ud') + sb.append(HEX[(s2 >> 8) & 0x0f]) + sb.append(HEX[(s2 >> 4) & 0x0f]) + sb.append(HEX[s2 & 0x0f]) + + res = sb.build() + return space.wrap(res) diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -188,4 +188,23 @@ import _pypyjson # http://json.org/JSON_checker/test/fail25.json s = '["\ttab\tcharacter\tin\tstring\t"]' - raises(ValueError, "_pypyjson.loads(s)") \ No newline at end of file + raises(ValueError, "_pypyjson.loads(s)") + + def test_raw_encode_basestring_ascii(self): + import _pypyjson + def check(s): + s = _pypyjson.raw_encode_basestring_ascii(s) + assert type(s) is str + return s + assert check("") == "" + assert check(u"") == "" + assert check("abc ") == "abc " + assert check(u"abc ") == "abc " + raises(UnicodeDecodeError, check, "\xc0") + assert check("\xc2\x84") == "\\u0084" + assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45" + assert check(u"\ud808\udf45") == "\\ud808\\udf45" + assert check(u"\U00012345") == "\\ud808\\udf45" + assert check("a\"c") == "a\\\"c" + assert check("\\\"\b\f\n\r\t") == '\\\\\\"\\b\\f\\n\\r\\t' + assert check("\x07") == "\\u0007" From noreply at buildbot.pypy.org Fri Aug 29 19:31:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 19:31:39 +0200 (CEST) 
Subject: [pypy-commit] pypy default: check file mode before read/write Message-ID: <20140829173139.AFAB91C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73182:7dd943c6998d Date: 2014-08-29 13:25 -0400 http://bitbucket.org/pypy/pypy/changeset/7dd943c6998d/ Log: check file mode before read/write diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -30,6 +30,8 @@ w_name = None mode = "" binary = False + readable = False + writable = False softspace= 0 # Required according to file object docs encoding = None errors = None @@ -61,6 +63,12 @@ self.fd = fd self.mode = mode self.binary = "b" in mode + if 'r' in mode or 'U' in mode: + self.readable = True + if 'w' in mode or 'a' in mode: + self.writable = True + if '+' in mode: + self.readable = self.writable = True if w_name is not None: self.w_name = w_name self.stream = stream @@ -89,6 +97,16 @@ self.space.wrap("I/O operation on closed file") ) + def check_readable(self): + if not self.readable: + raise OperationError(self.space.w_IOError, self.space.wrap( + "File not open for reading")) + + def check_writable(self): + if not self.writable: + raise OperationError(self.space.w_IOError, self.space.wrap( + "File not open for writing")) + def getstream(self): """Return self.stream or raise an app-level ValueError if missing (i.e. 
if the file is closed).""" @@ -176,6 +194,7 @@ @unwrap_spec(n=int) def direct_read(self, n=-1): stream = self.getstream() + self.check_readable() if n < 0: return stream.readall() else: @@ -201,6 +220,7 @@ @unwrap_spec(size=int) def direct_readline(self, size=-1): stream = self.getstream() + self.check_readable() if size < 0: return stream.readline() else: @@ -227,6 +247,7 @@ @unwrap_spec(size=int) def direct_readlines(self, size=0): stream = self.getstream() + self.check_readable() # this is implemented as: .read().split('\n') # except that it keeps the \n in the resulting strings if size <= 0: @@ -260,6 +281,7 @@ def direct_truncate(self, w_size=None): # note: a wrapped size! stream = self.getstream() + self.check_writable() space = self.space if space.is_none(w_size): size = stream.tell() @@ -269,6 +291,7 @@ def direct_write(self, w_data): space = self.space + self.check_writable() if self.binary: data = space.getarg_w('s*', w_data).as_str() else: @@ -462,6 +485,7 @@ space = self.space self.check_closed() + self.check_writable() lines = space.fixedview(w_lines) for i, w_line in enumerate(lines): if not space.isinstance_w(w_line, space.w_str): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -555,15 +555,7 @@ import errno, sys f = open(fn) exc = raises(IOError, f.truncate, 3) - # CPython explicitly checks the file mode - # PyPy relies on the libc to raise the error - if '__pypy__' not in sys.builtin_module_names: - assert str(exc.value) == "File not open for writing" - else: - if sys.platform == 'win32': - assert exc.value.errno == 5 # ERROR_ACCESS_DENIED - else: - assert exc.value.errno == errno.EINVAL + assert str(exc.value) == "File not open for writing" f.close() def test_readinto(self): From noreply at buildbot.pypy.org Fri Aug 29 19:31:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 
19:31:40 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829173140.E2F451C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73183:64c36466fbba Date: 2014-08-29 13:31 -0400 http://bitbucket.org/pypy/pypy/changeset/64c36466fbba/ Log: merge default diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -28,6 +28,8 @@ w_name = None mode = "" binary = False + readable = False + writable = False softspace= 0 # Required according to file object docs encoding = None errors = None @@ -57,6 +59,12 @@ self.stream = stream self.mode = mode self.binary = "b" in mode + if 'r' in mode or 'U' in mode: + self.readable = True + if 'w' in mode or 'a' in mode: + self.writable = True + if '+' in mode: + self.readable = self.writable = True getopenstreams(self.space)[stream] = None def check_closed(self): @@ -65,6 +73,16 @@ self.space.wrap("I/O operation on closed file") ) + def check_readable(self): + if not self.readable: + raise OperationError(self.space.w_IOError, self.space.wrap( + "File not open for reading")) + + def check_writable(self): + if not self.writable: + raise OperationError(self.space.w_IOError, self.space.wrap( + "File not open for writing")) + def getstream(self): """Return self.stream or raise an app-level ValueError if missing (i.e. 
if the file is closed).""" @@ -140,16 +158,19 @@ @unwrap_spec(n=int) def direct_read(self, n=-1): stream = self.getstream() + self.check_readable() return stream.read(n) @unwrap_spec(size=int) def direct_readline(self, size=-1): stream = self.getstream() + self.check_readable() return stream.readline(size) @unwrap_spec(size=int) def direct_readlines(self, size=0): stream = self.getstream() + self.check_readable() # this is implemented as: .read().split('\n') # except that it keeps the \n in the resulting strings if size <= 0: @@ -183,6 +204,7 @@ def direct_truncate(self, w_size=None): # note: a wrapped size! stream = self.getstream() + self.check_writable() space = self.space if space.is_none(w_size): size = stream.tell() @@ -192,6 +214,7 @@ def direct_write(self, w_data): space = self.space + self.check_writable() if self.binary: data = space.getarg_w('s*', w_data).as_str() else: @@ -389,6 +412,7 @@ space = self.space self.check_closed() + self.check_writable() lines = space.fixedview(w_lines) for i, w_line in enumerate(lines): if not space.isinstance_w(w_line, space.w_str): diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -555,15 +555,7 @@ import errno, sys f = open(fn) exc = raises(IOError, f.truncate, 3) - # CPython explicitly checks the file mode - # PyPy relies on the libc to raise the error - if '__pypy__' not in sys.builtin_module_names: - assert str(exc.value) == "File not open for writing" - else: - if sys.platform == 'win32': - assert exc.value.errno == 5 # ERROR_ACCESS_DENIED - else: - assert exc.value.errno == errno.EINVAL + assert str(exc.value) == "File not open for writing" f.close() def test_readinto(self): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -188,6 +188,7 @@ def write(self, value): self._check_closed() + assert value is 
not None ll_value = rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -23,7 +23,7 @@ def test_open_errors(self): def f(exc): - def g(): + def g(run): try: open('zzz', 'badmode') except ValueError: @@ -61,17 +61,18 @@ assert os.name == 'nt' and e.errno == errno.EACCES else: assert os.name != 'nt' - try: - os.fdopen(fd) - except exc as e: - assert e.errno == errno.EISDIR - else: - assert False + if run: + try: + os.fdopen(fd) + except exc as e: + assert e.errno == errno.EISDIR + else: + assert False os.close(fd) return g - f(IOError)() - self.interpret(f(OSError), []) + f(IOError)(sys.version_info >= (2, 7, 9)) + self.interpret(f(OSError), [True]) @py.test.mark.skipif("sys.platform == 'win32'") # http://msdn.microsoft.com/en-us/library/86cebhfs.aspx From noreply at buildbot.pypy.org Fri Aug 29 19:35:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 19:35:42 +0200 (CEST) Subject: [pypy-commit] pypy default: try enforceargs instead Message-ID: <20140829173542.9391D1C35F7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73184:0e6e65cd1b55 Date: 2014-08-29 13:34 -0400 http://bitbucket.org/pypy/pypy/changeset/0e6e65cd1b55/ Log: try enforceargs instead diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -5,6 +5,7 @@ import os, stat, errno from rpython.rlib import rposix +from rpython.rlib.objectmodel import enforceargs from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype @@ -186,9 +187,9 @@ if not self.ll_file: raise ValueError("I/O operation on closed file") + @enforceargs(None, str) def write(self, value): self._check_closed() - assert value is not None ll_value = 
rffi.get_nonmovingbuffer(value) try: # note that since we got a nonmoving buffer, it is either raw From noreply at buildbot.pypy.org Fri Aug 29 20:15:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 20:15:38 +0200 (CEST) Subject: [pypy-commit] pypy default: rfile test write after close Message-ID: <20140829181538.0DFB61D37DF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73185:46aab882b557 Date: 2014-08-29 14:14 -0400 http://bitbucket.org/pypy/pypy/changeset/46aab882b557/ Log: rfile test write after close diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -16,6 +16,13 @@ f = open(fname, "w") f.write("dupa") f.close() + try: + f.write("dupb") + except ValueError: + pass + else: + assert False + f.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 20:24:21 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 29 Aug 2014 20:24:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Presize the StringBuilder to the length of the input string, which is the minimum length for the output Message-ID: <20140829182421.B76A91D37DF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73186:e435043d0dbe Date: 2014-08-29 11:23 -0700 http://bitbucket.org/pypy/pypy/changeset/e435043d0dbe/ Log: Presize the StringBuilder to the length of the input string, which is the minimum length for the output diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py --- a/pypy/module/_pypyjson/interp_encoder.py +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -29,7 +29,7 @@ w_string = space.call_method(w_string, 'decode', space.wrap('utf-8')) u = space.unicode_w(w_string) - sb = StringBuilder() + sb = StringBuilder(len(u)) for c in u: if c <= u'~': if c == u'"' or c == u'\\': From noreply at buildbot.pypy.org Fri Aug 29 20:24:23 2014 From: noreply at 
buildbot.pypy.org (alex_gaynor) Date: Fri, 29 Aug 2014 20:24:23 +0200 (CEST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <20140829182423.0511F1D37DF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73187:74f177d4a019 Date: 2014-08-29 11:23 -0700 http://bitbucket.org/pypy/pypy/changeset/74f177d4a019/ Log: merged upstream diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -16,6 +16,13 @@ f = open(fname, "w") f.write("dupa") f.close() + try: + f.write("dupb") + except ValueError: + pass + else: + assert False + f.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 22:03:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 22:03:58 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140829200358.778021D3805@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73188:b5b2cc993c2c Date: 2014-08-29 15:50 -0400 http://bitbucket.org/pypy/pypy/changeset/b5b2cc993c2c/ Log: cleanup diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -52,31 +52,34 @@ return rffi.llexternal(*args, compilation_info=eci, **kwargs) c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', + [rffi.INT, rffi.CCHARP], FILEP) +c_tmpfile = llexternal('tmpfile', [], FILEP) + c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) -c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - FILEP], rffi.SIZE_T) -c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - FILEP], rffi.SIZE_T) +c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) + +c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, 
rffi.SIZE_T], + rffi.INT) + +c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) +c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], rffi.CCHARP) +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], + rffi.SIZE_T) + +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], + rffi.SIZE_T) +c_fflush = llexternal('fflush', [FILEP], rffi.INT) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) + +c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT) +c_ftell = llexternal('ftell', [FILEP], rffi.LONG) +c_fileno = llexternal(fileno, [FILEP], rffi.INT) + c_feof = llexternal('feof', [FILEP], rffi.INT) c_ferror = llexternal('ferror', [FILEP], rffi.INT) c_clearerr = llexternal('clearerr', [FILEP], lltype.Void) -c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], - rffi.INT) -c_tmpfile = llexternal('tmpfile', [], FILEP) -c_fileno = llexternal(fileno, [FILEP], rffi.INT) -c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', - [rffi.INT, rffi.CCHARP], FILEP) -c_ftell = llexternal('ftell', [FILEP], rffi.LONG) -c_fflush = llexternal('fflush', [FILEP], rffi.INT) -c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) - -c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) -c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], - rffi.CCHARP) - -c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) -c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) def _error(ll_file): @@ -141,14 +144,6 @@ return RFile(ll_f) -def create_temp_rfile(): - res = c_tmpfile() - if not res: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return RFile(res) - - def create_fdopen_rfile(fd, mode="r"): mode = _sanitize_mode(mode) ll_mode = rffi.str2charp(mode) @@ -163,6 +158,14 @@ return RFile(ll_f) +def 
create_temp_rfile(): + res = c_tmpfile() + if not res: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + return RFile(res) + + def create_popen_file(command, type): ll_command = rffi.str2charp(command) try: From noreply at buildbot.pypy.org Fri Aug 29 22:03:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 22:03:59 +0200 (CEST) Subject: [pypy-commit] pypy default: cpython releases the gil for these Message-ID: <20140829200359.BA9131D3805@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73189:62f4648aed86 Date: 2014-08-29 15:53 -0400 http://bitbucket.org/pypy/pypy/changeset/62f4648aed86/ Log: cpython releases the gil for these diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -57,8 +57,8 @@ [rffi.INT, rffi.CCHARP], FILEP) c_tmpfile = llexternal('tmpfile', [], FILEP) -c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) -c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) +c_fclose = llexternal('fclose', [FILEP], rffi.INT) +c_pclose = llexternal('pclose', [FILEP], rffi.INT) c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) From noreply at buildbot.pypy.org Fri Aug 29 22:19:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 22:19:27 +0200 (CEST) Subject: [pypy-commit] pypy py3k: fsencode the executable Message-ID: <20140829201927.F27101D22E6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73190:b08b3a507401 Date: 2014-08-29 12:59 -0700 http://bitbucket.org/pypy/pypy/changeset/b08b3a507401/ Log: fsencode the executable diff --git a/lib_pypy/_tkinter/__init__.py b/lib_pypy/_tkinter/__init__.py --- a/lib_pypy/_tkinter/__init__.py +++ b/lib_pypy/_tkinter/__init__.py @@ -5,6 +5,7 @@ # This version is based on cffi, and is a translation of _tkinter.c # from CPython, version 2.7.4. 
+import os import sys class TclError(Exception): @@ -54,4 +55,4 @@ return tuple(result) -tklib.Tcl_FindExecutable(sys.executable) +tklib.Tcl_FindExecutable(os.fsencode(sys.executable)) From noreply at buildbot.pypy.org Fri Aug 29 22:19:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 22:19:29 +0200 (CEST) Subject: [pypy-commit] pypy py3k: readapt tests to py3 Message-ID: <20140829201929.6DC381D22E6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73191:559bee04d58b Date: 2014-08-29 12:59 -0700 http://bitbucket.org/pypy/pypy/changeset/559bee04d58b/ Log: readapt tests to py3 diff --git a/pypy/interpreter/test/test_nestedscope.py b/pypy/interpreter/test/test_nestedscope.py --- a/pypy/interpreter/test/test_nestedscope.py +++ b/pypy/interpreter/test/test_nestedscope.py @@ -61,7 +61,7 @@ def test_cell_repr(self): import re - from repr import repr as r # Don't shadow builtin repr + from reprlib import repr as r # Don't shadow builtin repr def get_cell(): x = 42 @@ -70,7 +70,7 @@ return inner x = get_cell().__closure__[0] assert re.match(r'', repr(x)) - assert re.match(r'', r(x)) + assert re.match(r'', r(x)) def get_cell(): if False: diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py --- a/pypy/module/_csv/test/test_dialect.py +++ b/pypy/module/_csv/test/test_dialect.py @@ -89,8 +89,8 @@ exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter="") assert exc_info.value.args[0] == '"delimiter" must be a 1-character string' - exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=u",") - assert exc_info.value.args[0] == '"delimiter" must be string, not unicode' + exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=b",") + assert exc_info.value.args[0] == '"delimiter" must be string, not bytes' exc_info = raises(TypeError, _csv.register_dialect, 'foo1', delimiter=4) assert exc_info.value.args[0] == '"delimiter" must be string, not int' 
diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -284,14 +284,6 @@ finally: os.chdir(cwdb) - def test_fdopen_keeps_fd_open_on_errors(self): - path = self.path - posix = self.posix - fd = posix.open(path, posix.O_RDONLY) - exc = raises(OSError, posix.fdopen, fd, 'w') - assert str(exc.value) == "[Errno 22] Invalid argument" - posix.close(fd) # fd should not be closed - def test_getcwdb(self): assert isinstance(self.posix.getcwdb(), bytes) From noreply at buildbot.pypy.org Fri Aug 29 22:19:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 22:19:30 +0200 (CEST) Subject: [pypy-commit] pypy py3k: we now match 2.7.8/3.3.5 behavior, test against that Message-ID: <20140829201930.A65691D22E6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73192:205a8e437a9a Date: 2014-08-29 13:00 -0700 http://bitbucket.org/pypy/pypy/changeset/205a8e437a9a/ Log: we now match 2.7.8/3.3.5 behavior, test against that diff --git a/lib-python/3/test/test_poll.py b/lib-python/3/test/test_poll.py --- a/lib-python/3/test/test_poll.py +++ b/lib-python/3/test/test_poll.py @@ -151,14 +151,11 @@ if x != 5: self.fail('Overflow must have occurred') - pollster = select.poll() - # Issue 15989 - self.assertRaises(OverflowError, pollster.register, 0, - _testcapi.SHRT_MAX + 1) - self.assertRaises(OverflowError, pollster.register, 0, - _testcapi.USHRT_MAX + 1) - self.assertRaises(OverflowError, pollster.poll, _testcapi.INT_MAX + 1) - self.assertRaises(OverflowError, pollster.poll, _testcapi.UINT_MAX + 1) + # Issues #15989, #17919 + self.assertRaises(OverflowError, pollster.register, 0, -1) + self.assertRaises(OverflowError, pollster.register, 0, 1 << 64) + self.assertRaises(OverflowError, pollster.modify, 1, -1) + self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64) def test_main(): run_unittest(PollTests) From noreply 
at buildbot.pypy.org Fri Aug 29 22:19:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 22:19:31 +0200 (CEST) Subject: [pypy-commit] pypy py3k: n/a to py3, bytes.decode always returns unicode Message-ID: <20140829201931.E6D9A1D22E6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73193:e9bdfdd996b2 Date: 2014-08-29 13:04 -0700 http://bitbucket.org/pypy/pypy/changeset/e9bdfdd996b2/ Log: n/a to py3, bytes.decode always returns unicode diff --git a/pypy/interpreter/pyparser/pyparse.py b/pypy/interpreter/pyparser/pyparse.py --- a/pypy/interpreter/pyparser/pyparse.py +++ b/pypy/interpreter/pyparser/pyparse.py @@ -8,8 +8,6 @@ return bytes w_text = space.call_method(space.wrapbytes(bytes), "decode", space.wrap(encoding)) - if not space.isinstance_w(w_text, space.w_unicode): - raise error.SyntaxError("codec did not return a unicode object") w_recoded = space.call_method(w_text, "encode", space.wrap("utf-8")) return space.bytes_w(w_recoded) diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -40,13 +40,6 @@ tree = self.parse("a日本 = 32") tree = self.parse("日本 = 32") - def test_non_unicode_codec(self): - exc = py.test.raises(SyntaxError, self.parse, """\ -# coding: string-escape -\x70\x72\x69\x6e\x74\x20\x32\x2b\x32\x0a -""").value - assert exc.msg == "codec did not return a unicode object" - def test_syntax_error(self): parse = self.parse exc = py.test.raises(SyntaxError, parse, "name another for").value From noreply at buildbot.pypy.org Fri Aug 29 22:19:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 29 Aug 2014 22:19:33 +0200 (CEST) Subject: [pypy-commit] pypy py3k: readapt to py3k Message-ID: <20140829201933.2A7DB1D22E6@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73194:a1d33cd5a124 Date: 2014-08-29 13:18 -0700 
http://bitbucket.org/pypy/pypy/changeset/a1d33cd5a124/ Log: readapt to py3k diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py --- a/pypy/interpreter/nestedscope.py +++ b/pypy/interpreter/nestedscope.py @@ -78,7 +78,7 @@ content = "%s object at 0x%s" % (space.type(self.w_value).name, self.w_value.getaddrstring(space)) s = "" % (self.getaddrstring(space), content) - return space.wrap(s) + return space.wrap(s.decode('utf-8')) def descr__cell_contents(self, space): try: From noreply at buildbot.pypy.org Fri Aug 29 22:37:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 22:37:42 +0200 (CEST) Subject: [pypy-commit] pypy default: more cleanup Message-ID: <20140829203742.8F1FE1D37BA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r73195:e3103fe6a9e9 Date: 2014-08-29 16:33 -0400 http://bitbucket.org/pypy/pypy/changeset/e3103fe6a9e9/ Log: more cleanup diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -57,12 +57,12 @@ [rffi.INT, rffi.CCHARP], FILEP) c_tmpfile = llexternal('tmpfile', [], FILEP) +c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], + rffi.INT) + c_fclose = llexternal('fclose', [FILEP], rffi.INT) c_pclose = llexternal('pclose', [FILEP], rffi.INT) -c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], - rffi.INT) - c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], rffi.CCHARP) c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], @@ -125,37 +125,38 @@ try: ll_mode = rffi.str2charp(mode) try: - ll_f = c_fopen(ll_name, ll_mode) - if not ll_f: + ll_file = c_fopen(ll_name, ll_mode) + if not ll_file: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') - _dircheck(ll_f) + 
_dircheck(ll_file) if buffering >= 0: + buf = lltype.nullptr(rffi.CCHARP.TO) if buffering == 0: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) + c_setvbuf(ll_file, buf, _IONBF, 0) elif buffering == 1: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) + c_setvbuf(ll_file, buf, _IOLBF, BUFSIZ) else: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) - return RFile(ll_f) + c_setvbuf(ll_file, buf, _IOFBF, buffering) + return RFile(ll_file) def create_fdopen_rfile(fd, mode="r"): mode = _sanitize_mode(mode) ll_mode = rffi.str2charp(mode) try: - ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) - if not ll_f: + ll_file = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) + if not ll_file: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') - _dircheck(ll_f) - return RFile(ll_f) + _dircheck(ll_file) + return RFile(ll_file) def create_temp_rfile(): @@ -171,39 +172,24 @@ try: ll_type = rffi.str2charp(type) try: - ll_f = c_popen(ll_command, ll_type) - if not ll_f: + ll_file = c_popen(ll_command, ll_type) + if not ll_file: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_type, flavor='raw') finally: lltype.free(ll_command, flavor='raw') - return RPopenFile(ll_f) + return RFile(ll_file, c_pclose) class RFile(object): - def __init__(self, ll_file): - self.ll_file = ll_file + def __init__(self, ll_file, do_close=c_fclose): + self._ll_file = ll_file + self._do_close = do_close - def _check_closed(self): - if not self.ll_file: - raise ValueError("I/O operation on closed file") - - @enforceargs(None, str) - def write(self, value): - self._check_closed() - ll_value = rffi.get_nonmovingbuffer(value) - try: - # note that since we got a nonmoving buffer, it is either raw - # or already cannot move, so the arithmetics below are fine - length = len(value) - bytes = c_fwrite(ll_value, 1, length, self.ll_file) - if bytes != length: - errno = 
rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - finally: - rffi.free_nonmovingbuffer(value, ll_value) + def __del__(self): + self.close() def close(self): """Closes the described file. @@ -214,22 +200,24 @@ The actual return value may be determined with os.WEXITSTATUS. """ res = 0 - ll_file = self.ll_file + ll_file = self._ll_file if ll_file: # double close is allowed - self.ll_file = lltype.nullptr(FILEP.TO) + self._ll_file = lltype.nullptr(FILEP.TO) res = self._do_close(ll_file) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) return res - _do_close = staticmethod(c_fclose) # overridden in RPopenFile + def _check_closed(self): + if not self._ll_file: + raise ValueError("I/O operation on closed file") def read(self, size=-1): # XXX CPython uses a more delicate logic here self._check_closed() - ll_file = self.ll_file + ll_file = self._ll_file if size == 0: return "" elif size < 0: @@ -258,47 +246,8 @@ s = buf.str(returned_size) return s - def seek(self, pos, whence=0): - self._check_closed() - res = c_fseek(self.ll_file, pos, whence) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def fileno(self): - self._check_closed() - return intmask(c_fileno(self.ll_file)) - - def tell(self): - self._check_closed() - res = intmask(c_ftell(self.ll_file)) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return res - - def flush(self): - self._check_closed() - res = c_fflush(self.ll_file) - if res != 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def truncate(self, arg=-1): - self._check_closed() - if arg == -1: - arg = self.tell() - self.flush() - res = c_ftruncate(self.fileno(), arg) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def __del__(self): - self.close() - def _readline1(self, raw_buf): - ll_file = self.ll_file + ll_file = self._ll_file for i in 
range(BASE_LINE_SIZE): raw_buf[i] = '\n' @@ -351,7 +300,7 @@ s.append_charpsize(buf.raw, c) return s.build() else: # size > 0 - ll_file = self.ll_file + ll_file = self._ll_file s = StringBuilder() while s.getlength() < size: c = c_getc(ll_file) @@ -365,6 +314,53 @@ break return s.build() + @enforceargs(None, str) + def write(self, value): + self._check_closed() + ll_value = rffi.get_nonmovingbuffer(value) + try: + # note that since we got a nonmoving buffer, it is either raw + # or already cannot move, so the arithmetics below are fine + length = len(value) + bytes = c_fwrite(ll_value, 1, length, self._ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + rffi.free_nonmovingbuffer(value, ll_value) -class RPopenFile(RFile): - _do_close = staticmethod(c_pclose) + def flush(self): + self._check_closed() + res = c_fflush(self._ll_file) + if res != 0: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def truncate(self, arg=-1): + self._check_closed() + if arg == -1: + arg = self.tell() + self.flush() + res = c_ftruncate(self.fileno(), arg) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def seek(self, pos, whence=0): + self._check_closed() + res = c_fseek(self._ll_file, pos, whence) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def tell(self): + self._check_closed() + res = intmask(c_ftell(self._ll_file)) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + return res + + def fileno(self): + self._check_closed() + return intmask(c_fileno(self._ll_file)) From noreply at buildbot.pypy.org Fri Aug 29 22:37:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 22:37:43 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: merge default Message-ID: <20140829203743.DBCF61D37BA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns 
Branch: use-file-star-for-file Changeset: r73196:3f3b478b3f53 Date: 2014-08-29 16:33 -0400 http://bitbucket.org/pypy/pypy/changeset/3f3b478b3f53/ Log: merge default diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py --- a/pypy/module/_pypyjson/interp_encoder.py +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -29,7 +29,7 @@ w_string = space.call_method(w_string, 'decode', space.wrap('utf-8')) u = space.unicode_w(w_string) - sb = StringBuilder() + sb = StringBuilder(len(u)) for c in u: if c <= u'~': if c == u'"' or c == u'\\': diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -5,6 +5,7 @@ import os, stat, errno from rpython.rlib import rposix +from rpython.rlib.objectmodel import enforceargs from rpython.rlib.rarithmetic import intmask from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import rffi, lltype @@ -51,31 +52,34 @@ return rffi.llexternal(*args, compilation_info=eci, **kwargs) c_fopen = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_fclose = llexternal('fclose', [FILEP], rffi.INT, releasegil=False) -c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - FILEP], rffi.SIZE_T) -c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, - FILEP], rffi.SIZE_T) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) +c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', + [rffi.INT, rffi.CCHARP], FILEP) +c_tmpfile = llexternal('tmpfile', [], FILEP) + +c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], + rffi.INT) + +c_fclose = llexternal('fclose', [FILEP], rffi.INT) +c_pclose = llexternal('pclose', [FILEP], rffi.INT) + +c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) +c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], rffi.CCHARP) +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], + 
rffi.SIZE_T) + +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, FILEP], + rffi.SIZE_T) +c_fflush = llexternal('fflush', [FILEP], rffi.INT) +c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) + +c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], rffi.INT) +c_ftell = llexternal('ftell', [FILEP], rffi.LONG) +c_fileno = llexternal(fileno, [FILEP], rffi.INT) + c_feof = llexternal('feof', [FILEP], rffi.INT) c_ferror = llexternal('ferror', [FILEP], rffi.INT) c_clearerr = llexternal('clearerr', [FILEP], lltype.Void) -c_fseek = llexternal('fseek', [FILEP, rffi.LONG, rffi.INT], - rffi.INT) -c_tmpfile = llexternal('tmpfile', [], FILEP) -c_fileno = llexternal(fileno, [FILEP], rffi.INT) -c_fdopen = llexternal(('_' if os.name == 'nt' else '') + 'fdopen', - [rffi.INT, rffi.CCHARP], FILEP) -c_ftell = llexternal('ftell', [FILEP], rffi.LONG) -c_fflush = llexternal('fflush', [FILEP], rffi.INT) -c_ftruncate = llexternal(ftruncate, [rffi.INT, OFF_T], rffi.INT, macro=True) - -c_getc = llexternal('getc', [FILEP], rffi.INT, macro=True) -c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, FILEP], - rffi.CCHARP) - -c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], FILEP) -c_pclose = llexternal('pclose', [FILEP], rffi.INT, releasegil=False) -c_setvbuf = llexternal('setvbuf', [FILEP, rffi.CCHARP, rffi.INT, rffi.SIZE_T], rffi.INT) def _error(ll_file): @@ -121,23 +125,38 @@ try: ll_mode = rffi.str2charp(mode) try: - ll_f = c_fopen(ll_name, ll_mode) - if not ll_f: + ll_file = c_fopen(ll_name, ll_mode) + if not ll_file: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_mode, flavor='raw') finally: lltype.free(ll_name, flavor='raw') - _dircheck(ll_f) + _dircheck(ll_file) if buffering >= 0: + buf = lltype.nullptr(rffi.CCHARP.TO) if buffering == 0: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IONBF, 0) + c_setvbuf(ll_file, buf, _IONBF, 0) elif buffering == 1: - c_setvbuf(ll_f, 
lltype.nullptr(rffi.CCHARP.TO), _IOLBF, BUFSIZ) + c_setvbuf(ll_file, buf, _IOLBF, BUFSIZ) else: - c_setvbuf(ll_f, lltype.nullptr(rffi.CCHARP.TO), _IOFBF, buffering) - return RFile(ll_f) + c_setvbuf(ll_file, buf, _IOFBF, buffering) + return RFile(ll_file) + + +def create_fdopen_rfile(fd, mode="r"): + mode = _sanitize_mode(mode) + ll_mode = rffi.str2charp(mode) + try: + ll_file = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) + if not ll_file: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_mode, flavor='raw') + _dircheck(ll_file) + return RFile(ll_file) def create_temp_rfile(): @@ -148,58 +167,29 @@ return RFile(res) -def create_fdopen_rfile(fd, mode="r"): - mode = _sanitize_mode(mode) - ll_mode = rffi.str2charp(mode) - try: - ll_f = c_fdopen(rffi.cast(rffi.INT, fd), ll_mode) - if not ll_f: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - finally: - lltype.free(ll_mode, flavor='raw') - _dircheck(ll_f) - return RFile(ll_f) - - def create_popen_file(command, type): ll_command = rffi.str2charp(command) try: ll_type = rffi.str2charp(type) try: - ll_f = c_popen(ll_command, ll_type) - if not ll_f: + ll_file = c_popen(ll_command, ll_type) + if not ll_file: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) finally: lltype.free(ll_type, flavor='raw') finally: lltype.free(ll_command, flavor='raw') - return RPopenFile(ll_f) + return RFile(ll_file, c_pclose) class RFile(object): - def __init__(self, ll_file): - self.ll_file = ll_file + def __init__(self, ll_file, do_close=c_fclose): + self._ll_file = ll_file + self._do_close = do_close - def _check_closed(self): - if not self.ll_file: - raise ValueError("I/O operation on closed file") - - def write(self, value): - self._check_closed() - assert value is not None - ll_value = rffi.get_nonmovingbuffer(value) - try: - # note that since we got a nonmoving buffer, it is either raw - # or already cannot move, so the arithmetics below are fine - 
length = len(value) - bytes = c_fwrite(ll_value, 1, length, self.ll_file) - if bytes != length: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - finally: - rffi.free_nonmovingbuffer(value, ll_value) + def __del__(self): + self.close() def close(self): """Closes the described file. @@ -210,22 +200,24 @@ The actual return value may be determined with os.WEXITSTATUS. """ res = 0 - ll_file = self.ll_file + ll_file = self._ll_file if ll_file: # double close is allowed - self.ll_file = lltype.nullptr(FILEP.TO) + self._ll_file = lltype.nullptr(FILEP.TO) res = self._do_close(ll_file) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) return res - _do_close = staticmethod(c_fclose) # overridden in RPopenFile + def _check_closed(self): + if not self._ll_file: + raise ValueError("I/O operation on closed file") def read(self, size=-1): # XXX CPython uses a more delicate logic here self._check_closed() - ll_file = self.ll_file + ll_file = self._ll_file if size == 0: return "" elif size < 0: @@ -254,47 +246,8 @@ s = buf.str(returned_size) return s - def seek(self, pos, whence=0): - self._check_closed() - res = c_fseek(self.ll_file, pos, whence) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def fileno(self): - self._check_closed() - return intmask(c_fileno(self.ll_file)) - - def tell(self): - self._check_closed() - res = intmask(c_ftell(self.ll_file)) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - return res - - def flush(self): - self._check_closed() - res = c_fflush(self.ll_file) - if res != 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def truncate(self, arg=-1): - self._check_closed() - if arg == -1: - arg = self.tell() - self.flush() - res = c_ftruncate(self.fileno(), arg) - if res == -1: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - - def __del__(self): - self.close() - 
def _readline1(self, raw_buf): - ll_file = self.ll_file + ll_file = self._ll_file for i in range(BASE_LINE_SIZE): raw_buf[i] = '\n' @@ -347,7 +300,7 @@ s.append_charpsize(buf.raw, c) return s.build() else: # size > 0 - ll_file = self.ll_file + ll_file = self._ll_file s = StringBuilder() while s.getlength() < size: c = c_getc(ll_file) @@ -361,6 +314,53 @@ break return s.build() + @enforceargs(None, str) + def write(self, value): + self._check_closed() + ll_value = rffi.get_nonmovingbuffer(value) + try: + # note that since we got a nonmoving buffer, it is either raw + # or already cannot move, so the arithmetics below are fine + length = len(value) + bytes = c_fwrite(ll_value, 1, length, self._ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + rffi.free_nonmovingbuffer(value, ll_value) -class RPopenFile(RFile): - _do_close = staticmethod(c_pclose) + def flush(self): + self._check_closed() + res = c_fflush(self._ll_file) + if res != 0: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def truncate(self, arg=-1): + self._check_closed() + if arg == -1: + arg = self.tell() + self.flush() + res = c_ftruncate(self.fileno(), arg) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def seek(self, pos, whence=0): + self._check_closed() + res = c_fseek(self._ll_file, pos, whence) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + + def tell(self): + self._check_closed() + res = intmask(c_ftell(self._ll_file)) + if res == -1: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + return res + + def fileno(self): + self._check_closed() + return intmask(c_fileno(self._ll_file)) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -16,6 +16,13 @@ f = open(fname, "w") f.write("dupa") f.close() 
+ try: + f.write("dupb") + except ValueError: + pass + else: + assert False + f.close() f() self.interpret(f, []) From noreply at buildbot.pypy.org Fri Aug 29 22:37:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 29 Aug 2014 22:37:45 +0200 (CEST) Subject: [pypy-commit] pypy use-file-star-for-file: kill unused funcs Message-ID: <20140829203745.1F2D91D37BA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: use-file-star-for-file Changeset: r73197:a58ff2d31f1e Date: 2014-08-29 16:35 -0400 http://bitbucket.org/pypy/pypy/changeset/a58ff2d31f1e/ Log: kill unused funcs diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -89,16 +89,6 @@ self.check_closed() return self.stream - def _when_reading_first_flush(self, otherfile): - """Flush otherfile before reading from self.""" - #self.stream = streamio.CallbackReadFilter(self.stream, - # otherfile._try_to_flush) - - def _try_to_flush(self): - stream = self.stream - if stream is not None: - stream.flush() - # ____________________________________________________________ # # The 'direct_' methods assume that the caller already acquired the diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -49,8 +49,6 @@ stderr.name = '' self.w_stderr = space.wrap(stderr) - stdin._when_reading_first_flush(stdout) - def getio(space): return space.fromcache(IOState) From noreply at buildbot.pypy.org Sat Aug 30 08:44:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 08:44:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Performance tweaks: may return the unicode object passed in as argument Message-ID: <20140830064431.412C31C01D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73198:c71b5f944ccf Date: 2014-08-30 08:29 +0200 http://bitbucket.org/pypy/pypy/changeset/c71b5f944ccf/ Log: 
Performance tweaks: may return the unicode object passed in as argument if it needs no encoding at all. diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py --- a/pypy/module/_pypyjson/interp_encoder.py +++ b/pypy/module/_pypyjson/interp_encoder.py @@ -1,4 +1,6 @@ from rpython.rlib.rstring import StringBuilder +from rpython.rlib.runicode import str_decode_utf_8 +from pypy.interpreter import unicodehelper HEX = '0123456789abcdef' @@ -17,20 +19,39 @@ def raw_encode_basestring_ascii(space, w_string): if space.isinstance_w(w_string, space.w_str): s = space.str_w(w_string) - for c in s: + for i in range(len(s)): + c = s[i] if c >= ' ' and c <= '~' and c != '"' and c != '\\': pass else: + first = i break else: # the input is a string with only non-special ascii chars return w_string - w_string = space.call_method(w_string, 'decode', space.wrap('utf-8')) + eh = unicodehelper.decode_error_handler(space) + u = str_decode_utf_8( + s, len(s), None, final=True, errorhandler=eh, + allow_surrogates=True)[0] + sb = StringBuilder(len(u)) + sb.append_slice(s, 0, first) + else: + u = space.unicode_w(w_string) + for i in range(len(u)): + c = u[i] + if c >= u' ' and c <= u'~' and c != u'"' and c != u'\\': + pass + else: + break + else: + # the input is a unicode with only non-special ascii chars + return w_string + sb = StringBuilder(len(u)) + first = 0 - u = space.unicode_w(w_string) - sb = StringBuilder(len(u)) - for c in u: + for i in range(first, len(u)): + c = u[i] if c <= u'~': if c == u'"' or c == u'\\': sb.append('\\') diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py b/pypy/module/_pypyjson/test/test__pypyjson.py --- a/pypy/module/_pypyjson/test/test__pypyjson.py +++ b/pypy/module/_pypyjson/test/test__pypyjson.py @@ -192,14 +192,14 @@ def test_raw_encode_basestring_ascii(self): import _pypyjson - def check(s): + def check(s, expected_type=str): s = _pypyjson.raw_encode_basestring_ascii(s) - assert type(s) is str + assert 
type(s) is expected_type return s assert check("") == "" - assert check(u"") == "" + assert check(u"", expected_type=unicode) == u"" assert check("abc ") == "abc " - assert check(u"abc ") == "abc " + assert check(u"abc ", expected_type=unicode) == u"abc " raises(UnicodeDecodeError, check, "\xc0") assert check("\xc2\x84") == "\\u0084" assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45" From noreply at buildbot.pypy.org Sat Aug 30 08:44:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 08:44:32 +0200 (CEST) Subject: [pypy-commit] pypy default: Test fix for test_xpickle Message-ID: <20140830064432.7E3731C01D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73199:aba3692914d3 Date: 2014-08-30 06:44 +0000 http://bitbucket.org/pypy/pypy/changeset/aba3692914d3/ Log: Test fix for test_xpickle diff --git a/lib-python/2.7/test/pickletester.py b/lib-python/2.7/test/pickletester.py --- a/lib-python/2.7/test/pickletester.py +++ b/lib-python/2.7/test/pickletester.py @@ -6,14 +6,16 @@ import pickletools import copy_reg -from test.test_support import TestFailed, verbose, have_unicode, TESTFN, impl_detail +from test.test_support import TestFailed, verbose, have_unicode, TESTFN try: - from test.test_support import _2G, _1M, precisionbigmemtest + from test.test_support import _2G, _1M, precisionbigmemtest, impl_detail except ImportError: # this import might fail when run on older Python versions by test_xpickle _2G = _1M = 0 def precisionbigmemtest(*args, **kwargs): return lambda self: None + def impl_detail(*args, **kwargs): + return lambda self: None # Tests that try a number of pickle protocols should have a # for proto in protocols: From noreply at buildbot.pypy.org Sat Aug 30 09:29:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 09:29:29 +0200 (CEST) Subject: [pypy-commit] pypy default: Bug fix for arm Message-ID: <20140830072929.D72781C01D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: 
r73200:8192749d7069 Date: 2014-08-30 09:29 +0200 http://bitbucket.org/pypy/pypy/changeset/8192749d7069/ Log: Bug fix for arm diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -7,7 +7,7 @@ from rpython.jit.backend.arm.arch import (WORD, DOUBLE_WORD, FUNC_ALIGN, JITFRAME_FIXED_SIZE) from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder -from rpython.jit.backend.arm.locations import imm, StackLocation +from rpython.jit.backend.arm.locations import imm, StackLocation, get_fp_offset from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size from rpython.jit.backend.arm.opassembler import ResOpAssembler from rpython.jit.backend.arm.regalloc import (Regalloc, @@ -708,9 +708,9 @@ return AsmInfo(ops_offset, startpos + rawstart, codeendpos - startpos) - def new_stack_loc(self, i, pos, tp): + def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() - return StackLocation(i, pos + base_ofs, tp) + return StackLocation(i, get_fp_offset(base_ofs, i), tp) def check_frame_before_jump(self, target_token): if target_token in self.target_tokens_currently_compiling: diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -153,7 +153,7 @@ i = pos - self.cpu.JITFRAME_FIXED_SIZE assert i >= 0 tp = inputargs[input_i].type - locs.append(self.new_stack_loc(i, pos, tp)) + locs.append(self.new_stack_loc(i, tp)) input_i += 1 return locs diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1764,7 +1764,7 @@ ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap') mc.MOV_bi(ofs, 0) - def new_stack_loc(self, i, pos, tp): + def new_stack_loc(self, i, tp): 
base_ofs = self.cpu.get_baseofs_of_frame_field() return FrameLoc(i, get_ebp_ofs(base_ofs, i), tp) From noreply at buildbot.pypy.org Sat Aug 30 12:10:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 12:10:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Add LDREX, STREX, DMB instructions, needed for lock manipulation Message-ID: <20140830101056.032201C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73201:7a2fd8406fbd Date: 2014-08-30 12:09 +0200 http://bitbucket.org/pypy/pypy/changeset/7a2fd8406fbd/ Log: Add LDREX, STREX, DMB instructions, needed for lock manipulation diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -318,6 +318,23 @@ | (rd & 0xF) << 12 | imm16 & 0xFFF) + def LDREX(self, rt, rn, c=cond.AL): + self.write32(c << 28 + | 0x01900f9f + | (rt & 0xF) << 12 + | (rn & 0xF) << 16) + + def STREX(self, rd, rt, rn, c=cond.AL): + """rd must not be the same register as rt or rn""" + self.write32(c << 28 + | 0x01800f90 + | (rt & 0xF) + | (rd & 0xF) << 12 + | (rn & 0xF) << 16) + + def DMB(self): + self.write32(0xf57ff05f) + DIV = binary_helper_call('int_div') MOD = binary_helper_call('int_mod') UDIV = binary_helper_call('uint_div') diff --git a/rpython/jit/backend/arm/test/test_instr_codebuilder.py b/rpython/jit/backend/arm/test/test_instr_codebuilder.py --- a/rpython/jit/backend/arm/test/test_instr_codebuilder.py +++ b/rpython/jit/backend/arm/test/test_instr_codebuilder.py @@ -187,6 +187,18 @@ self.cb.MOVT_ri(r.r3.value, 0xFFFF, conditions.NE) self.assert_equal("MOVTNE r3, #65535") + def test_ldrex(self): + self.cb.LDREX(r.r10.value, r.r11.value) + self.assert_equal('LDREX r10, [r11]') + + def test_strex(self): + self.cb.STREX(r.r9.value, r.r1.value, r.r14.value, conditions.NE) + self.assert_equal('STREXNE r9, r1, [r14]') + + def test_dmb(self): + self.cb.DMB() + 
self.assert_equal('DMB') + def test_size_of_gen_load_int(): for v, n in [(5, 4), (6, 4), (7, 2)]: From noreply at buildbot.pypy.org Sat Aug 30 12:10:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 12:10:57 +0200 (CEST) Subject: [pypy-commit] pypy default: in-progress: call_release_gil Message-ID: <20140830101057.3B5241C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73202:0b926ccd6944 Date: 2014-08-30 12:10 +0200 http://bitbucket.org/pypy/pypy/changeset/0b926ccd6944/ Log: in-progress: call_release_gil diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -81,31 +81,76 @@ self.mc.gen_load_int(r.ip.value, n) self.mc.SUB_rr(r.sp.value, r.sp.value, r.ip.value) - def select_call_release_gil_mode(self): - AbstractCallBuilder.select_call_release_gil_mode(self) + def call_releasegil_addr_and_move_real_arguments(self, fastgil): + assert self.is_call_release_gil + assert not self.asm._is_asmgcc() - def call_releasegil_addr_and_move_real_arguments(self): - assert not self.asm._is_asmgcc() - from rpython.jit.backend.arm.regalloc import CoreRegisterManager - with saved_registers(self.mc, - CoreRegisterManager.save_around_call_regs): - self.mc.BL(self.asm.releasegil_addr) + # Save this thread's shadowstack pointer into r7, for later comparison + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + rst = gcrootmap.get_root_stack_top_addr() + self.mc.gen_load_int(r.r5.value, rst) + self.mc.LDR_ri(r.r7.value, r.r5.value) + + # change 'rpy_fastgil' to 0 (it should be non-zero right now) + self.mc.DMB() + self.mc.gen_load_int(r.r6.value, fastgil) + self.mc.MOV_ri(r.ip.value, 0) + self.mc.STR_ri(r.ip.value, r.r6.value) if not we_are_translated(): # for testing: we should not access self.mc.ADD_ri(r.fp.value, r.fp.value, 1) # fp any more - def move_real_result_and_call_reacqgil_addr(self): + 
def move_real_result_and_call_reacqgil_addr(self, fastgil): + # try to reacquire the lock. The registers r5 to r7 are still + # valid from before the call: + # r5 == &root_stack_top + # r6 == fastgil + # r7 == previous value of root_stack_top + self.mc.LDREX(r.i3.value, r.r6.value) # load the lock value + self.mc.MOV_ri(r.ip.value, 1) + self.mc.CMP_ri(r.r3.value, 0) # is the lock free? + self.mc.STREX(r.r3.value, r.ipvalue, r.r6.value, c=cond.EQ) + # try to claim the lock + self.mc.CMP_ri(r.r3.value, 0, c=cond.EQ) # did this succeed? + self.mc.DMB() + # the success of the lock acquisition is defined by + # 'EQ is true', or equivalently by 'r3 == 0'. + # + if self.asm.cpu.gc_ll_descr.gcrootmap: + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in 'r7'), and if not, we fall back to 'reacqgil_addr'. + self.mc.LDR_ri(r.ip.value, r.r5.value, c=cond.EQ) + self.mc.CMP_rr(r.ip.value, r.r7.value, c=cond.EQ) + b1_location = self.mc.currpos() + self.mc.BKPT() # BEQ below + # there are two cases here: either EQ was false from + # the beginning, or EQ was true at first but the CMP + # made it false. In the second case we need to + # release the fastgil here. We know which case it is + # by checking again r3. 
+ self.mc.CMP_ri(r.r3.value, 0) + self.mc.STR_ri(r.r3.value, r.r6.value, c=cond.EQ) + else: + b1_location = self.mc.currpos() + self.mc.BKPT() # BEQ below + # # save the result we just got - assert not self.asm._is_asmgcc() gpr_to_save, vfp_to_save = self.get_result_locs() with saved_registers(self.mc, gpr_to_save, vfp_to_save): self.mc.BL(self.asm.reacqgil_addr) + # replace b1_location with B(here, cond.EQ) + pmc = OverwritingBuilder(self.mc, b1_location, WORD) + pmc.B_offs(self.mc.currpos(), c.EQ) + if not we_are_translated(): # for testing: now we can accesss self.mc.SUB_ri(r.fp.value, r.fp.value, 1) # fp again - # for shadowstack, done for us by _reload_frame_if_necessary() - def get_result_locs(self): raise NotImplementedError From noreply at buildbot.pypy.org Sat Aug 30 12:28:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 12:28:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixes Message-ID: <20140830102821.D210B1C3273@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73203:bac11f2871ed Date: 2014-08-30 12:27 +0200 http://bitbucket.org/pypy/pypy/changeset/bac11f2871ed/ Log: Fixes diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -10,6 +10,7 @@ from rpython.jit.backend.arm.helper.assembler import count_reg_args from rpython.jit.backend.arm.helper.assembler import saved_registers from rpython.jit.backend.arm.helper.regalloc import check_imm_arg +from rpython.jit.backend.arm.codebuilder import OverwritingBuilder class ARMCallbuilder(AbstractCallBuilder): @@ -107,12 +108,12 @@ # r5 == &root_stack_top # r6 == fastgil # r7 == previous value of root_stack_top - self.mc.LDREX(r.i3.value, r.r6.value) # load the lock value + self.mc.LDREX(r.r3.value, r.r6.value) # load the lock value self.mc.MOV_ri(r.ip.value, 1) self.mc.CMP_ri(r.r3.value, 0) # is the lock free? 
- self.mc.STREX(r.r3.value, r.ipvalue, r.r6.value, c=cond.EQ) + self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ) # try to claim the lock - self.mc.CMP_ri(r.r3.value, 0, c=cond.EQ) # did this succeed? + self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed? self.mc.DMB() # the success of the lock acquisition is defined by # 'EQ is true', or equivalently by 'r3 == 0'. @@ -124,8 +125,8 @@ # thread. So here we check if the shadowstack pointer # is still the same as before we released the GIL (saved # in 'r7'), and if not, we fall back to 'reacqgil_addr'. - self.mc.LDR_ri(r.ip.value, r.r5.value, c=cond.EQ) - self.mc.CMP_rr(r.ip.value, r.r7.value, c=cond.EQ) + self.mc.LDR_ri(r.ip.value, r.r5.value, cond=c.EQ) + self.mc.CMP_rr(r.ip.value, r.r7.value, cond=c.EQ) b1_location = self.mc.currpos() self.mc.BKPT() # BEQ below # there are two cases here: either EQ was false from @@ -134,7 +135,7 @@ # release the fastgil here. We know which case it is # by checking again r3. self.mc.CMP_ri(r.r3.value, 0) - self.mc.STR_ri(r.r3.value, r.r6.value, c=cond.EQ) + self.mc.STR_ri(r.r3.value, r.r6.value, cond=c.EQ) else: b1_location = self.mc.currpos() self.mc.BKPT() # BEQ below @@ -144,7 +145,7 @@ with saved_registers(self.mc, gpr_to_save, vfp_to_save): self.mc.BL(self.asm.reacqgil_addr) - # replace b1_location with B(here, cond.EQ) + # replace b1_location with B(here, c.EQ) pmc = OverwritingBuilder(self.mc, b1_location, WORD) pmc.B_offs(self.mc.currpos(), c.EQ) From noreply at buildbot.pypy.org Sat Aug 30 13:21:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 13:21:16 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a warning Message-ID: <20140830112116.8D2991C3204@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73204:b566335eb1fc Date: 2014-08-30 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/b566335eb1fc/ Log: Add a warning diff --git a/rpython/jit/backend/test/runner_test.py 
b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2702,6 +2702,9 @@ assert r == result def test_call_release_gil_variable_function_and_arguments(self): + # NOTE NOTE NOTE + # This also works as a test for ctypes and libffi. + # On some platforms, one of these is buggy... from rpython.rlib.libffi import types from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat From noreply at buildbot.pypy.org Sat Aug 30 13:44:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 13:44:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20140830114413.D7BCC1C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73205:a261edda4d4a Date: 2014-08-30 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/a261edda4d4a/ Log: Translation fix diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -332,8 +332,8 @@ | (rd & 0xF) << 12 | (rn & 0xF) << 16) - def DMB(self): - self.write32(0xf57ff05f) + def DMB(self, c=cond.AL): + self.write32(c << 28 | 0x157ff05f) DIV = binary_helper_call('int_div') MOD = binary_helper_call('int_mod') From noreply at buildbot.pypy.org Sat Aug 30 13:44:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 13:44:15 +0200 (CEST) Subject: [pypy-commit] pypy default: While we're at it, this DMB wait instruction is only needed if EQ. Message-ID: <20140830114415.2C8701C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73206:622f29ad42ab Date: 2014-08-30 13:43 +0200 http://bitbucket.org/pypy/pypy/changeset/622f29ad42ab/ Log: While we're at it, this DMB wait instruction is only needed if EQ. 
diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -114,7 +114,7 @@ self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ) # try to claim the lock self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed? - self.mc.DMB() + self.mc.DMB(c=c.EQ) # the success of the lock acquisition is defined by # 'EQ is true', or equivalently by 'r3 == 0'. # From noreply at buildbot.pypy.org Sat Aug 30 15:00:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 15:00:03 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix some tests on Windows (does not appear after translation) Message-ID: <20140830130003.1DD061C1036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73207:c893766bb4b3 Date: 2014-08-30 14:59 +0200 http://bitbucket.org/pypy/pypy/changeset/c893766bb4b3/ Log: Fix some tests on Windows (does not appear after translation) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -301,6 +301,7 @@ save_ebp = not self.asm.cpu.gc_ll_descr.is_shadow_stack() ofs = WORD * (FRAME_FIXED_SIZE - 1) if save_ebp: # only for testing (or with Boehm) + ofs -= self.current_esp self.mc.MOV_sr(ofs, ebp.value) self.mc.MOV(ebp, esp) self.mc.CALL(self.fnloc) From noreply at buildbot.pypy.org Sat Aug 30 16:14:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 16:14:13 +0200 (CEST) Subject: [pypy-commit] pypy default: Add change_current_fraction() Message-ID: <20140830141413.AB1BA1C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73208:30c2d0b47ea9 Date: 2014-08-30 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/30c2d0b47ea9/ Log: Add change_current_fraction() diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py 
--- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -136,6 +136,34 @@ return True tick._always_inline_ = True + def change_current_fraction(self, hash, new_fraction): + """Change the value stored for 'hash' to be the given 'new_fraction', + which should be a float equal to or slightly lower than 1.0. + """ + p_entry = self.timetable[self._get_index(hash)] + subhash = self._get_subhash(hash) + + # find in 'n' the index that will be overwritten: the first within + # range(5) that contains either the right subhash, or a null time + # (or, if there isn't any, then just n == 4 will do). + n = 0 + while n < 4 and (p_entry.subhashes[n] != subhash and + float(p_entry.times[n]) != 0.0): + n += 1 + + # move one step to the right all elements [n - 1, n - 2, ..., 0], + # (this overwrites the old item at index 'n') + while n > 0: + n -= 1 + p_entry.subhashes[n + 1] = p_entry.subhashes[n] + p_entry.times[n + 1] = p_entry.times[n] + + # insert the new hash at index 0. This is a good approximation, + # because change_current_fraction() should be used for + # new_fraction == value close to 1.0. 
+ p_entry.subhashes[0] = rffi.cast(rffi.USHORT, subhash) + p_entry.times[0] = r_singlefloat(new_fraction) + def reset(self, hash): p_entry = self.timetable[self._get_index(hash)] subhash = self._get_subhash(hash) diff --git a/rpython/jit/metainterp/test/test_counter.py b/rpython/jit/metainterp/test/test_counter.py --- a/rpython/jit/metainterp/test/test_counter.py +++ b/rpython/jit/metainterp/test/test_counter.py @@ -106,3 +106,25 @@ assert jc.lookup_chain(104) is d3 assert d3.next is d4 assert d4.next is None + + +def test_change_current_fraction(): + jc = JitCounter() + incr = jc.compute_threshold(8) + # change_current_fraction() with a fresh new hash + jc.change_current_fraction(index2hash(jc, 104), 0.95) + r = jc.tick(index2hash(jc, 104), incr) + assert r is True + # change_current_fraction() with an already-existing hash + r = jc.tick(index2hash(jc, 104), incr) + assert r is False + jc.change_current_fraction(index2hash(jc, 104), 0.95) + r = jc.tick(index2hash(jc, 104), incr) + assert r is True + # change_current_fraction() with a smaller incr + incr = jc.compute_threshold(32) + jc.change_current_fraction(index2hash(jc, 104), 0.95) + r = jc.tick(index2hash(jc, 104), incr) + assert r is False + r = jc.tick(index2hash(jc, 104), incr) + assert r is True From noreply at buildbot.pypy.org Sat Aug 30 17:43:12 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 30 Aug 2014 17:43:12 +0200 (CEST) Subject: [pypy-commit] pypy trace-limit-hack: (arigo, fijal) try to hack the noninlinable marking to trace quicker the Message-ID: <20140830154312.919CB1C01D0@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: trace-limit-hack Changeset: r73209:68c497551ab4 Date: 2014-08-30 09:42 -0600 http://bitbucket.org/pypy/pypy/changeset/68c497551ab4/ Log: (arigo, fijal) try to hack the noninlinable marking to trace quicker the functions involved diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py 
+++ b/rpython/jit/metainterp/pyjitpl.py @@ -1991,6 +1991,10 @@ if greenkey_of_huge_function is not None: warmrunnerstate.disable_noninlinable_function( greenkey_of_huge_function) + if self.current_merge_points: + jd_sd = self.jitdriver_sd + greenkey = self.current_merge_points[0][0][:jd_sd.num_green_args] + warmrunnerstate.JitCell.trace_next_iteration(greenkey) raise SwitchToBlackhole(Counters.ABORT_TOO_LONG) def _interpret(self): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -140,6 +140,9 @@ return token return None + def has_seen_a_procedure_token(self): + return self.wref_procedure_token is not None + def set_procedure_token(self, token, tmp=False): self.wref_procedure_token = self._makeref(token) if tmp: @@ -154,9 +157,14 @@ def should_remove_jitcell(self): if self.get_procedure_token() is not None: return False # don't remove JitCells with a procedure_token - # don't remove JitCells that are being traced, or JitCells with - # the "don't trace here" flag. Other JitCells can be removed. - return (self.flags & (JC_TRACING | JC_DONT_TRACE_HERE)) == 0 + if self.flags & JC_TRACING: + return False # don't remove JitCells that are being traced + if self.flags & JC_DONT_TRACE_HERE: + # if we have this flag, and we *had* a procedure_token but + # we no longer have one, then remove me. this prevents this + # JitCell from being immortal. + return self.has_seen_a_procedure_token() + return True # Other JitCells can be removed. # ____________________________________________________________ @@ -365,6 +373,12 @@ # machine code was already compiled for these greenargs procedure_token = cell.get_procedure_token() if procedure_token is None: + if cell.flags & JC_DONT_TRACE_HERE: + if not cell.has_seen_a_procedure_token(): + # we're seeing a fresh JC_DONT_TRACE_HERE with no + # procedure_token. Compile now. 
+ bound_reached(hash, cell, *args) + return # it was an aborted compilation, or maybe a weakref that # has been freed jitcounter.cleanup_chain(hash) @@ -467,6 +481,12 @@ return JitCell.get_jitcell(*greenargs) @staticmethod + def trace_next_iteration(greenkey): + greenargs = unwrap_greenkey(greenkey) + hash = JitCell.get_uhash(*greenargs) + jitcounter.change_current_fraction(hash, 0.98) + + @staticmethod def ensure_jit_cell_at_key(greenkey): greenargs = unwrap_greenkey(greenkey) hash = JitCell.get_uhash(*greenargs) From noreply at buildbot.pypy.org Sat Aug 30 18:02:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 18:02:18 +0200 (CEST) Subject: [pypy-commit] pypy trace-limit-hack: Don't call trace_next_iteration() if we didn't call Message-ID: <20140830160218.ED5B61C3204@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: trace-limit-hack Changeset: r73210:f61a39ccf6f7 Date: 2014-08-30 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/f61a39ccf6f7/ Log: Don't call trace_next_iteration() if we didn't call disable_noninlinable_function(), to avoid bad performance if we get a very long, flat list of operations with no disablable call. 
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1991,10 +1991,10 @@ if greenkey_of_huge_function is not None: warmrunnerstate.disable_noninlinable_function( greenkey_of_huge_function) - if self.current_merge_points: - jd_sd = self.jitdriver_sd - greenkey = self.current_merge_points[0][0][:jd_sd.num_green_args] - warmrunnerstate.JitCell.trace_next_iteration(greenkey) + if self.current_merge_points: + jd_sd = self.jitdriver_sd + greenkey = self.current_merge_points[0][0][:jd_sd.num_green_args] + warmrunnerstate.JitCell.trace_next_iteration(greenkey) raise SwitchToBlackhole(Counters.ABORT_TOO_LONG) def _interpret(self): From noreply at buildbot.pypy.org Sat Aug 30 18:24:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 18:24:27 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1842 fixed: don't crash on select.select([large_number], ...) Message-ID: <20140830162427.166C31C09B2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73211:f41115b2bb5a Date: 2014-08-30 18:23 +0200 http://bitbucket.org/pypy/pypy/changeset/f41115b2bb5a/ Log: issue #1842 fixed: don't crash on select.select([large_number], ...) 
diff --git a/pypy/module/select/interp_select.py b/pypy/module/select/interp_select.py --- a/pypy/module/select/interp_select.py +++ b/pypy/module/select/interp_select.py @@ -98,6 +98,9 @@ for w_f in list_w: fd = space.c_filedescriptor_w(w_f) if fd > nfds: + if _c.MAX_FD_SIZE is not None and fd >= _c.MAX_FD_SIZE: + raise oefmt(space.w_ValueError, + "file descriptor out of range in select()") nfds = fd _c.FD_SET(fd, ll_list) fdlist.append(fd) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -198,6 +198,16 @@ finally: writeend.close() + def test_select_descr_out_of_bounds(self): + import sys, select + raises(ValueError, select.select, [-1], [], []) + raises(ValueError, select.select, [], [-2], []) + raises(ValueError, select.select, [], [], [-3]) + if sys.platform != 'win32': + raises(ValueError, select.select, [2000000000], [], []) + raises(ValueError, select.select, [], [2000000000], []) + raises(ValueError, select.select, [], [], [2000000000]) + def test_poll(self): import select if not hasattr(select, 'poll'): diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py --- a/rpython/rlib/_rsocket_rffi.py +++ b/rpython/rlib/_rsocket_rffi.py @@ -651,9 +651,16 @@ return rwin32.FormatError(errno) def gai_strerror_str(errno): return rwin32.FormatError(errno) + + # WinSock does not use a bitmask in select, and uses + # socket handles greater than FD_SETSIZE + MAX_FD_SIZE = None + else: from rpython.rlib.rposix import get_errno as geterrno socket_strerror_str = os.strerror def gai_strerror_str(errno): return rffi.charp2str(gai_strerror(errno)) + + MAX_FD_SIZE = FD_SETSIZE diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py --- a/rpython/rlib/ropenssl.py +++ b/rpython/rlib/ropenssl.py @@ -5,6 +5,7 @@ from rpython.translator.platform import platform from rpython.translator.tool.cbuild import 
ExternalCompilationInfo from rpython.rlib.unroll import unrolling_iterable +from rpython.rlib._rsocket_rffi import MAX_FD_SIZE if sys.platform == 'win32' and platform.name != 'mingw32': @@ -47,13 +48,6 @@ include_dir='inc32', library_dir='out32'), ]) -# WinSock does not use a bitmask in select, and uses -# socket handles greater than FD_SETSIZE -if sys.platform == 'win32': - MAX_FD_SIZE = None -else: - from rpython.rlib._rsocket_rffi import FD_SETSIZE as MAX_FD_SIZE - ASN1_STRING = lltype.Ptr(lltype.ForwardReference()) ASN1_ITEM = rffi.COpaquePtr('ASN1_ITEM') X509_NAME = rffi.COpaquePtr('X509_NAME') From noreply at buildbot.pypy.org Sat Aug 30 18:44:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 30 Aug 2014 18:44:32 +0200 (CEST) Subject: [pypy-commit] pypy default: issue #1259: trying to remove all built-in modules '__file__' attribute. Message-ID: <20140830164432.D5FF61C01D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73212:93fffd3d2e03 Date: 2014-08-30 18:44 +0200 http://bitbucket.org/pypy/pypy/changeset/93fffd3d2e03/ Log: issue #1259: trying to remove all built-in modules '__file__' attribute. diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -7,7 +7,6 @@ class MixedModule(Module): applevel_name = None - expose__file__attribute = True # The following attribute is None as long as the module has not been # imported yet, and when it has been, it is mod.__dict__.items() just @@ -144,8 +143,6 @@ for name, spec in cls.appleveldefs.items(): loaders[name] = getappfileloader(pkgroot, appname, spec) assert '__file__' not in loaders - if cls.expose__file__attribute: - loaders['__file__'] = cls.get__file__ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ @@ -159,28 +156,6 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) - def get__file__(cls, space): - """ NOT_RPYTHON. 
- return the __file__ attribute of a MixedModule - which is the root-directory for the various - applevel and interplevel snippets that make - up the module. - """ - try: - fname = cls._fname - except AttributeError: - pkgroot = cls.__module__ - mod = __import__(pkgroot, None, None, ['__doc__']) - fname = mod.__file__ - assert os.path.basename(fname).startswith('__init__.py') - # make it clear that it's not really the interp-level module - # at this path that we are seeing, but an app-level version of it - fname = os.path.dirname(fname) - cls._fname = fname - return space.wrap(fname) - - get__file__ = classmethod(get__file__) - def get__doc__(cls, space): return space.wrap(cls.__doc__) get__doc__ = classmethod(get__doc__) diff --git a/pypy/interpreter/test/test_extmodules.py b/pypy/interpreter/test/test_extmodules.py --- a/pypy/interpreter/test/test_extmodules.py +++ b/pypy/interpreter/test/test_extmodules.py @@ -64,5 +64,5 @@ @pytest.mark.skipif("config.option.runappdirect") def test_import(self): import extmod - assert extmod.__file__.endswith('extmod') + assert not hasattr(extmod, '__file__') assert type(extmod.time()) is float diff --git a/pypy/interpreter/test/test_module.py b/pypy/interpreter/test/test_module.py --- a/pypy/interpreter/test/test_module.py +++ b/pypy/interpreter/test/test_module.py @@ -42,12 +42,9 @@ bar = type(sys)('bar','docstring') assert bar.__doc__ == 'docstring' - def test___file__(self): - import sys, os - if not hasattr(sys, "pypy_objspaceclass"): - skip("need PyPy for sys.__file__ checking") - assert sys.__file__ - assert os.path.basename(sys.__file__) == 'sys' + def test___file__(self): + import sys + assert not hasattr(sys, '__file__') def test_repr(self): import sys diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py --- a/pypy/module/__builtin__/__init__.py +++ b/pypy/module/__builtin__/__init__.py @@ -7,7 +7,6 @@ class Module(MixedModule): """Built-in functions, exceptions, and other objects.""" 
- expose__file__attribute = False appleveldefs = { 'execfile' : 'app_io.execfile', diff --git a/pypy/module/errno/test/test_errno.py b/pypy/module/errno/test/test_errno.py --- a/pypy/module/errno/test/test_errno.py +++ b/pypy/module/errno/test/test_errno.py @@ -8,7 +8,7 @@ cls.w_errorcode = cls.space.wrap(errno.errorcode) def test_posix(self): - assert self.errno.__file__ + assert not hasattr(self.errno, '__file__') def test_constants(self): for code, name in self.errorcode.iteritems(): diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -96,7 +96,7 @@ need_sparse_files() def test_posix_is_pypy_s(self): - assert self.posix.__file__ + assert hasattr(self.posix, '_statfields') def test_some_posix_basic_operation(self): path = self.path @@ -282,13 +282,8 @@ f = posix.fdopen(fd, "r") f.close() - # Ensure that fcntl is not faked - try: - import fcntl - except ImportError: - pass - else: - assert fcntl.__file__.endswith('pypy/module/fcntl') + # There used to be code here to ensure that fcntl is not faked + # but we can't do that cleanly any more exc = raises(OSError, posix.fdopen, fd) assert exc.value.errno == errno.EBADF diff --git a/pypy/module/posix/test/test_posix_libfile.py b/pypy/module/posix/test/test_posix_libfile.py --- a/pypy/module/posix/test/test_posix_libfile.py +++ b/pypy/module/posix/test/test_posix_libfile.py @@ -19,7 +19,7 @@ cls.w_path = cls.space.wrap(str(path)) def test_posix_is_pypy_s(self): - assert self.posix.__file__ + assert hasattr(self.posix, '_statfields') def test_fdopen(self): path = self.path From noreply at buildbot.pypy.org Sat Aug 30 22:25:01 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 30 Aug 2014 22:25:01 +0200 (CEST) Subject: [pypy-commit] pypy default: fix for issue #1846, also submitted to cpython as http://bugs.python.org/issue22308 Message-ID: 
<20140830202501.C97A61C01D0@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73213:fbb5b4cefb37 Date: 2014-08-30 21:58 +0300 http://bitbucket.org/pypy/pypy/changeset/fbb5b4cefb37/ Log: fix for issue #1846, also submitted to cpython as http://bugs.python.org/issue22308 diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -7,30 +7,30 @@ _INSTALL_SCHEMES = { 'posix_prefix': { - 'stdlib': '{base}/lib/python{py_version_short}', - 'platstdlib': '{platbase}/lib/python{py_version_short}', - 'purelib': '{base}/lib/python{py_version_short}/site-packages', - 'platlib': '{platbase}/lib/python{py_version_short}/site-packages', - 'include': '{base}/include/python{py_version_short}', - 'platinclude': '{platbase}/include/python{py_version_short}', + 'stdlib': '{base}/lib/{implementation_lower}{py_version_short}', + 'platstdlib': '{platbase}/lib/{implementation_lower}{py_version_short}', + 'purelib': '{base}/lib/{implementation_lower}{py_version_short}/site-packages', + 'platlib': '{platbase}/lib/{implementation_lower}{py_version_short}/site-packages', + 'include': '{base}/include/{implementation_lower}{py_version_short}', + 'platinclude': '{platbase}/include/{implementation_lower}{py_version_short}', 'scripts': '{base}/bin', 'data': '{base}', }, 'posix_home': { - 'stdlib': '{base}/lib/python', - 'platstdlib': '{base}/lib/python', - 'purelib': '{base}/lib/python', - 'platlib': '{base}/lib/python', - 'include': '{base}/include/python', - 'platinclude': '{base}/include/python', + 'stdlib': '{base}/lib/{implementation_lower}', + 'platstdlib': '{base}/lib/{implementation_lower}', + 'purelib': '{base}/lib/{implementation_lower}', + 'platlib': '{base}/lib/{implementation_lower}', + 'include': '{base}/include/{implementation_lower}', + 'platinclude': '{base}/include/{implementation_lower}', 'scripts': '{base}/bin', 'data' : '{base}', }, 'pypy': { - 'stdlib': 
'{base}/lib-python/{py_version_short}', - 'platstdlib': '{base}/lib-python/{py_version_short}', - 'purelib': '{base}/lib-python/{py_version_short}', - 'platlib': '{base}/lib-python/{py_version_short}', + 'stdlib': '{base}/lib-{implementation_lower}/{py_version_short}', + 'platstdlib': '{base}/lib-{implementation_lower}/{py_version_short}', + 'purelib': '{base}/lib-{implementation_lower}/{py_version_short}', + 'platlib': '{base}/lib-{implementation_lower}/{py_version_short}', 'include': '{base}/include', 'platinclude': '{base}/include', 'scripts': '{base}/bin', @@ -57,37 +57,37 @@ 'data' : '{base}', }, 'os2_home': { - 'stdlib': '{userbase}/lib/python{py_version_short}', - 'platstdlib': '{userbase}/lib/python{py_version_short}', - 'purelib': '{userbase}/lib/python{py_version_short}/site-packages', - 'platlib': '{userbase}/lib/python{py_version_short}/site-packages', - 'include': '{userbase}/include/python{py_version_short}', + 'stdlib': '{userbase}/lib/{implementation_lower}{py_version_short}', + 'platstdlib': '{userbase}/lib/{implementation_lower}{py_version_short}', + 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}', 'scripts': '{userbase}/bin', 'data' : '{userbase}', }, 'nt_user': { - 'stdlib': '{userbase}/Python{py_version_nodot}', - 'platstdlib': '{userbase}/Python{py_version_nodot}', - 'purelib': '{userbase}/Python{py_version_nodot}/site-packages', - 'platlib': '{userbase}/Python{py_version_nodot}/site-packages', - 'include': '{userbase}/Python{py_version_nodot}/Include', + 'stdlib': '{userbase}/{implementation}{py_version_nodot}', + 'platstdlib': '{userbase}/{implementation}{py_version_nodot}', + 'purelib': '{userbase}/{implementation}{py_version_nodot}/site-packages', + 'platlib': '{userbase}/{implementation}{py_version_nodot}/site-packages', + 'include': 
'{userbase}/{implementation}{py_version_nodot}/Include', 'scripts': '{userbase}/Scripts', 'data' : '{userbase}', }, 'posix_user': { - 'stdlib': '{userbase}/lib/python{py_version_short}', - 'platstdlib': '{userbase}/lib/python{py_version_short}', - 'purelib': '{userbase}/lib/python{py_version_short}/site-packages', - 'platlib': '{userbase}/lib/python{py_version_short}/site-packages', - 'include': '{userbase}/include/python{py_version_short}', + 'stdlib': '{userbase}/lib/{implementation_lower}{py_version_short}', + 'platstdlib': '{userbase}/lib/{implementation_lower}{py_version_short}', + 'purelib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}{py_version_short}/site-packages', + 'include': '{userbase}/include/{implementation_lower}{py_version_short}', 'scripts': '{userbase}/bin', 'data' : '{userbase}', }, 'osx_framework_user': { - 'stdlib': '{userbase}/lib/python', - 'platstdlib': '{userbase}/lib/python', - 'purelib': '{userbase}/lib/python/site-packages', - 'platlib': '{userbase}/lib/python/site-packages', + 'stdlib': '{userbase}/lib/{implementation_lower}', + 'platstdlib': '{userbase}/lib/{implementation_lower}', + 'purelib': '{userbase}/lib/{implementation_lower}/site-packages', + 'platlib': '{userbase}/lib/{implementation_lower}/site-packages', 'include': '{userbase}/include', 'scripts': '{userbase}/bin', 'data' : '{userbase}', @@ -104,6 +104,11 @@ _CONFIG_VARS = None _USER_BASE = None +def _get_implementation(): + if '__pypy__' in sys.builtin_module_names: + return 'PyPy' + return 'Python' + def _safe_realpath(path): try: return realpath(path) @@ -475,6 +480,8 @@ _CONFIG_VARS['base'] = _PREFIX _CONFIG_VARS['platbase'] = _EXEC_PREFIX _CONFIG_VARS['projectbase'] = _PROJECT_BASE + _CONFIG_VARS['implementation'] = _get_implementation() + _CONFIG_VARS['implementation_lower'] = _get_implementation().lower() if os.name in ('nt', 'os2'): _init_non_posix(_CONFIG_VARS) @@ -644,6 +651,8 @@ 
_print_dict('Paths', get_paths()) print _print_dict('Variables', get_config_vars()) + print + _print_dict('User', get_paths('%s_user' % os.name)) if __name__ == '__main__': From noreply at buildbot.pypy.org Sat Aug 30 22:35:23 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 30 Aug 2014 22:35:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: store the ref to the owning object in CBuffer instead of the object itself for Message-ID: <20140830203523.1F91A1C09B2@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r73214:c4c79992f1f8 Date: 2014-08-30 13:31 -0700 http://bitbucket.org/pypy/pypy/changeset/c4c79992f1f8/ Log: store the ref to the owning object in CBuffer instead of the object itself for later Py_DecRef'ing (which acts upon the ref anyway). simplifies the destructor enough to workaround a del calls to much translation error diff --git a/pypy/module/cpyext/buffer.py b/pypy/module/cpyext/buffer.py --- a/pypy/module/cpyext/buffer.py +++ b/pypy/module/cpyext/buffer.py @@ -1,6 +1,5 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import buffer -from rpython.rlib.objectmodel import import_from_mixin from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, Py_buffer) from pypy.module.cpyext.pyobject import PyObject, Py_DecRef @@ -13,17 +12,18 @@ # PyPy only supports contiguous Py_buffers for now. 
return 1 -class CBufferMixin(object): +class CBuffer(buffer.Buffer): - def __init__(self, space, c_buf, c_len, w_obj): + _immutable_ = True + + def __init__(self, space, c_buf, c_len, c_obj): self.space = space self.c_buf = c_buf self.c_len = c_len - self.w_obj = w_obj + self.c_obj = c_obj - def destructor(self): - assert isinstance(self, CBufferMixin) - Py_DecRef(self.space, self.w_obj) + def __del__(self): + Py_DecRef(self.space, self.c_obj) def getlength(self): return self.c_len @@ -34,10 +34,3 @@ def as_str(self): return rffi.charpsize2str(rffi.cast(rffi.CCHARP, self.c_buf), self.c_len) - -class CBuffer(buffer.Buffer): - import_from_mixin(CBufferMixin) - _immutable_ = True - - def __del__(self): - CBufferMixin.destructor(self) diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -20,6 +20,5 @@ raise oefmt(space.w_ValueError, "cannot make memory view from a buffer with a NULL data " "pointer") - w_obj = from_ref(space, view.c_obj) - buf = CBuffer(space, view.c_buf, view.c_len, w_obj) + buf = CBuffer(space, view.c_buf, view.c_len, view.c_obj) return space.wrap(W_MemoryView(buf)) From noreply at buildbot.pypy.org Sat Aug 30 23:37:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 30 Aug 2014 23:37:40 +0200 (CEST) Subject: [pypy-commit] pypy default: updates for 2.4 release Message-ID: <20140830213740.BC9B71D39A3@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r73215:d347935839d4 Date: 2014-08-31 00:36 +0300 http://bitbucket.org/pypy/pypy/changeset/d347935839d4/ Log: updates for 2.4 release diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,280 +35,290 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - 
Alex Gaynor - Michael Hudson - David Schneider - Matti Picus - Brian Kearns - Philip Jenvey - Holger Krekel - Christian Tismer - Hakan Ardo - Benjamin Peterson - Manuel Jacob - Anders Chrigstrom - Eric van Riet Paap - Wim Lavrijsen - Ronan Lamy - Richard Emslie - Alexander Schremmer - Dan Villiom Podlaski Christiansen - Lukas Diekmann - Sven Hager - Anders Lehmann - Aurelien Campeas - Niklaus Haldimann - Camillo Bruni - Laura Creighton - Toon Verwaest - Remi Meier - Leonardo Santagada - Seo Sanghyeon - Romain Guillebert - Justin Peel - Ronny Pfannschmidt - David Edelsohn - Anders Hammarquist - Jakub Gustak - Guido Wesdorp - Lawrence Oluyede - Bartosz Skowron - Daniel Roberts - Niko Matsakis - Adrien Di Mascio - Alexander Hesse - Ludovic Aubry - Jacob Hallen - Jason Creighton - Alex Martelli - Michal Bendowski - Jan de Mooij - stian - Michael Foord - Stephan Diehl - Stefan Schwarzer - Valentino Volonghi - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Jean-Paul Calderone - Timo Paulssen - Squeaky - Alexandre Fayolle - Simon Burton - Marius Gedminas - John Witulski - Konstantin Lopuhin - Greg Price - Dario Bertini - Mark Pearse - Simon Cross - Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Paweł Piotr Przeradowski - Paul deGrandis - Ilya Osadchiy - Tobias Oberstein - Adrian Kuhn - Boris Feigin - Stefano Rivera - tav - Taavi Burns - Georg Brandl - Bert Freudenberg - Stian Andreassen - Laurence Tratt - Wanja Saatkamp - Ivan Sichmann Freitas - Gerald Klix - Mike Blume - Oscar Nierstrasz - Stefan H. 
Muller - Jeremy Thurgood - Gregor Wegberg - Rami Chowdhury - Tobias Pape - Edd Barrett - David Malcolm - Eugene Oden - Henry Mason - Preston Timmons - Jeff Terrace - David Ripton - Dusty Phillips - Lukas Renggli - Guenter Jantzen - Ned Batchelder - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Andrew Chambers - Michael Schneider - Nicholas Riley - Jason Chu - Igor Trindade Oliveira - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade - Gabriel Lavoie - Olivier Dormond - Jared Grubb - Karl Bartel - Brian Dorsey - Victor Stinner - Andrews Medina - Stuart Williams - Jasper Schulz - Christian Hudon - Toby Watson - Antoine Pitrou - Aaron Iles - Michael Cheng - Justas Sadzevicius - Mikael Schönenberg - Gasper Zejn - Neil Shepperd - Elmo Mäntynen - Jonathan David Riehl - Stanislaw Halik - Anders Qvist - Chirag Jadwani - Beatrice During - Alex Perry - Vincent Legoll - Alan McIntyre - Alexander Sedov - Corbin Simpson - Christopher Pope - wenzhuman - Christian Tismer - Marc Abramowitz - Dan Stromberg - Stefano Parmesan - Alexis Daboville - Jens-Uwe Mager - Carl Meyer - Karl Ramm - Pieter Zieschang - Gabriel - Lukas Vacek - Andrew Dalke - Sylvain Thenault - Nathan Taylor - Vladimir Kryachko - Jacek Generowicz - Alejandro J. Cura - Jacob Oscarson - Travis Francis Athougies - Ryan Gonzalez - Kristjan Valur Jonsson - Sebastian Pawluś - Neil Blakey-Milner - anatoly techtonik - Lutz Paelike - Lucio Torre - Lars Wassermann - Henrik Vendelbo - Dan Buch - Miguel de Val Borro - Artur Lisiecki - Sergey Kishchenko - Ignas Mikalajunas - Christoph Gerum - Martin Blais - Lene Wagner - Tomo Cocoa - roberto at goyle - Yury V. 
Zaytsev - Anna Katrina Dominguez - William Leslie - Bobby Impollonia - timo at eistee.fritz.box - Andrew Thompson - Ben Darnell - Roberto De Ioris - Juan Francisco Cantero Hurtado - Godefroid Chappelle - Joshua Gilbert - Dan Colish - Christopher Armstrong - Michael Hudson-Doyle - Anders Sigfridsson - Yasir Suhail - rafalgalczynski at gmail.com - Floris Bruynooghe - Laurens Van Houtven - Akira Li - Gustavo Niemeyer - Stephan Busemann - Rafał Gałczyński - Yusei Tahara - Christian Muirhead - James Lan - shoma hosaka - Daniel Neuh?user - Matthew Miller - Buck Golemon - Konrad Delong - Dinu Gherman - Chris Lambacher - coolbutuseless at gmail.com - Rodrigo Araújo - w31rd0 - Jim Baker - James Robert - Armin Ronacher - Brett Cannon - yrttyr - aliceinwire - OlivierBlanvillain - Zooko Wilcox-O Hearn - Tomer Chachamu - Christopher Groskopf - Asmo Soinio - Stefan Marr - jiaaro - opassembler.py - Antony Lee - Jim Hunziker - Markus Unterwaditzer - Even Wiik Thomassen - jbs - soareschen - Kurt Griffiths - Mike Bayer - Flavio Percoco - Kristoffer Kleine - yasirs - Michael Chermside - Anna Ravencroft - Julien Phalip - Dan Loewenherz + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Matti Picus + Brian Kearns + Philip Jenvey + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Manuel Jacob + Anders Chrigstrom + Eric van Riet Paap + Ronan Lamy + Wim Lavrijsen + Richard Emslie + Alexander Schremmer + Dan Villiom Podlaski Christiansen + Lukas Diekmann + Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Remi Meier + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Romain Guillebert + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Gregor Wegberg + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Alexander 
Hesse + Ludovic Aubry + Jacob Hallen + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + stian + Michael Foord + Stephan Diehl + Tyler Wade + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Squeaky + Alexandre Fayolle + Simon Burton + Marius Gedminas + Martin Matusiak + Konstantin Lopuhin + John Witulski + Wenzhu Man + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Ivan Sichmann Freitas + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Stefano Rivera + Paweł Piotr Przeradowski + Paul deGrandis + Ilya Osadchiy + Tobias Oberstein + Adrian Kuhn + Boris Feigin + tav + Taavi Burns + Georg Brandl + Laurence Tratt + Bert Freudenberg + Stian Andreassen + Wanja Saatkamp + Gerald Klix + Mike Blume + Oscar Nierstrasz + Stefan H. Muller + Edd Barrett + Jeremy Thurgood + Rami Chowdhury + Tobias Pape + David Malcolm + Eugene Oden + Henry Mason + Vasily Kuznetsov + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Andrew Chambers + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Tim Felgentreff + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Andrews Medina + Stuart Williams + Jasper Schulz + Christian Hudon + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + anatoly techtonik + Neil Shepperd + Mikael Schönenberg + Elmo M?ntynen + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Corbin Simpson + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Christopher Pope + Christian Tismer + Marc Abramowitz + Dan Stromberg + Stefano Parmesan + Alexis Daboville + 
Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Sebastian Pawluś + Gabriel + Lukas Vacek + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Arjun Naik + Attila Gobi + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Ryan Gonzalez + Ian Foote + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Valentina Mukhamedzhanova + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Yichao Yu + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + roberto at goyle + Yury V. Zaytsev + Anna Katrina Dominguez + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Ben Darnell + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Jason Michalski + rafalgalczynski at gmail.com + Floris Bruynooghe + Laurens Van Houtven + Akira Li + Gustavo Niemeyer + Stephan Busemann + Rafał Gałczyński + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuh?user + Matthew Miller + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Rodrigo Araújo + Jim Baker + James Robert + Armin Ronacher + Brett Cannon + yrttyr + aliceinwire + OlivierBlanvillain + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro + Mads Kiilerich + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Kurt Griffiths + Mike Bayer + Matthew Miller + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Dan Crosta + Julien Phalip + Dan Loewenherz - Heinrich-Heine University, Germany - Open End AB (formerly AB Strakt), Sweden - merlinux GmbH, Germany - tismerysoft GmbH, Germany - Logilab Paris, France - DFKI GmbH, 
Germany - Impara, Germany - Change Maker, Sweden - University of California Berkeley, USA - Google Inc. - King's College London + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. + King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -80,7 +80,7 @@ Martin Matusiak Konstantin Lopuhin John Witulski - wenzhu man + Wenzhu Man Greg Price Dario Bertini Mark Pearse diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -68,7 +68,7 @@ 'Manuel Jacob': ['mjacob'], 'Rami Chowdhury': ['necaris'], 'Stanislaw Halik':['w31rd0'], - 'wenzhu man':['wenzhuman'], + 'Wenzhu Man':['wenzhu man', 'wenzhuman'], } alias_map = {} From noreply at buildbot.pypy.org Sun Aug 31 08:59:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 08:59:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Document the JitCounter and BaseJitCell classes. Message-ID: <20140831065935.6189A1C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73216:cd1ea9c54b1b Date: 2014-08-31 08:59 +0200 http://bitbucket.org/pypy/pypy/changeset/cd1ea9c54b1b/ Log: Document the JitCounter and BaseJitCell classes. These two classes have some usage patterns and invariants that are worth writing down precisely. 
diff --git a/rpython/jit/metainterp/counter.py b/rpython/jit/metainterp/counter.py --- a/rpython/jit/metainterp/counter.py +++ b/rpython/jit/metainterp/counter.py @@ -7,13 +7,78 @@ assert r_uint32.BITS == 32 UINT32MAX = 2 ** 32 - 1 -# keep in sync with the C code in pypy__decay_jit_counters +# keep in sync with the C code in pypy__decay_jit_counters below ENTRY = lltype.Struct('timetable_entry', ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)), ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5))) class JitCounter: + """A process translated with the JIT contains one prebuilt instance + of this class. It is used for three things: + + * It maps greenkey hashes to counters, to know when we have seen this + greenkey enough to reach the 'threshold' or 'function_threshold' + parameters. This is done in a lossy way by a fixed-size 'timetable'. + + * It handles the counters on the failing guards, for 'trace_eagerness'. + This is done in the same 'timetable'. + + * It records the JitCell objects that are created when we compile + a loop, in a non-lossy dictionary-like strurcture. This is done + in the 'celltable'. + + The 'timetable' is a table of DEFAULT_SIZE entries, each of which + containing 5 entries. From a hash value, we use the index number + '_get_index(hash)', and then we look in all five entries for a + matching '_get_subhash(hash)'. The five entries are roughly kept + sorted by decreasing recorded time. The hash value itself should be + computed accordingly: we only use bits 21:32 for _get_index and + bits 0:16 for _get_subhash. (This organization is "probably good" + to get not-too-random behavior; another motivation for it was for + the STM branch, to avoid pointless conflicts between threads.) + + The time value stored in the timetable is a (short-precision) + floating-point number. The idea is that a value of 0.0 means + absent, and values go up to the maximum of 1.0. 
+ + 'compute_threshold(threshold)' returns basically the fraction + 1.0/threshold, corresponding to the 'increment' value for the + following APIs. + + 'tick(hash, increment)' adds 'increment' to the time value stored + with the 'hash'. Remember that only bits 0:16,21:32 of the hash + are used; in case of collision between two hashes, they will grow + twice as fast, because each tick() call will contribute to the + colliding time value. + + 'fetch_next_hash()' returns a "random" hash value suitable for + using in tick() later. Used when compiling guards; when the + guard actually fails, we'll tick() the guard's stored random hash. + + 'reset(hash)', 'change_current_fraction(hash, new_time_value)' + change the time value associated with a hash. The former resets + it to zero, and the latter changes it to the given value (which + should be a value close to 1.0). + + 'set_decay(decay)', 'decay_all_counters()' is used to globally + reduce all the stored time values. They all get multiplied by + a fraction close to (but smaller than) 1.0, computed from the + 'decay' parameter. + + 'install_new_cell(hash, newcell)' adds the new JitCell to the + celltable, at the index given by 'hash' (bits 21:32). Unlike + the timetable, the celltable stores a linked list of JitCells + for every entry, and so it is not lossy. + + 'lookup_chain(hash)' returns the first JitCell at 'hash'. You can + then walk the chain by following the '.next' attributes until you + reach None. + + 'cleanup_chain(hash)' resets the timetable's 'hash' entry and + cleans up the celltable at 'hash'. It removes those JitCells + for which 'cell.should_remove_jitcell()' returns True. 
+ """ DEFAULT_SIZE = 2048 def __init__(self, size=DEFAULT_SIZE, translator=None): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -129,6 +129,49 @@ JC_TEMPORARY = 0x04 class BaseJitCell(object): + """Subclasses of BaseJitCell are used in tandem with the single + JitCounter instance to record places in the JIT-tracked user program + where something particular occurs with the JIT. For some + 'greenkeys' (e.g. Python bytecode position), we create one instance + of JitCell and attach it to that greenkey. This is implemented + with jitcounter.install_new_cell(), but conceptually you can think + about JitCode instances as attached to some locations of the + app-level Python code. + + We create subclasses of BaseJitCell --one per jitdriver-- so that + they can store greenkeys of different types. + + Note that we don't create a JitCell the first time we see a given + greenkey position in the interpreter. At first, we only hash the + greenkey and use that in the JitCounter to record the number of + times we have seen it. We only create a JitCell when the + JitCounter's total time value reaches 1.0 and we are starting to + JIT-compile. + + A JitCell has a 'wref_procedure_token' that is non-None when we + actually have a compiled procedure for that greenkey. (It is a + weakref, so that it could later be freed; in this case the JitCell + will likely be reclaimed a bit later by 'should_remove_jitcell()'.) + + There are other less-common cases where we also create a JitCell: to + record some long-term flags about the greenkey. In general, a + JitCell can have any combination of the following flags set: + + JC_TRACING: we are now tracing the loop from this greenkey. + We'll likely end up with a wref_procedure_token, soonish. + + JC_TEMPORARY: a "temporary" wref_procedure_token. 
+ It's the procedure_token of a dummy loop that simply calls + back the interpreter. Used for a CALL_ASSEMBLER where the + target was not compiled yet. In this situation we are still + ticking the JitCounter for the same hash, until we reach the + threshold and start tracing the loop in earnest. + + JC_DONT_TRACE_HERE: when tracing, don't inline calls to + this particular function. (We only set this flag when aborting + due to a trace too long, so we use the same flag as a hint to + also mean "please trace from here as soon as possible".) + """ flags = 0 # JC_xxx flags wref_procedure_token = None next = None From noreply at buildbot.pypy.org Sun Aug 31 10:12:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 10:12:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Another attempt at fixing Windows Message-ID: <20140831081209.0E5F91C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73217:f51abb1c0a6c Date: 2014-08-31 10:09 +0200 http://bitbucket.org/pypy/pypy/changeset/f51abb1c0a6c/ Log: Another attempt at fixing Windows diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -295,19 +295,15 @@ # Dynamically accept both stdcall and cdecl functions. # We could try to detect from pyjitpl which calling # convention this particular function takes, which would - # avoid these two extra MOVs... but later. The ebp register - # is unused here: it will be reloaded from the shadowstack. - # (This doesn't work during testing, though. Hack hack hack.) - save_ebp = not self.asm.cpu.gc_ll_descr.is_shadow_stack() - ofs = WORD * (FRAME_FIXED_SIZE - 1) - if save_ebp: # only for testing (or with Boehm) - ofs -= self.current_esp - self.mc.MOV_sr(ofs, ebp.value) - self.mc.MOV(ebp, esp) + # avoid these two extra MOVs... but later. Pick any + # caller-saved register here except ebx (used for shadowstack). 
+ if IS_X86_32: + free_caller_save_reg = edi + else: + free_caller_save_reg = r14 + self.mc.MOV(free_caller_save_reg, esp) self.mc.CALL(self.fnloc) - self.mc.MOV(esp, ebp) - if save_ebp: # only for testing (or with Boehm) - self.mc.MOV_rs(ebp.value, ofs) + self.mc.MOV(esp, free_caller_save_reg) else: self.mc.CALL(self.fnloc) if self.callconv != FFI_DEFAULT_ABI: From noreply at buildbot.pypy.org Sun Aug 31 10:24:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 10:24:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Don't use "sys.maxint // 4" but "sys.maxint // (size of unichar)". Message-ID: <20140831082423.436081C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73218:16f41f8d766c Date: 2014-08-31 10:22 +0200 http://bitbucket.org/pypy/pypy/changeset/16f41f8d766c/ Log: Don't use "sys.maxint // 4" but "sys.maxint // (size of unichar)". diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3728,7 +3728,9 @@ looptoken = JitCellToken() self.cpu.compile_loop(inputargs, operations, looptoken) # overflowing value: - deadframe = self.cpu.execute_token(looptoken, sys.maxint // 4 + 1) + unisize = self.cpu.gc_ll_descr.unicode_descr.itemsize + assert unisize in (2, 4) + deadframe = self.cpu.execute_token(looptoken, sys.maxint // unisize + 1) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == excdescr.identifier exc = self.cpu.grab_exc_value(deadframe) From noreply at buildbot.pypy.org Sun Aug 31 10:34:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 10:34:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Also remove Unix-only modules from "translation_modules". 
Message-ID: <20140831083438.7B84A1C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73219:ba3f691fea4d Date: 2014-08-31 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/ba3f691fea4d/ Log: Also remove Unix-only modules from "translation_modules". diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -52,11 +52,10 @@ if sys.platform == "win32": working_modules.add("_winreg") # unix only modules - working_modules.remove("crypt") - working_modules.remove("fcntl") - working_modules.remove("pwd") - working_modules.remove("termios") - working_modules.remove("_minimal_curses") + for name in ["crypt", "fcntl", "pwd", "termios", "_minimal_curses"]: + working_modules.remove(name) + if name in translation_modules: + translation_modules.remove(name) if "cppyy" in working_modules: working_modules.remove("cppyy") # not tested on win32 From noreply at buildbot.pypy.org Sun Aug 31 11:07:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 11:07:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove all these functions: they are not tested, not used by PyPy, Message-ID: <20140831090728.EC8691C34DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73220:cbb94f1985b6 Date: 2014-08-31 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/cbb94f1985b6/ Log: Remove all these functions: they are not tested, not used by PyPy, and not working correctly on Windows. 
diff --git a/rpython/rlib/rpath.py b/rpython/rlib/rpath.py --- a/rpython/rlib/rpath.py +++ b/rpython/rlib/rpath.py @@ -23,103 +23,3 @@ return path else: raise ImportError('Unsupported os: %s' % os.name) - - -def dirname(p): - """Returns the directory component of a pathname""" - i = p.rfind('/') + 1 - assert i >= 0 - head = p[:i] - if head and head != '/' * len(head): - head = head.rstrip('/') - return head - - -def basename(p): - """Returns the final component of a pathname""" - i = p.rfind('/') + 1 - assert i >= 0 - return p[i:] - - -def split(p): - """Split a pathname. Returns tuple "(head, tail)" where "tail" is - everything after the final slash. Either part may be empty.""" - i = p.rfind('/') + 1 - assert i >= 0 - head, tail = p[:i], p[i:] - if head and head != '/' * len(head): - head = head.rstrip('/') - return head, tail - - -def exists(path): - """Test whether a path exists. Returns False for broken symbolic links""" - try: - assert path is not None - os.stat(path) - except os.error: - return False - return True - - -import os -from os.path import isabs, islink, abspath, normpath - -def join(a, p): - """Join two or more pathname components, inserting '/' as needed. - If any component is an absolute path, all previous path components - will be discarded. An empty last part will result in a path that - ends with a separator.""" - path = a - for b in p: - if b.startswith('/'): - path = b - elif path == '' or path.endswith('/'): - path += b - else: - path += '/' + b - return path - -def realpath(filename): - """Return the canonical path of the specified filename, eliminating any -symbolic links encountered in the path.""" - if isabs(filename): - bits = ['/'] + filename.split('/')[1:] - else: - bits = [''] + filename.split('/') - - for i in range(2, len(bits)+1): - component = join(bits[0], bits[1:i]) - # Resolve symbolic links. 
- if islink(component): - resolved = _resolve_link(component) - if resolved is None: - # Infinite loop -- return original component + rest of the path - return abspath(join(component, bits[i:])) - else: - newpath = join(resolved, bits[i:]) - return realpath(newpath) - - return abspath(filename) - - -def _resolve_link(path): - """Internal helper function. Takes a path and follows symlinks - until we either arrive at something that isn't a symlink, or - encounter a path we've seen before (meaning that there's a loop). - """ - paths_seen = {} - while islink(path): - if path in paths_seen: - # Already seen this path, so we must have a symlink loop - return None - paths_seen[path] = None - # Resolve where the link points to - resolved = os.readlink(path) - if not isabs(resolved): - dir = dirname(path) - path = normpath(join(dir, [resolved])) - else: - path = normpath(resolved) - return path From noreply at buildbot.pypy.org Sun Aug 31 12:41:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 12:41:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Extend rpython.rlib.rpath to include the following functions: Message-ID: <20140831104107.541CE1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73221:6bb6900ae05e Date: 2014-08-31 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/6bb6900ae05e/ Log: Extend rpython.rlib.rpath to include the following functions: risabs rabspath rsplitdrive rjoin diff --git a/rpython/rlib/rpath.py b/rpython/rlib/rpath.py --- a/rpython/rlib/rpath.py +++ b/rpython/rlib/rpath.py @@ -2,24 +2,122 @@ Minimal (and limited) RPython version of some functions contained in os.path. 
""" -import os.path +import os from rpython.rlib import rposix -if os.name == 'posix': - # the posix version is already RPython, just use it - # (but catch exceptions) - def rabspath(path): - try: - return os.path.abspath(path) - except OSError: - return path -elif os.name == 'nt': - def rabspath(path): + +def _posix_risabs(s): + """Test whether a path is absolute""" + return s.startswith('/') + +def _posix_rabspath(path): + """Return an absolute, **non-normalized** path. + **This version does not let exceptions propagate.**""" + try: + if not _posix_risabs(path): + cwd = os.getcwd() + path = _posix_rjoin(cwd, path) + return path + except OSError: + return path + +def _posix_rjoin(a, b): + """Join two pathname components, inserting '/' as needed. + If the second component is an absolute path, the first one + will be discarded. An empty last part will result in a path that + ends with a separator.""" + path = a + if b.startswith('/'): + path = b + elif path == '' or path.endswith('/'): + path += b + else: + path += '/' + b + return path + + +def _nt_risabs(s): + """Test whether a path is absolute""" + s = _nt_rsplitdrive(s)[1] + return s.startswith('/') or s.startswith('\\') + +def _nt_rabspath(path): + try: if path == '': path = os.getcwd() - try: - return rposix._getfullpathname(path) - except OSError: - return path + return rposix._getfullpathname(path) + except OSError: + return path + +def _nt_rsplitdrive(p): + """Split a pathname into drive/UNC sharepoint and relative path + specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + """ + if len(p) > 1: + normp = p.replace(altsep, sep) + if normp.startswith('\\\\') and not normp.startswith('\\\\\\'): + # is a UNC path: + # vvvvvvvvvvvvvvvvvvvv drive letter or UNC path + # \\machine\mountpoint\directory\etc\... 
+ # directory ^^^^^^^^^^^^^^^ + index = normp.find('\\', 2) + if index < 0: + return '', p + index2 = normp.find('\\', index + 1) + # a UNC path can't have two slashes in a row + # (after the initial two) + if index2 == index + 1: + return '', p + if index2 < 0: + index2 = len(p) + return p[:index2], p[index2:] + if normp[1] == ':': + return p[:2], p[2:] + return '', p + +def _nt_rjoin(path, p): + """Join two or more pathname components, inserting "\\" as needed.""" + result_drive, result_path = _nt_rsplitdrive(path) + p_drive, p_path = _nt_rsplitdrive(p) + p_is_rel = True + if p_path and p_path[0] in '\\/': + # Second path is absolute + if p_drive or not result_drive: + result_drive = p_drive + result_path = p_path + p_is_rel = False + elif p_drive and p_drive != result_drive: + if p_drive.lower() != result_drive.lower(): + # Different drives => ignore the first path entirely + result_drive = p_drive + result_path = p_path + p_is_rel = False + else: + # Same drive in different case + result_drive = p_drive + if p_is_rel: + # Second path is relative to the first + if result_path and result_path[-1] not in '\\/': + result_path = result_path + '\\' + result_path = result_path + p_path + ## add separator between UNC and non-absolute path + if (result_path and result_path[0] not in '\\/' and + result_drive and result_drive[-1] != ':'): + return result_drive + '\\' + result_path + return result_drive + result_path + + +if os.name == 'posix': + sep = altsep = '/' + risabs = _posix_risabs + rabspath = _posix_rabspath + rjoin = _posix_rjoin +elif os.name == 'nt': + sep, altsep = '\\', '/' + risabs = _nt_risabs + rabspath = _nt_rabspath + rsplitdrive = _nt_rsplitdrive + rjoin = _nt_rjoin else: raise ImportError('Unsupported os: %s' % os.name) diff --git a/rpython/rlib/test/test_rpath.py b/rpython/rlib/test/test_rpath.py --- a/rpython/rlib/test/test_rpath.py +++ b/rpython/rlib/test/test_rpath.py @@ -2,17 +2,14 @@ import os from rpython.rlib import rpath -IS_WINDOWS = os.name 
== 'nt' - def test_rabspath_relative(tmpdir): tmpdir.chdir() assert rpath.rabspath('foo') == os.path.realpath(str(tmpdir.join('foo'))) - at py.test.mark.skipif("IS_WINDOWS") def test_rabspath_absolute_posix(): - assert rpath.rabspath('/foo') == '/foo' + assert rpath._posix_rabspath('/foo') == '/foo' - at py.test.mark.skipif("IS_WINDOWS") + at py.test.mark.skipif("os.name == 'nt'") def test_missing_current_dir(tmpdir): tmpdir1 = str(tmpdir) + '/temporary_removed' curdir1 = os.getcwd() @@ -25,7 +22,25 @@ os.chdir(curdir1) assert result == '.' - at py.test.mark.skipif("not IS_WINDOWS") +def test_rsplitdrive_nt(): + assert rpath._nt_rsplitdrive('D:\\FOO/BAR') == ('D:', '\\FOO/BAR') + assert rpath._nt_rsplitdrive('//') == ('', '//') + + at py.test.mark.skipif("os.name != 'nt'") def test_rabspath_absolute_nt(): - curdrive, _ = os.path.splitdrive(os.getcwd()) + curdrive = _ = rpath._nt_rsplitdrive(os.getcwd()) + assert len(curdrive) == 2 and curdrive[1] == ':' assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive + +def test_risabs_posix(): + assert rpath._posix_risabs('/foo/bar') + assert not rpath._posix_risabs('foo/bar') + assert not rpath._posix_risabs('\\foo\\bar') + assert not rpath._posix_risabs('C:\\foo\\bar') + +def test_risabs_nt(): + assert rpath._nt_risabs('/foo/bar') + assert not rpath._nt_risabs('foo/bar') + assert rpath._nt_risabs('\\foo\\bar') + assert rpath._nt_risabs('C:\\FOO') + assert not rpath._nt_risabs('C:FOO') From noreply at buildbot.pypy.org Sun Aug 31 12:41:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 12:41:08 +0200 (CEST) Subject: [pypy-commit] pypy default: Accept a plain string and don't try to decode it to unicode and then Message-ID: <20140831104108.6DFDB1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73222:0867fc01e680 Date: 2014-08-31 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/0867fc01e680/ Log: Accept a plain string and don't try to decode it to unicode and then re-encode 
it to utf-8. It may not work. diff --git a/py/_code/code.py b/py/_code/code.py --- a/py/_code/code.py +++ b/py/_code/code.py @@ -588,7 +588,7 @@ class TerminalRepr: def __str__(self): s = self.__unicode__() - if sys.version_info[0] < 3: + if sys.version_info[0] < 3 and isinstance(s, unicode): s = s.encode('utf-8') return s From noreply at buildbot.pypy.org Sun Aug 31 12:41:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 12:41:09 +0200 (CEST) Subject: [pypy-commit] pypy default: In RPython, this adds the ability to automatically redirect some Message-ID: <20140831104109.97CFA1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73223:3c3f30ffed49 Date: 2014-08-31 12:40 +0200 http://bitbucket.org/pypy/pypy/changeset/3c3f30ffed49/ Log: In RPython, this adds the ability to automatically redirect some functions from os.path to our own functions in rpython.rlib.rpath. diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -14,6 +14,16 @@ SPECIAL_CASES[func] = sc_func return decorate +def redirect_function(srcfunc, dstfuncname): + @register_flow_sc(srcfunc) + def sc_redirected_function(ctx, *args_w): + components = dstfuncname.split('.') + obj = __import__('.'.join(components[:-1])) + for name in components[1:]: + obj = getattr(obj, name) + return ctx.appcall(obj, *args_w) + + @register_flow_sc(__import__) def sc_import(ctx, *args_w): assert all(isinstance(arg, Constant) for arg in args_w) @@ -44,34 +54,23 @@ from rpython.flowspace.operation import op return op.getattr(w_obj, w_index).eval(ctx) - at register_flow_sc(open) -def sc_open(ctx, *args_w): - from rpython.rlib.rfile import create_file - return ctx.appcall(create_file, *args_w) +# _________________________________________________________________________ - at register_flow_sc(os.fdopen) -def sc_os_fdopen(ctx, *args_w): - from rpython.rlib.rfile import 
create_fdopen_rfile - return ctx.appcall(create_fdopen_rfile, *args_w) +redirect_function(open, 'rpython.rlib.rfile.create_file') +redirect_function(os.fdopen, 'rpython.rlib.rfile.create_fdopen_rfile') +redirect_function(os.tmpfile, 'rpython.rlib.rfile.create_temp_rfile') - at register_flow_sc(os.tmpfile) -def sc_os_tmpfile(ctx): - from rpython.rlib.rfile import create_temp_rfile - return ctx.appcall(create_temp_rfile) +# on top of PyPy only: 'os.remove != os.unlink' +# (on CPython they are '==', but not identical either) +redirect_function(os.remove, 'os.unlink') - at register_flow_sc(os.remove) -def sc_os_remove(ctx, *args_w): - # on top of PyPy only: 'os.remove != os.unlink' - # (on CPython they are '==', but not identical either) - return ctx.appcall(os.unlink, *args_w) +redirect_function(os.path.isdir, 'rpython.rlib.rpath.risdir') +redirect_function(os.path.isabs, 'rpython.rlib.rpath.risabs') +redirect_function(os.path.abspath, 'rpython.rlib.rpath.rabspath') +redirect_function(os.path.join, 'rpython.rlib.rpath.rjoin') +if hasattr(os.path, 'splitdrive'): + redirect_function(os.path.splitdrive, 'rpython.rlib.rpath.rsplitdrive') -if os.name == 'nt': - @register_flow_sc(os.path.isdir) - def sc_os_path_isdir(ctx, *args_w): - # Cpython win32 reroutes os.path.isdir to nt._isdir - # which is not rpython - import genericpath - return ctx.appcall(genericpath.isdir, *args_w) # _________________________________________________________________________ # a simplified version of the basic printing routines, for RPython programs class StdOutBuffer: diff --git a/rpython/flowspace/test/test_objspace.py b/rpython/flowspace/test/test_objspace.py --- a/rpython/flowspace/test/test_objspace.py +++ b/rpython/flowspace/test/test_objspace.py @@ -1265,6 +1265,18 @@ assert ops[1].opname == 'simple_call' assert ops[1].args[0].value is os.unlink + def test_rabspath(self): + import os.path + def f(s): + return os.path.abspath(s) + graph = self.codetest(f) + simplify_graph(graph) + ops = 
graph.startblock.operations + assert ops[0].opname == 'simple_call' + # + from rpython.rlib import rpath + assert ops[0].args[0].value is rpath.rabspath + def test_constfold_in(self): def f(): if 'x' in "xyz": diff --git a/rpython/rlib/rpath.py b/rpython/rlib/rpath.py --- a/rpython/rlib/rpath.py +++ b/rpython/rlib/rpath.py @@ -2,10 +2,29 @@ Minimal (and limited) RPython version of some functions contained in os.path. """ -import os +import os, stat from rpython.rlib import rposix +# ____________________________________________________________ +# +# Generic implementations in RPython for both POSIX and NT +# + +def risdir(s): + """Return true if the pathname refers to an existing directory.""" + try: + st = os.stat(s) + except OSError: + return False + return stat.S_ISDIR(st.st_mode) + + +# ____________________________________________________________ +# +# POSIX-only implementations +# + def _posix_risabs(s): """Test whether a path is absolute""" return s.startswith('/') @@ -36,6 +55,11 @@ return path +# ____________________________________________________________ +# +# NT-only implementations +# + def _nt_risabs(s): """Test whether a path is absolute""" s = _nt_rsplitdrive(s)[1] @@ -108,6 +132,9 @@ return result_drive + result_path +# ____________________________________________________________ + + if os.name == 'posix': sep = altsep = '/' risabs = _posix_risabs diff --git a/rpython/rlib/test/test_rpath.py b/rpython/rlib/test/test_rpath.py --- a/rpython/rlib/test/test_rpath.py +++ b/rpython/rlib/test/test_rpath.py @@ -44,3 +44,8 @@ assert rpath._nt_risabs('\\foo\\bar') assert rpath._nt_risabs('C:\\FOO') assert not rpath._nt_risabs('C:FOO') + +def test_risdir(tmpdir): + assert rpath.risdir(tmpdir) + assert not rpath.risdir('_some_non_existant_file_') + assert not rpath.risdir(os.path.join(tmpdir, '_some_non_existant_file_')) From noreply at buildbot.pypy.org Sun Aug 31 13:20:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 13:20:23 +0200 
(CEST) Subject: [pypy-commit] pypy default: Skip these three tests, which occasionally fail Message-ID: <20140831112023.B3C5F1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73224:94bbfc345092 Date: 2014-08-31 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/94bbfc345092/ Log: Skip these three tests, which occasionally fail diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -520,6 +520,7 @@ _test_cond_include('AF_NETLINK') def test_thread_safe_gethostbyaddr(): + py.test.skip("hits non-thread-safe issues with ll2ctypes") import threading nthreads = 10 ip = '8.8.8.8' @@ -539,6 +540,7 @@ assert sum(result) == nthreads def test_thread_safe_gethostbyname_ex(): + py.test.skip("hits non-thread-safe issues with ll2ctypes") import threading nthreads = 10 domain = 'google.com' @@ -557,6 +559,7 @@ assert sum(result) == nthreads def test_getaddrinfo_pydotorg_threadsafe(): + py.test.skip("hits non-thread-safe issues with ll2ctypes") import threading nthreads = 10 result = [0] * nthreads From noreply at buildbot.pypy.org Sun Aug 31 13:59:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 13:59:07 +0200 (CEST) Subject: [pypy-commit] pypy default: Add os.path.normpath(), and make it called automatically from _posix_abspath(). Message-ID: <20140831115907.8C0671C1036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73225:5a0be08136b5 Date: 2014-08-31 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/5a0be08136b5/ Log: Add os.path.normpath(), and make it called automatically from _posix_abspath(). 
diff --git a/rpython/flowspace/specialcase.py b/rpython/flowspace/specialcase.py --- a/rpython/flowspace/specialcase.py +++ b/rpython/flowspace/specialcase.py @@ -66,6 +66,7 @@ redirect_function(os.path.isdir, 'rpython.rlib.rpath.risdir') redirect_function(os.path.isabs, 'rpython.rlib.rpath.risabs') +redirect_function(os.path.normpath,'rpython.rlib.rpath.rnormpath') redirect_function(os.path.abspath, 'rpython.rlib.rpath.rabspath') redirect_function(os.path.join, 'rpython.rlib.rpath.rjoin') if hasattr(os.path, 'splitdrive'): diff --git a/rpython/rlib/rpath.py b/rpython/rlib/rpath.py --- a/rpython/rlib/rpath.py +++ b/rpython/rlib/rpath.py @@ -29,6 +29,33 @@ """Test whether a path is absolute""" return s.startswith('/') +def _posix_rnormpath(path): + """Normalize path, eliminating double slashes, etc.""" + slash, dot = '/', '.' + if path == '': + return dot + initial_slashes = path.startswith('/') + # POSIX allows one or two initial slashes, but treats three or more + # as single slash. + if (initial_slashes and + path.startswith('//') and not path.startswith('///')): + initial_slashes = 2 + comps = path.split('/') + new_comps = [] + for comp in comps: + if comp == '' or comp == '.': + continue + if (comp != '..' or (not initial_slashes and not new_comps) or + (new_comps and new_comps[-1] == '..')): + new_comps.append(comp) + elif new_comps: + new_comps.pop() + comps = new_comps + path = slash.join(comps) + if initial_slashes: + path = slash*initial_slashes + path + return path or dot + def _posix_rabspath(path): """Return an absolute, **non-normalized** path. 
**This version does not let exceptions propagate.**""" @@ -36,7 +63,7 @@ if not _posix_risabs(path): cwd = os.getcwd() path = _posix_rjoin(cwd, path) - return path + return _posix_rnormpath(path) except OSError: return path @@ -65,6 +92,56 @@ s = _nt_rsplitdrive(s)[1] return s.startswith('/') or s.startswith('\\') +def _nt_rnormpath(path): + """Normalize path, eliminating double slashes, etc.""" + backslash, dot = '\\', '.' + if path.startswith(('\\\\.\\', '\\\\?\\')): + # in the case of paths with these prefixes: + # \\.\ -> device names + # \\?\ -> literal paths + # do not do any normalization, but return the path unchanged + return path + path = path.replace("/", "\\") + prefix, path = _nt_rsplitdrive(path) + # We need to be careful here. If the prefix is empty, and the path starts + # with a backslash, it could either be an absolute path on the current + # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It + # is therefore imperative NOT to collapse multiple backslashes blindly in + # that case. + # The code below preserves multiple backslashes when there is no drive + # letter. This means that the invalid filename \\\a\b is preserved + # unchanged, where a\\\b is normalised to a\b. It's not clear that there + # is any better behaviour for such edge cases. + if prefix == '': + # No drive letter - preserve initial backslashes + while path.startswith("\\"): + prefix = prefix + backslash + path = path[1:] + else: + # We have a drive letter - collapse initial backslashes + if path.startswith("\\"): + prefix = prefix + backslash + path = path.lstrip("\\") + comps = path.split("\\") + i = 0 + while i < len(comps): + if comps[i] in ('.', ''): + del comps[i] + elif comps[i] == '..': + if i > 0 and comps[i-1] != '..': + del comps[i-1:i+1] + i -= 1 + elif i == 0 and prefix.endswith("\\"): + del comps[i] + else: + i += 1 + else: + i += 1 + # If the path is now empty, substitute '.' 
+ if not prefix and not comps: + comps.append(dot) + return prefix + backslash.join(comps) + def _nt_rabspath(path): try: if path == '': @@ -138,11 +215,13 @@ if os.name == 'posix': sep = altsep = '/' risabs = _posix_risabs + rnormpath = _posix_rnormpath rabspath = _posix_rabspath rjoin = _posix_rjoin elif os.name == 'nt': sep, altsep = '\\', '/' risabs = _nt_risabs + rnormpath = _nt_rnormpath rabspath = _nt_rabspath rsplitdrive = _nt_rsplitdrive rjoin = _nt_rjoin diff --git a/rpython/rlib/test/test_rpath.py b/rpython/rlib/test/test_rpath.py --- a/rpython/rlib/test/test_rpath.py +++ b/rpython/rlib/test/test_rpath.py @@ -2,12 +2,48 @@ import os from rpython.rlib import rpath +def test_rnormpath_posix(): + assert rpath._posix_rnormpath('///foo') == '/foo' + assert rpath._posix_rnormpath("") == "." + assert rpath._posix_rnormpath("/") == "/" + assert rpath._posix_rnormpath("//") == "//" + assert rpath._posix_rnormpath("///") == "/" + assert rpath._posix_rnormpath("///foo/.//bar//") == "/foo/bar" + assert rpath._posix_rnormpath("///foo/.//bar//.//..//.//baz") == "/foo/baz" + assert rpath._posix_rnormpath("///..//./foo/.//bar") == "/foo/bar" + +def test_rnormpath_nt(): + assert rpath._nt_rnormpath('A//////././//.//B') == r'A\B' + assert rpath._nt_rnormpath('A/./B') == r'A\B' + assert rpath._nt_rnormpath('A/foo/../B') == r'A\B' + assert rpath._nt_rnormpath('C:A//B') == r'C:A\B' + assert rpath._nt_rnormpath('D:A/./B') == r'D:A\B' + assert rpath._nt_rnormpath('e:A/foo/../B') == r'e:A\B' + assert rpath._nt_rnormpath('C:///A//B') == r'C:\A\B' + assert rpath._nt_rnormpath('D:///A/./B') == r'D:\A\B' + assert rpath._nt_rnormpath('e:///A/foo/../B') == r'e:\A\B' + assert rpath._nt_rnormpath('..') == r'..' + assert rpath._nt_rnormpath('.') == r'.' + assert rpath._nt_rnormpath('') == r'.' 
+ assert rpath._nt_rnormpath('/') == '\\' + assert rpath._nt_rnormpath('c:/') == 'c:\\' + assert rpath._nt_rnormpath('/../.././..') == '\\' + assert rpath._nt_rnormpath('c:/../../..') == 'c:\\' + assert rpath._nt_rnormpath('../.././..') == r'..\..\..' + assert rpath._nt_rnormpath('K:../.././..') == r'K:..\..\..' + assert rpath._nt_rnormpath('C:////a/b') == r'C:\a\b' + assert rpath._nt_rnormpath('//machine/share//a/b') == r'\\machine\share\a\b' + assert rpath._nt_rnormpath('\\\\.\\NUL') == r'\\.\NUL' + assert rpath._nt_rnormpath('\\\\?\\D:/XY\\Z') == r'\\?\D:/XY\Z' + def test_rabspath_relative(tmpdir): tmpdir.chdir() assert rpath.rabspath('foo') == os.path.realpath(str(tmpdir.join('foo'))) def test_rabspath_absolute_posix(): assert rpath._posix_rabspath('/foo') == '/foo' + assert rpath._posix_rabspath('/foo/bar/..') == '/foo' + assert rpath._posix_rabspath('/foo/bar/../x') == '/foo/x' @py.test.mark.skipif("os.name == 'nt'") def test_missing_current_dir(tmpdir): @@ -28,6 +64,9 @@ @py.test.mark.skipif("os.name != 'nt'") def test_rabspath_absolute_nt(): + assert rpath._nt_rabspath('d:\\foo') == 'd:\\foo' + assert rpath._nt_rabspath('d:\\foo\\bar\\..') == 'd:\\foo' + assert rpath._nt_rabspath('d:\\foo\\bar\\..\\x') == 'd:\\foo\\x' curdrive = _ = rpath._nt_rsplitdrive(os.getcwd()) assert len(curdrive) == 2 and curdrive[1] == ':' assert rpath.rabspath('\\foo') == '%s\\foo' % curdrive @@ -46,6 +85,7 @@ assert not rpath._nt_risabs('C:FOO') def test_risdir(tmpdir): + tmpdir = str(tmpdir) assert rpath.risdir(tmpdir) assert not rpath.risdir('_some_non_existant_file_') assert not rpath.risdir(os.path.join(tmpdir, '_some_non_existant_file_')) From noreply at buildbot.pypy.org Sun Aug 31 14:50:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 14:50:36 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix: r5/r6/r7 can hold the address of the function to call, but Message-ID: <20140831125036.9BAAE1C34DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: Changeset: r73226:fdfe7e9efa81 Date: 2014-08-31 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/fdfe7e9efa81/ Log: Fix: r5/r6/r7 can hold the address of the function to call, but in call_release_gil mode these three registers are garbaged. diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -38,9 +38,7 @@ if self.fnloc.is_imm(): self.mc.BL(self.fnloc.value) return - if self.fnloc.is_stack(): - self.asm.mov_loc_loc(self.fnloc, r.ip) - self.fnloc = r.ip + # --self.fnloc.is_stack() is always remapped to r4 here assert self.fnloc.is_core_reg() self.mc.BLX(self.fnloc.value) @@ -82,6 +80,15 @@ self.mc.gen_load_int(r.ip.value, n) self.mc.SUB_rr(r.sp.value, r.sp.value, r.ip.value) + def _must_remap_fnloc(self): + fnloc = self.fnloc + if fnloc.is_stack(): + return True + if self.is_call_release_gil: + if fnloc is r.r5 or fnloc is r.r6 or fnloc is r.r7: + return True + return False + def call_releasegil_addr_and_move_real_arguments(self, fastgil): assert self.is_call_release_gil assert not self.asm._is_asmgcc() @@ -261,7 +268,7 @@ # or on the stack, which we can not access later # If this happens to be the case we remap the register to r4 and use r4 # to call the function - if self.fnloc in r.argument_regs or self.fnloc.is_stack(): + if self.fnloc in r.argument_regs or self._must_remap_fnloc(): non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 @@ -366,7 +373,7 @@ # or on the stack, which we can not access later # If this happens to be the case we remap the register to r4 and use r4 # to call the function - if self.fnloc in non_float_regs or self.fnloc.is_stack(): + if self.fnloc in non_float_regs or self._must_remap_fnloc(): non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 From noreply at buildbot.pypy.org Sun Aug 31 15:29:36 2014 From: noreply at buildbot.pypy.org 
(groggi) Date: Sun, 31 Aug 2014 15:29:36 +0200 (CEST) Subject: [pypy-commit] pypy gc-incminimark-pinning: Merge default into gc-incminimark-pinning Message-ID: <20140831132936.04E3C1C31B7@cobra.cs.uni-duesseldorf.de> Author: Gregor Wegberg Branch: gc-incminimark-pinning Changeset: r73227:d21434327ff6 Date: 2014-08-31 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/d21434327ff6/ Log: Merge default into gc-incminimark-pinning diff too long, truncating to 2000 out of 41471 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -35,281 +35,290 @@ the beginning of each file) the files in the 'pypy' directory are each copyrighted by one or more of the following people and organizations: - Armin Rigo - Maciej Fijalkowski - Carl Friedrich Bolz - Antonio Cuni - Amaury Forgeot d'Arc - Samuele Pedroni - Alex Gaynor - Michael Hudson - David Schneider - Matti Picus - Brian Kearns - Philip Jenvey - Holger Krekel - Christian Tismer - Hakan Ardo - Benjamin Peterson - Manuel Jacob - Anders Chrigstrom - Eric van Riet Paap - Wim Lavrijsen - Ronan Lamy - Richard Emslie - Alexander Schremmer - Dan Villiom Podlaski Christiansen - Lukas Diekmann - Sven Hager - Anders Lehmann - Aurelien Campeas - Niklaus Haldimann - Camillo Bruni - Laura Creighton - Toon Verwaest - Remi Meier - Leonardo Santagada - Seo Sanghyeon - Romain Guillebert - Justin Peel - Ronny Pfannschmidt - David Edelsohn - Anders Hammarquist - Jakub Gustak - Guido Wesdorp - Lawrence Oluyede - Bartosz Skowron - Daniel Roberts - Niko Matsakis - Adrien Di Mascio - Alexander Hesse - Ludovic Aubry - Jacob Hallen - Jason Creighton - Alex Martelli - Michal Bendowski - Jan de Mooij - stian - Michael Foord - Stephan Diehl - Stefan Schwarzer - Valentino Volonghi - Tomek Meka - Patrick Maupin - Bob Ippolito - Bruno Gola - Jean-Paul Calderone - Timo Paulssen - Squeaky - Alexandre Fayolle - Simon Burton - Marius Gedminas - John Witulski - Konstantin Lopuhin - Greg Price - Dario Bertini - Mark Pearse - Simon Cross - 
Andreas Stührk - Jean-Philippe St. Pierre - Guido van Rossum - Pavel Vinogradov - Paweł Piotr Przeradowski - Paul deGrandis - Ilya Osadchiy - Tobias Oberstein - Adrian Kuhn - Boris Feigin - Stefano Rivera - tav - Taavi Burns - Georg Brandl - Bert Freudenberg - Stian Andreassen - Laurence Tratt - Wanja Saatkamp - Ivan Sichmann Freitas - Gerald Klix - Mike Blume - Oscar Nierstrasz - Stefan H. Muller - Jeremy Thurgood - Gregor Wegberg - Rami Chowdhury - Tobias Pape - Edd Barrett - David Malcolm - Eugene Oden - Henry Mason - Preston Timmons - Jeff Terrace - David Ripton - Dusty Phillips - Lukas Renggli - Guenter Jantzen - Ned Batchelder - Amit Regmi - Ben Young - Nicolas Chauvat - Andrew Durdin - Andrew Chambers - Michael Schneider - Nicholas Riley - Jason Chu - Igor Trindade Oliveira - Rocco Moretti - Gintautas Miliauskas - Michael Twomey - Lucian Branescu Mihaila - Tim Felgentreff - Tyler Wade - Gabriel Lavoie - Olivier Dormond - Jared Grubb - Karl Bartel - Brian Dorsey - Victor Stinner - Andrews Medina - Stuart Williams - Jasper Schulz - Christian Hudon - Toby Watson - Antoine Pitrou - Aaron Iles - Michael Cheng - Justas Sadzevicius - Mikael Schönenberg - Gasper Zejn - Neil Shepperd - Elmo Mäntynen - Jonathan David Riehl - Stanislaw Halik - Anders Qvist - Chirag Jadwani - Beatrice During - Alex Perry - Vincent Legoll - Alan McIntyre - Alexander Sedov - Corbin Simpson - Christopher Pope - wenzhuman - Christian Tismer - Marc Abramowitz - Dan Stromberg - Stefano Parmesan - Alexis Daboville - Jens-Uwe Mager - Carl Meyer - Karl Ramm - Pieter Zieschang - Gabriel - Lukas Vacek - Andrew Dalke - Sylvain Thenault - Nathan Taylor - Vladimir Kryachko - Jacek Generowicz - Alejandro J. 
Cura - Jacob Oscarson - Travis Francis Athougies - Ryan Gonzalez - Kristjan Valur Jonsson - Sebastian Pawluś - Neil Blakey-Milner - anatoly techtonik - Lutz Paelike - Lucio Torre - Lars Wassermann - Henrik Vendelbo - Dan Buch - Miguel de Val Borro - Artur Lisiecki - Sergey Kishchenko - Ignas Mikalajunas - Christoph Gerum - Martin Blais - Lene Wagner - Tomo Cocoa - roberto at goyle - Yury V. Zaytsev - Anna Katrina Dominguez - William Leslie - Bobby Impollonia - timo at eistee.fritz.box - Andrew Thompson - Ben Darnell - Roberto De Ioris - Juan Francisco Cantero Hurtado - Godefroid Chappelle - Joshua Gilbert - Dan Colish - Christopher Armstrong - Michael Hudson-Doyle - Anders Sigfridsson - Yasir Suhail - rafalgalczynski at gmail.com - Floris Bruynooghe - Laurens Van Houtven - Akira Li - Gustavo Niemeyer - Stephan Busemann - Rafał Gałczyński - Yusei Tahara - Christian Muirhead - James Lan - shoma hosaka - Daniel Neuh?user - Matthew Miller - Buck Golemon - Konrad Delong - Dinu Gherman - Chris Lambacher - coolbutuseless at gmail.com - Rodrigo Araújo - w31rd0 - Jim Baker - James Robert - Armin Ronacher - Brett Cannon - yrttyr - aliceinwire - OlivierBlanvillain - Zooko Wilcox-O Hearn - Tomer Chachamu - Christopher Groskopf - Asmo Soinio - Stefan Marr - jiaaro - opassembler.py - Antony Lee - Jim Hunziker - Markus Unterwaditzer - Even Wiik Thomassen - jbs - soareschen - Kurt Griffiths - Mike Bayer - Flavio Percoco - Kristoffer Kleine - yasirs - Michael Chermside - Anna Ravencroft - Andrew Chambers - Julien Phalip - Dan Loewenherz + Armin Rigo + Maciej Fijalkowski + Carl Friedrich Bolz + Antonio Cuni + Amaury Forgeot d'Arc + Samuele Pedroni + Alex Gaynor + Michael Hudson + David Schneider + Matti Picus + Brian Kearns + Philip Jenvey + Holger Krekel + Christian Tismer + Hakan Ardo + Benjamin Peterson + Manuel Jacob + Anders Chrigstrom + Eric van Riet Paap + Ronan Lamy + Wim Lavrijsen + Richard Emslie + Alexander Schremmer + Dan Villiom Podlaski Christiansen + Lukas Diekmann + 
Sven Hager + Anders Lehmann + Aurelien Campeas + Niklaus Haldimann + Remi Meier + Camillo Bruni + Laura Creighton + Toon Verwaest + Leonardo Santagada + Seo Sanghyeon + Romain Guillebert + Justin Peel + Ronny Pfannschmidt + David Edelsohn + Anders Hammarquist + Jakub Gustak + Guido Wesdorp + Lawrence Oluyede + Bartosz Skowron + Gregor Wegberg + Daniel Roberts + Niko Matsakis + Adrien Di Mascio + Alexander Hesse + Ludovic Aubry + Jacob Hallen + Jason Creighton + Alex Martelli + Michal Bendowski + Jan de Mooij + stian + Michael Foord + Stephan Diehl + Tyler Wade + Stefan Schwarzer + Valentino Volonghi + Tomek Meka + Patrick Maupin + Bob Ippolito + Bruno Gola + Jean-Paul Calderone + Timo Paulssen + Squeaky + Alexandre Fayolle + Simon Burton + Marius Gedminas + Martin Matusiak + Konstantin Lopuhin + John Witulski + Wenzhu Man + Greg Price + Dario Bertini + Mark Pearse + Simon Cross + Ivan Sichmann Freitas + Andreas Stührk + Jean-Philippe St. Pierre + Guido van Rossum + Pavel Vinogradov + Stefano Rivera + Paweł Piotr Przeradowski + Paul deGrandis + Ilya Osadchiy + Tobias Oberstein + Adrian Kuhn + Boris Feigin + tav + Taavi Burns + Georg Brandl + Laurence Tratt + Bert Freudenberg + Stian Andreassen + Wanja Saatkamp + Gerald Klix + Mike Blume + Oscar Nierstrasz + Stefan H. 
Muller + Edd Barrett + Jeremy Thurgood + Rami Chowdhury + Tobias Pape + David Malcolm + Eugene Oden + Henry Mason + Vasily Kuznetsov + Preston Timmons + Jeff Terrace + David Ripton + Dusty Phillips + Lukas Renggli + Guenter Jantzen + Ned Batchelder + Amit Regmi + Ben Young + Nicolas Chauvat + Andrew Durdin + Andrew Chambers + Michael Schneider + Nicholas Riley + Jason Chu + Igor Trindade Oliveira + Tim Felgentreff + Rocco Moretti + Gintautas Miliauskas + Michael Twomey + Lucian Branescu Mihaila + Gabriel Lavoie + Olivier Dormond + Jared Grubb + Karl Bartel + Brian Dorsey + Victor Stinner + Andrews Medina + Stuart Williams + Jasper Schulz + Christian Hudon + Toby Watson + Antoine Pitrou + Aaron Iles + Michael Cheng + Justas Sadzevicius + Gasper Zejn + anatoly techtonik + Neil Shepperd + Mikael Schönenberg + Elmo M?ntynen + Jonathan David Riehl + Stanislaw Halik + Anders Qvist + Corbin Simpson + Chirag Jadwani + Beatrice During + Alex Perry + Vincent Legoll + Alan McIntyre + Alexander Sedov + Christopher Pope + Christian Tismer + Marc Abramowitz + Dan Stromberg + Stefano Parmesan + Alexis Daboville + Jens-Uwe Mager + Carl Meyer + Karl Ramm + Pieter Zieschang + Sebastian Pawluś + Gabriel + Lukas Vacek + Andrew Dalke + Sylvain Thenault + Nathan Taylor + Vladimir Kryachko + Arjun Naik + Attila Gobi + Jacek Generowicz + Alejandro J. Cura + Jacob Oscarson + Travis Francis Athougies + Ryan Gonzalez + Ian Foote + Kristjan Valur Jonsson + Neil Blakey-Milner + Lutz Paelike + Lucio Torre + Lars Wassermann + Valentina Mukhamedzhanova + Henrik Vendelbo + Dan Buch + Miguel de Val Borro + Artur Lisiecki + Sergey Kishchenko + Yichao Yu + Ignas Mikalajunas + Christoph Gerum + Martin Blais + Lene Wagner + Tomo Cocoa + roberto at goyle + Yury V. 
Zaytsev + Anna Katrina Dominguez + William Leslie + Bobby Impollonia + timo at eistee.fritz.box + Andrew Thompson + Yusei Tahara + Ben Darnell + Roberto De Ioris + Juan Francisco Cantero Hurtado + Godefroid Chappelle + Joshua Gilbert + Dan Colish + Christopher Armstrong + Michael Hudson-Doyle + Anders Sigfridsson + Yasir Suhail + Jason Michalski + rafalgalczynski at gmail.com + Floris Bruynooghe + Laurens Van Houtven + Akira Li + Gustavo Niemeyer + Stephan Busemann + Rafał Gałczyński + Christian Muirhead + James Lan + shoma hosaka + Daniel Neuh?user + Matthew Miller + Buck Golemon + Konrad Delong + Dinu Gherman + Chris Lambacher + coolbutuseless at gmail.com + Rodrigo Araújo + Jim Baker + James Robert + Armin Ronacher + Brett Cannon + yrttyr + aliceinwire + OlivierBlanvillain + Zooko Wilcox-O Hearn + Tomer Chachamu + Christopher Groskopf + Asmo Soinio + Stefan Marr + jiaaro + Mads Kiilerich + opassembler.py + Antony Lee + Jim Hunziker + Markus Unterwaditzer + Even Wiik Thomassen + jbs + soareschen + Kurt Griffiths + Mike Bayer + Matthew Miller + Flavio Percoco + Kristoffer Kleine + yasirs + Michael Chermside + Anna Ravencroft + Dan Crosta + Julien Phalip + Dan Loewenherz - Heinrich-Heine University, Germany - Open End AB (formerly AB Strakt), Sweden - merlinux GmbH, Germany - tismerysoft GmbH, Germany - Logilab Paris, France - DFKI GmbH, Germany - Impara, Germany - Change Maker, Sweden - University of California Berkeley, USA - Google Inc. - King's College London + Heinrich-Heine University, Germany + Open End AB (formerly AB Strakt), Sweden + merlinux GmbH, Germany + tismerysoft GmbH, Germany + Logilab Paris, France + DFKI GmbH, Germany + Impara, Germany + Change Maker, Sweden + University of California Berkeley, USA + Google Inc. 
+ King's College London The PyPy Logo as used by http://speed.pypy.org and others was created by Samuel Reis and is distributed on terms of Creative Commons Share Alike @@ -355,6 +364,6 @@ See the License for the specific language governing permissions and limitations under the License. -Detailled license information is contained in the NOTICE file in the +Detailed license information is contained in the NOTICE file in the directory. diff --git a/lib-python/2.7/CGIHTTPServer.py b/lib-python/2.7/CGIHTTPServer.py --- a/lib-python/2.7/CGIHTTPServer.py +++ b/lib-python/2.7/CGIHTTPServer.py @@ -84,7 +84,7 @@ path begins with one of the strings in self.cgi_directories (and the next character is a '/' or the end of the string). """ - collapsed_path = _url_collapse_path(self.path) + collapsed_path = _url_collapse_path(urllib.unquote(self.path)) dir_sep = collapsed_path.find('/', 1) head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:] if head in self.cgi_directories: diff --git a/lib-python/2.7/Cookie.py b/lib-python/2.7/Cookie.py --- a/lib-python/2.7/Cookie.py +++ b/lib-python/2.7/Cookie.py @@ -1,6 +1,3 @@ -#!/usr/bin/env python -# - #### # Copyright 2000 by Timothy O'Malley # diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py --- a/lib-python/2.7/HTMLParser.py +++ b/lib-python/2.7/HTMLParser.py @@ -22,9 +22,12 @@ starttagopen = re.compile('<[a-zA-Z]') piclose = re.compile('>') commentclose = re.compile(r'--\s*>') -tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*') + # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +# note: if you change tagfind/attrfind remember to update locatestarttagend too +tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +# this regex is currently unused, but left for backward compatibility tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*') attrfind = re.compile( @@ -32,7 +35,7 @@ 
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name (?:[\s/]* # optional whitespace before attribute name (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name (?:\s*=+\s* # value indicator @@ -192,9 +195,9 @@ i = self.updatepos(i, k) continue else: - if ";" in rawdata[i:]: #bail by consuming &# - self.handle_data(rawdata[0:2]) - i = self.updatepos(i, 2) + if ";" in rawdata[i:]: # bail by consuming '&#' + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) break elif startswith('&', i): match = entityref.match(rawdata, i) @@ -373,14 +376,14 @@ self.handle_data(rawdata[i:gtpos]) return gtpos # find the name: w3.org/TR/html5/tokenization.html#tag-name-state - namematch = tagfind_tolerant.match(rawdata, i+2) + namematch = tagfind.match(rawdata, i+2) if not namematch: # w3.org/TR/html5/tokenization.html#end-tag-open-state if rawdata[i:i+3] == '': return i+3 else: return self.parse_bogus_comment(i) - tagname = namematch.group().lower() + tagname = namematch.group(1).lower() # consume and ignore other stuff between the name and the > # Note: this is not 100% correct, since we might have things like # , but looking for > after tha name should cover diff --git a/lib-python/2.7/SimpleHTTPServer.py b/lib-python/2.7/SimpleHTTPServer.py --- a/lib-python/2.7/SimpleHTTPServer.py +++ b/lib-python/2.7/SimpleHTTPServer.py @@ -43,8 +43,10 @@ """Serve a GET request.""" f = self.send_head() if f: - self.copyfile(f, self.wfile) - f.close() + try: + self.copyfile(f, self.wfile) + finally: + f.close() def do_HEAD(self): """Serve a HEAD request.""" @@ -88,13 +90,17 @@ except IOError: self.send_error(404, "File not found") return None - self.send_response(200) - self.send_header("Content-type", ctype) - fs = os.fstat(f.fileno()) - self.send_header("Content-Length", str(fs[6])) - self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) - 
self.end_headers() - return f + try: + self.send_response(200) + self.send_header("Content-type", ctype) + fs = os.fstat(f.fileno()) + self.send_header("Content-Length", str(fs[6])) + self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) + self.end_headers() + return f + except: + f.close() + raise def list_directory(self, path): """Helper to produce a directory listing (absent index.html). diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py --- a/lib-python/2.7/SimpleXMLRPCServer.py +++ b/lib-python/2.7/SimpleXMLRPCServer.py @@ -704,4 +704,5 @@ server = SimpleXMLRPCServer(("localhost", 8000)) server.register_function(pow) server.register_function(lambda x,y: x+y, 'add') + server.register_multicall_functions() server.serve_forever() diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py --- a/lib-python/2.7/SocketServer.py +++ b/lib-python/2.7/SocketServer.py @@ -513,35 +513,37 @@ def collect_children(self): """Internal routine to wait for children that have exited.""" - if self.active_children is None: return + if self.active_children is None: + return + + # If we're above the max number of children, wait and reap them until + # we go back below threshold. Note that we use waitpid(-1) below to be + # able to collect children in size() syscalls instead + # of size(): the downside is that this might reap children + # which we didn't spawn, which is why we only resort to this when we're + # above max_children. while len(self.active_children) >= self.max_children: - # XXX: This will wait for any child process, not just ones - # spawned by this library. This could confuse other - # libraries that expect to be able to wait for their own - # children. 
try: - pid, status = os.waitpid(0, 0) - except os.error: - pid = None - if pid not in self.active_children: continue - self.active_children.remove(pid) + pid, _ = os.waitpid(-1, 0) + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # we don't have any children, we're done + self.active_children.clear() + elif e.errno != errno.EINTR: + break - # XXX: This loop runs more system calls than it ought - # to. There should be a way to put the active_children into a - # process group and then use os.waitpid(-pgid) to wait for any - # of that set, but I couldn't find a way to allocate pgids - # that couldn't collide. - for child in self.active_children: + # Now reap all defunct children. + for pid in self.active_children.copy(): try: - pid, status = os.waitpid(child, os.WNOHANG) - except os.error: - pid = None - if not pid: continue - try: - self.active_children.remove(pid) - except ValueError, e: - raise ValueError('%s. x=%d and list=%r' % (e.message, pid, - self.active_children)) + pid, _ = os.waitpid(pid, os.WNOHANG) + # if the child hasn't exited yet, pid will be 0 and ignored by + # discard() below + self.active_children.discard(pid) + except OSError as e: + if e.errno == errno.ECHILD: + # someone else reaped it + self.active_children.discard(pid) def handle_timeout(self): """Wait for zombies after self.timeout seconds of inactivity. @@ -557,8 +559,8 @@ if pid: # Parent process if self.active_children is None: - self.active_children = [] - self.active_children.append(pid) + self.active_children = set() + self.active_children.add(pid) self.close_request(request) #close handle in parent process return else: diff --git a/lib-python/2.7/_MozillaCookieJar.py b/lib-python/2.7/_MozillaCookieJar.py --- a/lib-python/2.7/_MozillaCookieJar.py +++ b/lib-python/2.7/_MozillaCookieJar.py @@ -39,7 +39,7 @@ magic_re = "#( Netscape)? 
HTTP Cookie File" header = """\ # Netscape HTTP Cookie File -# http://www.netscape.com/newsref/std/cookie_spec.html +# http://curl.haxx.se/rfc/cookie_spec.html # This is a generated file! Do not edit. """ diff --git a/lib-python/2.7/_abcoll.py b/lib-python/2.7/_abcoll.py --- a/lib-python/2.7/_abcoll.py +++ b/lib-python/2.7/_abcoll.py @@ -165,12 +165,17 @@ def __gt__(self, other): if not isinstance(other, Set): return NotImplemented - return other < self + return len(self) > len(other) and self.__ge__(other) def __ge__(self, other): if not isinstance(other, Set): return NotImplemented - return other <= self + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True def __eq__(self, other): if not isinstance(other, Set): @@ -194,6 +199,8 @@ return NotImplemented return self._from_iterable(value for value in other if value in self) + __rand__ = __and__ + def isdisjoint(self, other): 'Return True if two sets have a null intersection.' 
for value in other: @@ -207,6 +214,8 @@ chain = (e for s in (self, other) for e in s) return self._from_iterable(chain) + __ror__ = __or__ + def __sub__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -215,6 +224,14 @@ return self._from_iterable(value for value in self if value not in other) + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + def __xor__(self, other): if not isinstance(other, Set): if not isinstance(other, Iterable): @@ -222,6 +239,8 @@ other = self._from_iterable(other) return (self - other) | (other - self) + __rxor__ = __xor__ + # Sets are not hashable by default, but subclasses can change this __hash__ = None diff --git a/lib-python/2.7/_osx_support.py b/lib-python/2.7/_osx_support.py --- a/lib-python/2.7/_osx_support.py +++ b/lib-python/2.7/_osx_support.py @@ -182,7 +182,7 @@ # Compiler is GCC, check if it is LLVM-GCC data = _read_output("'%s' --version" % (cc.replace("'", "'\"'\"'"),)) - if 'llvm-gcc' in data: + if data and 'llvm-gcc' in data: # Found LLVM-GCC, fall back to clang cc = _find_build_tool('clang') @@ -450,8 +450,16 @@ # case and disallow installs. cflags = _config_vars.get(_INITPRE+'CFLAGS', _config_vars.get('CFLAGS', '')) - if ((macrelease + '.') >= '10.4.' 
and - '-arch' in cflags.strip()): + if macrelease: + try: + macrelease = tuple(int(i) for i in macrelease.split('.')[0:2]) + except ValueError: + macrelease = (10, 0) + else: + # assume no universal support + macrelease = (10, 0) + + if (macrelease >= (10, 4)) and '-arch' in cflags.strip(): # The universal build will build fat binaries, but not on # systems before 10.4 diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py --- a/lib-python/2.7/_pyio.py +++ b/lib-python/2.7/_pyio.py @@ -192,38 +192,45 @@ (appending and "a" or "") + (updating and "+" or ""), closefd) - line_buffering = False - if buffering == 1 or buffering < 0 and raw.isatty(): - buffering = -1 - line_buffering = True - if buffering < 0: - buffering = DEFAULT_BUFFER_SIZE - try: - bs = os.fstat(raw.fileno()).st_blksize - except (os.error, AttributeError): - pass + result = raw + try: + line_buffering = False + if buffering == 1 or buffering < 0 and raw.isatty(): + buffering = -1 + line_buffering = True + if buffering < 0: + buffering = DEFAULT_BUFFER_SIZE + try: + bs = os.fstat(raw.fileno()).st_blksize + except (os.error, AttributeError): + pass + else: + if bs > 1: + buffering = bs + if buffering < 0: + raise ValueError("invalid buffering size") + if buffering == 0: + if binary: + return result + raise ValueError("can't have unbuffered text I/O") + if updating: + buffer = BufferedRandom(raw, buffering) + elif writing or appending: + buffer = BufferedWriter(raw, buffering) + elif reading: + buffer = BufferedReader(raw, buffering) else: - if bs > 1: - buffering = bs - if buffering < 0: - raise ValueError("invalid buffering size") - if buffering == 0: + raise ValueError("unknown mode: %r" % mode) + result = buffer if binary: - return raw - raise ValueError("can't have unbuffered text I/O") - if updating: - buffer = BufferedRandom(raw, buffering) - elif writing or appending: - buffer = BufferedWriter(raw, buffering) - elif reading: - buffer = BufferedReader(raw, buffering) - else: - raise 
ValueError("unknown mode: %r" % mode) - if binary: - return buffer - text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) - text.mode = mode - return text + return result + text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering) + result = text + text.mode = mode + return result + except: + result.close() + raise class DocDescriptor: @@ -1997,7 +2004,13 @@ def getvalue(self): self.flush() - return self.buffer.getvalue().decode(self._encoding, self._errors) + decoder = self._decoder or self._get_decoder() + old_state = decoder.getstate() + decoder.reset() + try: + return decoder.decode(self.buffer.getvalue(), final=True) + finally: + decoder.setstate(old_state) def __repr__(self): # TextIOWrapper tells the encoding in its repr. In StringIO, diff --git a/lib-python/2.7/_weakrefset.py b/lib-python/2.7/_weakrefset.py --- a/lib-python/2.7/_weakrefset.py +++ b/lib-python/2.7/_weakrefset.py @@ -60,6 +60,8 @@ for itemref in self.data: item = itemref() if item is not None: + # Caveat: the iterator will keep a strong reference to + # `item` until it is resumed or closed. 
yield item def __len__(self): diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py --- a/lib-python/2.7/aifc.py +++ b/lib-python/2.7/aifc.py @@ -778,7 +778,7 @@ def _ensure_header_written(self, datasize): if not self._nframeswritten: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): if not self._sampwidth: self._sampwidth = 2 if self._sampwidth != 2: @@ -844,7 +844,7 @@ if self._datalength & 1: self._datalength = self._datalength + 1 if self._aifc: - if self._comptype in ('ULAW', 'ALAW'): + if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'): self._datalength = self._datalength // 2 if self._datalength & 1: self._datalength = self._datalength + 1 @@ -852,7 +852,10 @@ self._datalength = (self._datalength + 3) // 4 if self._datalength & 1: self._datalength = self._datalength + 1 - self._form_length_pos = self._file.tell() + try: + self._form_length_pos = self._file.tell() + except (AttributeError, IOError): + self._form_length_pos = None commlength = self._write_form_length(self._datalength) if self._aifc: self._file.write('AIFC') @@ -864,7 +867,8 @@ self._file.write('COMM') _write_ulong(self._file, commlength) _write_short(self._file, self._nchannels) - self._nframes_pos = self._file.tell() + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() _write_ulong(self._file, self._nframes) if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'): _write_short(self._file, 8) @@ -875,7 +879,8 @@ self._file.write(self._comptype) _write_string(self._file, self._compname) self._file.write('SSND') - self._ssnd_length_pos = self._file.tell() + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() _write_ulong(self._file, self._datalength + 8) _write_ulong(self._file, 0) _write_ulong(self._file, 0) diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py --- a/lib-python/2.7/argparse.py +++ b/lib-python/2.7/argparse.py @@ -168,6 +168,8 @@ self._prog = 
prog self._indent_increment = indent_increment self._max_help_position = max_help_position + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) self._width = width self._current_indent = 0 @@ -339,7 +341,7 @@ else: line_len = len(indent) - 1 for part in parts: - if line_len + 1 + len(part) > text_width: + if line_len + 1 + len(part) > text_width and line: lines.append(indent + ' '.join(line)) line = [] line_len = len(indent) - 1 @@ -478,7 +480,7 @@ def _format_text(self, text): if '%(prog)' in text: text = text % dict(prog=self._prog) - text_width = self._width - self._current_indent + text_width = max(self._width - self._current_indent, 11) indent = ' ' * self._current_indent return self._fill_text(text, text_width, indent) + '\n\n' @@ -486,7 +488,7 @@ # determine the required width and the entry label help_position = min(self._action_max_length + 2, self._max_help_position) - help_width = self._width - help_position + help_width = max(self._width - help_position, 11) action_width = help_position - self._current_indent - 2 action_header = self._format_action_invocation(action) @@ -1155,9 +1157,13 @@ __hash__ = None def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return vars(self) == vars(other) def __ne__(self, other): + if not isinstance(other, Namespace): + return NotImplemented return not (self == other) def __contains__(self, key): diff --git a/lib-python/2.7/bsddb/dbshelve.py b/lib-python/2.7/bsddb/dbshelve.py --- a/lib-python/2.7/bsddb/dbshelve.py +++ b/lib-python/2.7/bsddb/dbshelve.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python #------------------------------------------------------------------------ # Copyright (c) 1997-2001 by Total Control Software # All Rights Reserved diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py --- a/lib-python/2.7/bsddb/test/test_dbtables.py +++ b/lib-python/2.7/bsddb/test/test_dbtables.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python -# #----------------------------------------------------------------------- # A test suite for the table interface built on bsddb.db #----------------------------------------------------------------------- diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py --- a/lib-python/2.7/codecs.py +++ b/lib-python/2.7/codecs.py @@ -456,15 +456,12 @@ # read until we get the required number of characters (if available) while True: - # can the request can be satisfied from the character buffer? - if chars < 0: - if size < 0: - if self.charbuffer: - break - elif len(self.charbuffer) >= size: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: break - else: - if len(self.charbuffer) >= chars: + elif size >= 0: + if len(self.charbuffer) >= size: break # we need more data if size < 0: diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py --- a/lib-python/2.7/collections.py +++ b/lib-python/2.7/collections.py @@ -319,6 +319,7 @@ if isinstance(field_names, basestring): field_names = field_names.replace(',', ' ').split() field_names = map(str, field_names) + typename = str(typename) if rename: seen = set() for index, name in enumerate(field_names): @@ -331,6 +332,8 @@ field_names[index] = '_%d' % index seen.add(name) for name in [typename] + field_names: + if type(name) != str: + raise TypeError('Type names and field names must be strings') if not all(c.isalnum() or c=='_' for c in name): raise ValueError('Type names and field names can only contain ' 'alphanumeric characters and underscores: %r' % name) diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py --- a/lib-python/2.7/csv.py +++ b/lib-python/2.7/csv.py @@ -93,6 +93,10 @@ self.line_num = self.reader.line_num return self._fieldnames + # Issue 20004: Because DictReader is a classic class, this setter is + # ignored. 
At this point in 2.7's lifecycle, it is too late to change the + # base class for fear of breaking working code. If you want to change + # fieldnames without overwriting the getter, set _fieldnames directly. @fieldnames.setter def fieldnames(self, value): self._fieldnames = value @@ -140,8 +144,8 @@ if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: - raise ValueError("dict contains fields not in fieldnames: " + - ", ".join(wrong_fields)) + raise ValueError("dict contains fields not in fieldnames: " + + ", ".join([repr(x) for x in wrong_fields])) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): diff --git a/lib-python/2.7/ctypes/test/__init__.py b/lib-python/2.7/ctypes/test/__init__.py --- a/lib-python/2.7/ctypes/test/__init__.py +++ b/lib-python/2.7/ctypes/test/__init__.py @@ -2,7 +2,15 @@ use_resources = [] -class ResourceDenied(Exception): +import ctypes +ctypes_symbols = dir(ctypes) + +def need_symbol(name): + return unittest.skipUnless(name in ctypes_symbols, + '{!r} is required'.format(name)) + + +class ResourceDenied(unittest.SkipTest): """Test skipped because it requested a disallowed resource. 
This is raised when a test calls requires() for a resource that diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py --- a/lib-python/2.7/ctypes/test/test_arrays.py +++ b/lib-python/2.7/ctypes/test/test_arrays.py @@ -2,6 +2,8 @@ from ctypes import * from test.test_support import impl_detail +from ctypes.test import need_symbol + formats = "bBhHiIlLqQfd" # c_longdouble commented out for PyPy, look at the commend in test_longdouble @@ -98,8 +100,8 @@ self.assertEqual(values, [1, 2, 3, 4, 5]) def test_classcache(self): - self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4)) - self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3)) + self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4)) + self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3)) def test_from_address(self): # Failed with 0.9.8, reported by JUrner @@ -112,20 +114,16 @@ self.assertEqual(sz[1:4:2], "o") self.assertEqual(sz.value, "foo") - try: - create_unicode_buffer - except NameError: - pass - else: - def test_from_addressW(self): - p = create_unicode_buffer("foo") - sz = (c_wchar * 3).from_address(addressof(p)) - self.assertEqual(sz[:], "foo") - self.assertEqual(sz[::], "foo") - self.assertEqual(sz[::-1], "oof") - self.assertEqual(sz[::3], "f") - self.assertEqual(sz[1:4:2], "o") - self.assertEqual(sz.value, "foo") + @need_symbol('create_unicode_buffer') + def test_from_addressW(self): + p = create_unicode_buffer("foo") + sz = (c_wchar * 3).from_address(addressof(p)) + self.assertEqual(sz[:], "foo") + self.assertEqual(sz[::], "foo") + self.assertEqual(sz[::-1], "oof") + self.assertEqual(sz[::3], "f") + self.assertEqual(sz[1:4:2], "o") + self.assertEqual(sz.value, "foo") def test_cache(self): # Array types are cached internally in the _ctypes extension, @@ -139,7 +137,7 @@ # Create a new array type based on it: t1 = my_int * 1 t2 = my_int * 1 - self.assertTrue(t1 is t2) + self.assertIs(t1, t2) if __name__ == '__main__': unittest.main() diff --git 
a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py --- a/lib-python/2.7/ctypes/test/test_as_parameter.py +++ b/lib-python/2.7/ctypes/test/test_as_parameter.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test dll = CDLL(_ctypes_test.__file__) @@ -17,11 +18,8 @@ def wrap(self, param): return param + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0)) @@ -134,7 +132,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import os @@ -131,15 +132,6 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) - try: - c_wchar - except NameError: - pass - else: - result = self.fail_fields(("a", c_wchar, 1)) - self.assertEqual(result[0], TypeError) - self.assertIn('bit fields not allowed for type', result[1]) - class Dummy(Structure): _fields_ = [] @@ -147,6 +139,12 @@ self.assertEqual(result[0], TypeError) self.assertIn('bit fields not allowed for type', result[1]) + @need_symbol('c_wchar') + def test_c_wchar(self): + result = self.fail_fields(("a", c_wchar, 1)) + self.assertEqual(result, + (TypeError, 'bit fields not allowed for type c_wchar')) + def test_single_bitfield_size(self): for c_typ in int_types: result = self.fail_fields(("a", c_typ, 
-1)) @@ -213,7 +211,7 @@ class X(Structure): _fields_ = [("a", c_byte, 4), ("b", c_int, 32)] - self.assertEqual(sizeof(X), sizeof(c_int)*2) + self.assertEqual(sizeof(X), alignment(c_int)+sizeof(c_int)) def test_mixed_3(self): class X(Structure): @@ -246,7 +244,7 @@ _anonymous_ = ["_"] _fields_ = [("_", X)] - @unittest.skipUnless(hasattr(ctypes, "c_uint32"), "c_int32 is required") + @need_symbol('c_uint32') def test_uint32(self): class X(Structure): _fields_ = [("a", c_uint32, 32)] @@ -256,7 +254,7 @@ x.a = 0xFDCBA987 self.assertEqual(x.a, 0xFDCBA987) - @unittest.skipUnless(hasattr(ctypes, "c_uint64"), "c_int64 is required") + @need_symbol('c_uint64') def test_uint64(self): class X(Structure): _fields_ = [("a", c_uint64, 64)] diff --git a/lib-python/2.7/ctypes/test/test_buffers.py b/lib-python/2.7/ctypes/test/test_buffers.py --- a/lib-python/2.7/ctypes/test/test_buffers.py +++ b/lib-python/2.7/ctypes/test/test_buffers.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest class StringBufferTestCase(unittest.TestCase): @@ -7,12 +8,12 @@ b = create_string_buffer(32) self.assertEqual(len(b), 32) self.assertEqual(sizeof(b), 32 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) b = create_string_buffer("abc") self.assertEqual(len(b), 4) # trailing nul char self.assertEqual(sizeof(b), 4 * sizeof(c_char)) - self.assertTrue(type(b[0]) is str) + self.assertIs(type(b[0]), str) self.assertEqual(b[0], "a") self.assertEqual(b[:], "abc\0") self.assertEqual(b[::], "abc\0") @@ -36,39 +37,36 @@ self.assertEqual(b[::2], "ac") self.assertEqual(b[::5], "a") - try: - c_wchar - except NameError: - pass - else: - def test_unicode_buffer(self): - b = create_unicode_buffer(32) - self.assertEqual(len(b), 32) - self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) + @need_symbol('c_wchar') + def test_unicode_buffer(self): + b = create_unicode_buffer(32) + self.assertEqual(len(b), 
32) + self.assertEqual(sizeof(b), 32 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) - b = create_unicode_buffer(u"abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + b = create_unicode_buffer(u"abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") - def test_unicode_conversion(self): - b = create_unicode_buffer("abc") - self.assertEqual(len(b), 4) # trailing nul char - self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) - self.assertTrue(type(b[0]) is unicode) - self.assertEqual(b[0], u"a") - self.assertEqual(b[:], "abc\0") - self.assertEqual(b[::], "abc\0") - self.assertEqual(b[::-1], "\0cba") - self.assertEqual(b[::2], "ac") - self.assertEqual(b[::5], "a") + @need_symbol('c_wchar') + def test_unicode_conversion(self): + b = create_unicode_buffer("abc") + self.assertEqual(len(b), 4) # trailing nul char + self.assertEqual(sizeof(b), 4 * sizeof(c_wchar)) + self.assertIs(type(b[0]), unicode) + self.assertEqual(b[0], u"a") + self.assertEqual(b[:], "abc\0") + self.assertEqual(b[::], "abc\0") + self.assertEqual(b[::-1], "\0cba") + self.assertEqual(b[::2], "ac") + self.assertEqual(b[::5], "a") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_byteswap.py b/lib-python/2.7/ctypes/test/test_byteswap.py --- a/lib-python/2.7/ctypes/test/test_byteswap.py +++ b/lib-python/2.7/ctypes/test/test_byteswap.py @@ -15,7 +15,8 @@ # For Structures and Unions, 
these types are created on demand. class Test(unittest.TestCase): - def X_test(self): + @unittest.skip('test disabled') + def test_X(self): print >> sys.stderr, sys.byteorder for i in range(32): bits = BITS() @@ -25,11 +26,11 @@ @xfail def test_endian_short(self): if sys.byteorder == "little": - self.assertTrue(c_short.__ctype_le__ is c_short) - self.assertTrue(c_short.__ctype_be__.__ctype_le__ is c_short) + self.assertIs(c_short.__ctype_le__, c_short) + self.assertIs(c_short.__ctype_be__.__ctype_le__, c_short) else: - self.assertTrue(c_short.__ctype_be__ is c_short) - self.assertTrue(c_short.__ctype_le__.__ctype_be__ is c_short) + self.assertIs(c_short.__ctype_be__, c_short) + self.assertIs(c_short.__ctype_le__.__ctype_be__, c_short) s = c_short.__ctype_be__(0x1234) self.assertEqual(bin(struct.pack(">h", 0x1234)), "1234") self.assertEqual(bin(s), "1234") @@ -53,11 +54,11 @@ @xfail def test_endian_int(self): if sys.byteorder == "little": - self.assertTrue(c_int.__ctype_le__ is c_int) - self.assertTrue(c_int.__ctype_be__.__ctype_le__ is c_int) + self.assertIs(c_int.__ctype_le__, c_int) + self.assertIs(c_int.__ctype_be__.__ctype_le__, c_int) else: - self.assertTrue(c_int.__ctype_be__ is c_int) - self.assertTrue(c_int.__ctype_le__.__ctype_be__ is c_int) + self.assertIs(c_int.__ctype_be__, c_int) + self.assertIs(c_int.__ctype_le__.__ctype_be__, c_int) s = c_int.__ctype_be__(0x12345678) self.assertEqual(bin(struct.pack(">i", 0x12345678)), "12345678") @@ -82,11 +83,11 @@ @xfail def test_endian_longlong(self): if sys.byteorder == "little": - self.assertTrue(c_longlong.__ctype_le__ is c_longlong) - self.assertTrue(c_longlong.__ctype_be__.__ctype_le__ is c_longlong) + self.assertIs(c_longlong.__ctype_le__, c_longlong) + self.assertIs(c_longlong.__ctype_be__.__ctype_le__, c_longlong) else: - self.assertTrue(c_longlong.__ctype_be__ is c_longlong) - self.assertTrue(c_longlong.__ctype_le__.__ctype_be__ is c_longlong) + self.assertIs(c_longlong.__ctype_be__, c_longlong) + 
self.assertIs(c_longlong.__ctype_le__.__ctype_be__, c_longlong) s = c_longlong.__ctype_be__(0x1234567890ABCDEF) self.assertEqual(bin(struct.pack(">q", 0x1234567890ABCDEF)), "1234567890ABCDEF") @@ -111,11 +112,11 @@ @xfail def test_endian_float(self): if sys.byteorder == "little": - self.assertTrue(c_float.__ctype_le__ is c_float) - self.assertTrue(c_float.__ctype_be__.__ctype_le__ is c_float) + self.assertIs(c_float.__ctype_le__, c_float) + self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float) else: - self.assertTrue(c_float.__ctype_be__ is c_float) - self.assertTrue(c_float.__ctype_le__.__ctype_be__ is c_float) + self.assertIs(c_float.__ctype_be__, c_float) + self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float) s = c_float(math.pi) self.assertEqual(bin(struct.pack("f", math.pi)), bin(s)) # Hm, what's the precision of a float compared to a double? @@ -130,11 +131,11 @@ @xfail def test_endian_double(self): if sys.byteorder == "little": - self.assertTrue(c_double.__ctype_le__ is c_double) - self.assertTrue(c_double.__ctype_be__.__ctype_le__ is c_double) + self.assertIs(c_double.__ctype_le__, c_double) + self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double) else: - self.assertTrue(c_double.__ctype_be__ is c_double) - self.assertTrue(c_double.__ctype_le__.__ctype_be__ is c_double) + self.assertIs(c_double.__ctype_be__, c_double) + self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double) s = c_double(math.pi) self.assertEqual(s.value, math.pi) self.assertEqual(bin(struct.pack("d", math.pi)), bin(s)) @@ -146,14 +147,14 @@ self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s)) def test_endian_other(self): - self.assertTrue(c_byte.__ctype_le__ is c_byte) - self.assertTrue(c_byte.__ctype_be__ is c_byte) + self.assertIs(c_byte.__ctype_le__, c_byte) + self.assertIs(c_byte.__ctype_be__, c_byte) - self.assertTrue(c_ubyte.__ctype_le__ is c_ubyte) - self.assertTrue(c_ubyte.__ctype_be__ is c_ubyte) + self.assertIs(c_ubyte.__ctype_le__, c_ubyte) + 
self.assertIs(c_ubyte.__ctype_be__, c_ubyte) - self.assertTrue(c_char.__ctype_le__ is c_char) - self.assertTrue(c_char.__ctype_be__ is c_char) + self.assertIs(c_char.__ctype_le__, c_char) + self.assertIs(c_char.__ctype_be__, c_char) @xfail def test_struct_fields_1(self): diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py --- a/lib-python/2.7/ctypes/test/test_callbacks.py +++ b/lib-python/2.7/ctypes/test/test_callbacks.py @@ -1,5 +1,6 @@ import unittest from ctypes import * +from ctypes.test import need_symbol from ctypes.test import xfail import _ctypes_test @@ -95,9 +96,10 @@ # disabled: would now (correctly) raise a RuntimeWarning about # a memory leak. A callback function cannot return a non-integral # C type without causing a memory leak. -## def test_char_p(self): -## self.check_type(c_char_p, "abc") -## self.check_type(c_char_p, "def") + @unittest.skip('test disabled') + def test_char_p(self): + self.check_type(c_char_p, "abc") + self.check_type(c_char_p, "def") @xfail def test_pyobject(self): @@ -150,13 +152,12 @@ CFUNCTYPE(None)(lambda x=Nasty(): None) -try: - WINFUNCTYPE -except NameError: - pass -else: - class StdcallCallbacks(Callbacks): + at need_symbol('WINFUNCTYPE') +class StdcallCallbacks(Callbacks): + try: functype = WINFUNCTYPE + except NameError: + pass ################################################################ @@ -186,7 +187,7 @@ from ctypes.util import find_library libc_path = find_library("c") if not libc_path: - return # cannot test + self.skipTest('could not find libc') libc = CDLL(libc_path) @CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int)) @@ -198,23 +199,19 @@ libc.qsort(array, len(array), sizeof(c_int), cmp_func) self.assertEqual(array[:], [1, 5, 7, 33, 99]) - try: - WINFUNCTYPE - except NameError: - pass - else: - def test_issue_8959_b(self): - from ctypes.wintypes import BOOL, HWND, LPARAM + @need_symbol('WINFUNCTYPE') + def test_issue_8959_b(self): + from ctypes.wintypes 
import BOOL, HWND, LPARAM + global windowCount + windowCount = 0 + + @WINFUNCTYPE(BOOL, HWND, LPARAM) + def EnumWindowsCallbackFunc(hwnd, lParam): global windowCount - windowCount = 0 + windowCount += 1 + return True #Allow windows to keep enumerating - @WINFUNCTYPE(BOOL, HWND, LPARAM) - def EnumWindowsCallbackFunc(hwnd, lParam): - global windowCount - windowCount += 1 - return True #Allow windows to keep enumerating - - windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) + windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0) def test_callback_register_int(self): # Issue #8275: buggy handling of callback args under Win64 diff --git a/lib-python/2.7/ctypes/test/test_cast.py b/lib-python/2.7/ctypes/test/test_cast.py --- a/lib-python/2.7/ctypes/test/test_cast.py +++ b/lib-python/2.7/ctypes/test/test_cast.py @@ -1,4 +1,5 @@ from ctypes import * +from ctypes.test import need_symbol import unittest import sys @@ -38,14 +39,14 @@ p = cast(array, POINTER(c_char_p)) # array and p share a common _objects attribute - self.assertTrue(p._objects is array._objects) + self.assertIs(p._objects, array._objects) self.assertEqual(array._objects, {'0': "foo bar", id(array): array}) p[0] = "spam spam" self.assertEqual(p._objects, {'0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) p[1] = "foo bar" self.assertEqual(p._objects, {'1': 'foo bar', '0': "spam spam", id(array): array}) - self.assertTrue(array._objects is p._objects) + self.assertIs(array._objects, p._objects) def test_other(self): p = cast((c_int * 4)(1, 2, 3, 4), POINTER(c_int)) @@ -75,15 +76,11 @@ self.assertEqual(cast(cast(s, c_void_p), c_char_p).value, "hiho") - try: - c_wchar_p - except NameError: - pass - else: - def test_wchar_p(self): - s = c_wchar_p("hiho") - self.assertEqual(cast(cast(s, c_void_p), c_wchar_p).value, - "hiho") + @need_symbol('c_wchar_p') + def test_wchar_p(self): + s = c_wchar_p("hiho") + self.assertEqual(cast(cast(s, 
c_void_p), c_wchar_p).value, + "hiho") if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_cfuncs.py b/lib-python/2.7/ctypes/test/test_cfuncs.py --- a/lib-python/2.7/ctypes/test/test_cfuncs.py +++ b/lib-python/2.7/ctypes/test/test_cfuncs.py @@ -3,6 +3,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol import _ctypes_test from test.test_support import impl_detail @@ -196,7 +197,7 @@ try: WinDLL except NameError: - pass + def stdcall_dll(*_): pass else: class stdcall_dll(WinDLL): def __getattr__(self, name): @@ -206,9 +207,9 @@ setattr(self, name, func) return func - class stdcallCFunctions(CFunctions): - _dll = stdcall_dll(_ctypes_test.__file__) - pass + at need_symbol('WinDLL') +class stdcallCFunctions(CFunctions): + _dll = stdcall_dll(_ctypes_test.__file__) if __name__ == '__main__': unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_checkretval.py b/lib-python/2.7/ctypes/test/test_checkretval.py --- a/lib-python/2.7/ctypes/test/test_checkretval.py +++ b/lib-python/2.7/ctypes/test/test_checkretval.py @@ -1,6 +1,7 @@ import unittest from ctypes import * +from ctypes.test import need_symbol class CHECKED(c_int): def _check_retval_(value): @@ -25,15 +26,11 @@ del dll._testfunc_p_p.restype self.assertEqual(42, dll._testfunc_p_p(42)) - try: - oledll - except NameError: - pass - else: - def test_oledll(self): - self.assertRaises(WindowsError, - oledll.oleaut32.CreateTypeLib2, - 0, None, None) + @need_symbol('oledll') + def test_oledll(self): + self.assertRaises(WindowsError, + oledll.oleaut32.CreateTypeLib2, + 0, None, None) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_errcheck.py b/lib-python/2.7/ctypes/test/test_errcheck.py deleted file mode 100644 --- a/lib-python/2.7/ctypes/test/test_errcheck.py +++ /dev/null @@ -1,19 +0,0 @@ -import sys -from ctypes import * - -##class HMODULE(Structure): -## _fields_ = [("value", c_void_p)] - -## def __repr__(self): 
-## return "" % self.value - -##windll.kernel32.GetModuleHandleA.restype = HMODULE - -##print windll.kernel32.GetModuleHandleA("python23.dll") -##print hex(sys.dllhandle) - -##def nonzero(handle): -## return (GetLastError(), handle) - -##windll.kernel32.GetModuleHandleA.errcheck = nonzero -##print windll.kernel32.GetModuleHandleA("spam") diff --git a/lib-python/2.7/ctypes/test/test_find.py b/lib-python/2.7/ctypes/test/test_find.py --- a/lib-python/2.7/ctypes/test/test_find.py +++ b/lib-python/2.7/ctypes/test/test_find.py @@ -1,4 +1,5 @@ import unittest +import os import sys from ctypes import * from ctypes.util import find_library @@ -40,43 +41,43 @@ except OSError: pass - if lib_gl: - def test_gl(self): - if self.gl: - self.gl.glClearIndex + @unittest.skipUnless(lib_gl, 'lib_gl not available') + def test_gl(self): + if self.gl: + self.gl.glClearIndex - if lib_glu: - def test_glu(self): - if self.glu: - self.glu.gluBeginCurve + @unittest.skipUnless(lib_glu, 'lib_glu not available') + def test_glu(self): + if self.glu: + self.glu.gluBeginCurve - if lib_gle: - def test_gle(self): - if self.gle: - self.gle.gleGetJoinStyle + @unittest.skipUnless(lib_gle, 'lib_gle not available') + def test_gle(self): + if self.gle: + self.gle.gleGetJoinStyle -##if os.name == "posix" and sys.platform != "darwin": - -## # On platforms where the default shared library suffix is '.so', -## # at least some libraries can be loaded as attributes of the cdll -## # object, since ctypes now tries loading the lib again -## # with '.so' appended of the first try fails. -## # -## # Won't work for libc, unfortunately. OTOH, it isn't -## # needed for libc since this is already mapped into the current -## # process (?) -## # -## # On MAC OSX, it won't work either, because dlopen() needs a full path, -## # and the default suffix is either none or '.dylib'. 
- -## class LoadLibs(unittest.TestCase): -## def test_libm(self): -## import math -## libm = cdll.libm -## sqrt = libm.sqrt -## sqrt.argtypes = (c_double,) -## sqrt.restype = c_double -## self.assertEqual(sqrt(2), math.sqrt(2)) +# On platforms where the default shared library suffix is '.so', +# at least some libraries can be loaded as attributes of the cdll +# object, since ctypes now tries loading the lib again +# with '.so' appended of the first try fails. +# +# Won't work for libc, unfortunately. OTOH, it isn't +# needed for libc since this is already mapped into the current +# process (?) +# +# On MAC OSX, it won't work either, because dlopen() needs a full path, +# and the default suffix is either none or '.dylib'. + at unittest.skip('test disabled') + at unittest.skipUnless(os.name=="posix" and sys.platform != "darwin", + 'test not suitable for this platform') +class LoadLibs(unittest.TestCase): + def test_libm(self): + import math + libm = cdll.libm + sqrt = libm.sqrt + sqrt.argtypes = (c_double,) + sqrt.restype = c_double + self.assertEqual(sqrt(2), math.sqrt(2)) if __name__ == "__main__": unittest.main() diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py --- a/lib-python/2.7/ctypes/test/test_frombuffer.py +++ b/lib-python/2.7/ctypes/test/test_frombuffer.py @@ -25,7 +25,7 @@ a[0], a[-1] = 200, -200 self.assertEqual(x[:], a.tolist()) - self.assertTrue(a in x._objects.values()) + self.assertIn(a, x._objects.values()) self.assertRaises(ValueError, c_int.from_buffer, a, -1) diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py --- a/lib-python/2.7/ctypes/test/test_funcptr.py +++ b/lib-python/2.7/ctypes/test/test_funcptr.py @@ -75,7 +75,7 @@ ## "lpfnWndProc", WNDPROC_2(wndproc)) # instead: - self.assertTrue(WNDPROC is WNDPROC_2) + self.assertIs(WNDPROC, WNDPROC_2) # 'wndclass.lpfnWndProc' leaks 94 references. Why? 
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10) diff --git a/lib-python/2.7/ctypes/test/test_functions.py b/lib-python/2.7/ctypes/test/test_functions.py --- a/lib-python/2.7/ctypes/test/test_functions.py +++ b/lib-python/2.7/ctypes/test/test_functions.py @@ -6,6 +6,7 @@ """ from ctypes import * +from ctypes.test import need_symbol import sys, unittest from ctypes.test import xfail from test.test_support import impl_detail @@ -65,22 +66,16 @@ pass + @need_symbol('c_wchar') def test_wchar_parm(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double] result = f(1, u"x", 3, 4, 5.0, 6.0) self.assertEqual(result, 139) self.assertEqual(type(result), int) + @need_symbol('c_wchar') def test_wchar_result(self): - try: - c_wchar - except NameError: - return f = dll._testfunc_i_bhilfd f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] f.restype = c_wchar @@ -158,11 +153,8 @@ self.assertEqual(result, -21) self.assertEqual(type(result), float) + @need_symbol('c_longlong') def test_longlongresult(self): - try: - c_longlong - except NameError: - return f = dll._testfunc_q_bhilfd f.restype = c_longlong f.argtypes = [c_byte, c_short, c_int, c_long, c_float, c_double] @@ -299,6 +291,7 @@ result = f(-10, cb) self.assertEqual(result, -18) + @need_symbol('c_longlong') def test_longlong_callbacks(self): f = dll._testfunc_callback_q_qf @@ -309,7 +302,7 @@ f.argtypes = [c_longlong, MyCallback] def callback(value): - self.assertTrue(isinstance(value, (int, long))) + self.assertIsInstance(value, (int, long)) return value & 0x7FFFFFFF cb = MyCallback(callback) @@ -351,16 +344,16 @@ s2h = dll.ret_2h_func(inp) self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) - if sys.platform == "win32": - def test_struct_return_2H_stdcall(self): - class S2H(Structure): - _fields_ = [("x", c_short), - ("y", c_short)] + @unittest.skipUnless(sys.platform == "win32", 'Windows-specific test') + def 
test_struct_return_2H_stdcall(self): + class S2H(Structure): + _fields_ = [("x", c_short), + ("y", c_short)] - windll.s_ret_2h_func.restype = S2H - windll.s_ret_2h_func.argtypes = [S2H] - s2h = windll.s_ret_2h_func(S2H(99, 88)) - self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) + windll.s_ret_2h_func.restype = S2H + windll.s_ret_2h_func.argtypes = [S2H] + s2h = windll.s_ret_2h_func(S2H(99, 88)) + self.assertEqual((s2h.x, s2h.y), (99*2, 88*3)) def test_struct_return_8H(self): class S8I(Structure): From noreply at buildbot.pypy.org Sun Aug 31 15:40:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 15:40:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Move out parts of these conversion functions that depend on "long long", Message-ID: <20140831134056.C26211C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73228:89666ce8368c Date: 2014-08-31 15:40 +0200 http://bitbucket.org/pypy/pypy/changeset/89666ce8368c/ Log: Move out parts of these conversion functions that depend on "long long", for the ARM JIT. 
diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -182,8 +182,12 @@ value = misc.read_raw_long_data(cdata, self.size) return self.space.wrap(value) else: - value = misc.read_raw_signed_data(cdata, self.size) - return self.space.wrap(value) # r_longlong => on 32-bit, 'long' + return self._convert_to_object_longlong(cdata) + + def _convert_to_object_longlong(self, cdata): + # in its own function: LONGLONG may make the whole function jit-opaque + value = misc.read_raw_signed_data(cdata, self.size) + return self.space.wrap(value) # r_longlong => on 32-bit, 'long' def convert_from_object(self, cdata, w_ob): if self.value_fits_long: @@ -193,8 +197,12 @@ self._overflow(w_ob) misc.write_raw_signed_data(cdata, value, self.size) else: - value = misc.as_long_long(self.space, w_ob) - misc.write_raw_signed_data(cdata, value, self.size) + self._convert_from_object_longlong(cdata, w_ob) + + def _convert_from_object_longlong(self, cdata, w_ob): + # in its own function: LONGLONG may make the whole function jit-opaque + value = misc.as_long_long(self.space, w_ob) + misc.write_raw_signed_data(cdata, value, self.size) def get_vararg_type(self): if self.size < rffi.sizeof(rffi.INT): @@ -264,8 +272,12 @@ self._overflow(w_ob) misc.write_raw_unsigned_data(cdata, value, self.size) else: - value = misc.as_unsigned_long_long(self.space, w_ob, strict=True) - misc.write_raw_unsigned_data(cdata, value, self.size) + self._convert_from_object_longlong(cdata, w_ob) + + def _convert_from_object_longlong(self, cdata, w_ob): + # in its own function: LONGLONG may make the whole function jit-opaque + value = misc.as_unsigned_long_long(self.space, w_ob, strict=True) + misc.write_raw_unsigned_data(cdata, value, self.size) def convert_to_object(self, cdata): if self.value_fits_ulong: @@ -275,8 +287,12 @@ else: return self.space.wrap(value) # r_uint => 'long' object else: 
- value = misc.read_raw_unsigned_data(cdata, self.size) - return self.space.wrap(value) # r_ulonglong => 'long' object + return self._convert_to_object_longlong(cdata) + + def _convert_to_object_longlong(self, cdata): + # in its own function: LONGLONG may make the whole function jit-opaque + value = misc.read_raw_unsigned_data(cdata, self.size) + return self.space.wrap(value) # r_ulonglong => 'long' object def get_vararg_type(self): if self.size < rffi.sizeof(rffi.INT): From noreply at buildbot.pypy.org Sun Aug 31 15:54:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 15:54:19 +0200 (CEST) Subject: [pypy-commit] pypy default: This attribute 'fields_list' is a quasi-immutable field pointing Message-ID: <20140831135419.EB9011C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73229:4dd401ace491 Date: 2014-08-31 15:53 +0200 http://bitbucket.org/pypy/pypy/changeset/4dd401ace491/ Log: This attribute 'fields_list' is a quasi-immutable field pointing to a really immutable list. Interestingly enough, we have support for that. 
diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -17,7 +17,7 @@ class W_CTypeStructOrUnion(W_CType): - _immutable_fields_ = ['alignment?', 'fields_list?', 'fields_dict?', + _immutable_fields_ = ['alignment?', 'fields_list?[*]', 'fields_dict?', 'custom_field_pos?', 'with_var_array?'] # fields added by complete_struct_or_union(): alignment = -1 diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -389,7 +389,7 @@ w_ctype.size = totalsize w_ctype.alignment = totalalignment - w_ctype.fields_list = fields_list + w_ctype.fields_list = fields_list[:] w_ctype.fields_dict = fields_dict w_ctype.custom_field_pos = custom_field_pos w_ctype.with_var_array = with_var_array From noreply at buildbot.pypy.org Sun Aug 31 16:10:50 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 31 Aug 2014 16:10:50 +0200 (CEST) Subject: [pypy-commit] pypy trace-limit-hack: close to be merged branch Message-ID: <20140831141050.0A9B41C1036@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: trace-limit-hack Changeset: r73230:cbf098b92edc Date: 2014-08-31 08:09 -0600 http://bitbucket.org/pypy/pypy/changeset/cbf098b92edc/ Log: close to be merged branch From noreply at buildbot.pypy.org Sun Aug 31 16:10:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 31 Aug 2014 16:10:51 +0200 (CEST) Subject: [pypy-commit] pypy default: (arigo, fijal) merge trace-limit-hack branch which adjusts the heuristics Message-ID: <20140831141051.4EF4D1C1036@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r73231:239c8dadefea Date: 2014-08-31 08:10 -0600 http://bitbucket.org/pypy/pypy/changeset/239c8dadefea/ Log: (arigo, fijal) merge trace-limit-hack branch which adjusts the heuristics how things are 
traced after ABORT_TOO_LONG. Now both the inner function and the outer loop should be traced "soon" after the abort. diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1991,6 +1991,10 @@ if greenkey_of_huge_function is not None: warmrunnerstate.disable_noninlinable_function( greenkey_of_huge_function) + if self.current_merge_points: + jd_sd = self.jitdriver_sd + greenkey = self.current_merge_points[0][0][:jd_sd.num_green_args] + warmrunnerstate.JitCell.trace_next_iteration(greenkey) raise SwitchToBlackhole(Counters.ABORT_TOO_LONG) def _interpret(self): diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -183,6 +183,9 @@ return token return None + def has_seen_a_procedure_token(self): + return self.wref_procedure_token is not None + def set_procedure_token(self, token, tmp=False): self.wref_procedure_token = self._makeref(token) if tmp: @@ -197,9 +200,14 @@ def should_remove_jitcell(self): if self.get_procedure_token() is not None: return False # don't remove JitCells with a procedure_token - # don't remove JitCells that are being traced, or JitCells with - # the "don't trace here" flag. Other JitCells can be removed. - return (self.flags & (JC_TRACING | JC_DONT_TRACE_HERE)) == 0 + if self.flags & JC_TRACING: + return False # don't remove JitCells that are being traced + if self.flags & JC_DONT_TRACE_HERE: + # if we have this flag, and we *had* a procedure_token but + # we no longer have one, then remove me. this prevents this + # JitCell from being immortal. + return self.has_seen_a_procedure_token() + return True # Other JitCells can be removed. 
# ____________________________________________________________ @@ -408,6 +416,12 @@ # machine code was already compiled for these greenargs procedure_token = cell.get_procedure_token() if procedure_token is None: + if cell.flags & JC_DONT_TRACE_HERE: + if not cell.has_seen_a_procedure_token(): + # we're seeing a fresh JC_DONT_TRACE_HERE with no + # procedure_token. Compile now. + bound_reached(hash, cell, *args) + return # it was an aborted compilation, or maybe a weakref that # has been freed jitcounter.cleanup_chain(hash) @@ -510,6 +524,12 @@ return JitCell.get_jitcell(*greenargs) @staticmethod + def trace_next_iteration(greenkey): + greenargs = unwrap_greenkey(greenkey) + hash = JitCell.get_uhash(*greenargs) + jitcounter.change_current_fraction(hash, 0.98) + + @staticmethod def ensure_jit_cell_at_key(greenkey): greenargs = unwrap_greenkey(greenkey) hash = JitCell.get_uhash(*greenargs) From noreply at buildbot.pypy.org Sun Aug 31 16:56:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 16:56:26 +0200 (CEST) Subject: [pypy-commit] pypy default: Move read_timestamp away from its own operation, and use a call with an Message-ID: <20140831145626.4EC7C1C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73232:08e92f52e390 Date: 2014-08-31 16:56 +0200 http://bitbucket.org/pypy/pypy/changeset/08e92f52e390/ Log: Move read_timestamp away from its own operation, and use a call with an OS_xxx value instead. It is a bit simpler everywhere, and more importantly, it lets backends support this operation or not (in which case they are just assembled as CALLs). 
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1147,14 +1147,15 @@ emit_op_convert_longlong_bytes_to_float = gen_emit_unary_float_op( 'longlong_bytes_to_float', 'VMOV_cc') - def emit_op_read_timestamp(self, op, arglocs, regalloc, fcond): - assert 0, 'not supported' + """ disabled: missing an implementation that works in user mode + def ..._read_timestamp(...): tmp = arglocs[0] res = arglocs[1] self.mc.MRC(15, 0, tmp.value, 15, 12, 1) self.mc.MOV_ri(r.ip.value, 0) self.mc.VMOV_cr(res.value, tmp.value, r.ip.value) return fcond + """ def emit_op_cast_float_to_singlefloat(self, op, arglocs, regalloc, fcond): arg, res = arglocs diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -561,6 +561,8 @@ args = self.prepare_op_math_sqrt(op, fcond) self.perform_math(op, args, fcond) return + #if oopspecindex == EffectInfo.OS_MATH_READ_TIMESTAMP: + # ... 
return self._prepare_call(op) def _prepare_call(self, op, force_store=[], save_all_regs=False): @@ -1293,10 +1295,10 @@ prepare_op_convert_longlong_bytes_to_float = prepare_float_op(base=False, name='prepare_op_convert_longlong_bytes_to_float') - def prepare_op_read_timestamp(self, op, fcond): - loc = self.get_scratch_reg(INT) - res = self.vfprm.force_allocate_reg(op.result) - return [loc, res] + #def prepare_op_read_timestamp(self, op, fcond): + # loc = self.get_scratch_reg(INT) + # res = self.vfprm.force_allocate_reg(op.result) + # return [loc, res] def prepare_op_cast_float_to_singlefloat(self, op, fcond): loc1 = self.make_sure_var_in_reg(op.getarg(0)) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -20,8 +20,7 @@ IS_64_BIT = False supports_floats = True - supports_longlong = False # XXX requires an implementation of - # read_timestamp that works in user mode + supports_longlong = True supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE diff --git a/rpython/jit/backend/arm/test/test_basic.py b/rpython/jit/backend/arm/test/test_basic.py --- a/rpython/jit/backend/arm/test/test_basic.py +++ b/rpython/jit/backend/arm/test/test_basic.py @@ -41,10 +41,6 @@ continue locals()[k] = lambda self: py.test.skip('requires longlong support') - def test_read_timestamp(self): - py.test.skip("The JIT on ARM does not support read_timestamp") - - if not CPU.supports_floats: for k in ('test_float', 'test_residual_external_call'): locals()[k] = lambda self: py.test.skip('requires float support') diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -15,7 +15,6 @@ from rpython.rlib.clibffi import FFI_DEFAULT_ABI from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong -from rpython.rlib.rtimer 
import read_timestamp class LLTrace(object): has_been_freed = False @@ -649,9 +648,6 @@ result_adr = llmemory.cast_ptr_to_adr(struct.typeptr) return heaptracker.adr2int(result_adr) - def bh_read_timestamp(self): - return read_timestamp() - def bh_new_raw_buffer(self, size): return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw') diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -21,6 +21,9 @@ from rpython.jit.backend.llsupport import jitframe +IS_32_BIT = sys.maxint < 2**32 +IS_64_BIT = sys.maxint > 2**32 + def boxfloat(x): return BoxFloat(longlong.getfloatstorage(x)) @@ -1792,8 +1795,8 @@ c_nest, c_nest], 'void') def test_read_timestamp(self): - if not self.cpu.supports_longlong: - py.test.skip("longlong test") + if IS_32_BIT and not self.cpu.supports_longlong: + py.test.skip("read_timestamp returns a longlong") if sys.platform == 'win32': # windows quite often is very inexact (like the old Intel 8259 PIC), # so we stretch the time a little bit. 
@@ -1809,16 +1812,29 @@ else: def wait_a_bit(): pass + + from rpython.jit.codewriter.effectinfo import EffectInfo + from rpython.rlib import rtimer + + effectinfo = EffectInfo([], [], [], [], [], [], + EffectInfo.EF_CANNOT_RAISE, + EffectInfo.OS_MATH_READ_TIMESTAMP) + FPTR = self.Ptr(self.FuncType([], lltype.SignedLongLong)) + func_ptr = llhelper(FPTR, rtimer.read_timestamp) + FUNC = deref(FPTR) + funcbox = self.get_funcbox(self.cpu, func_ptr) + + calldescr = self.cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, effectinfo) if longlong.is_64_bit: - got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + got1 = self.execute_operation(rop.CALL, [funcbox], 'int', calldescr) wait_a_bit() - got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int') + got2 = self.execute_operation(rop.CALL, [funcbox], 'int', calldescr) res1 = got1.getint() res2 = got2.getint() else: - got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + got1 = self.execute_operation(rop.CALL, [funcbox],'float',calldescr) wait_a_bit() - got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float') + got2 = self.execute_operation(rop.CALL, [funcbox],'float',calldescr) res1 = got1.getlonglong() res2 = got2.getlonglong() assert res1 < res2 < res1 + 2**32 @@ -1941,7 +1957,7 @@ def test_convert_float_bytes(self): if not self.cpu.supports_floats: py.test.skip("requires floats") - if not self.cpu.supports_longlong: + if IS_32_BIT and not self.cpu.supports_longlong: py.test.skip("longlong test") t = 'int' if longlong.is_64_bit else 'float' res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG, @@ -2642,8 +2658,8 @@ (types.double, 12.3475226, rffi.DOUBLE), (types.float, r_singlefloat(-592.75), rffi.FLOAT), ]: - if sys.maxint < 2**32 and TP in (lltype.SignedLongLong, - lltype.UnsignedLongLong): + if IS_32_BIT and TP in (lltype.SignedLongLong, + lltype.UnsignedLongLong): if not cpu.supports_longlong: continue if TP == rffi.DOUBLE: @@ -2722,7 +2738,7 @@ (types.uint32, rffi.UINT), 
(types.sint32, rffi.INT), ] - if sys.maxint < 2**32 and cpu.supports_longlong: + if IS_32_BIT and cpu.supports_longlong: ALL_TYPES += [ (types.uint64, lltype.UnsignedLongLong), (types.sint64, lltype.SignedLongLong), @@ -3577,7 +3593,7 @@ "%r: got %r, expected %r" % (RESTYPE, res.value, expected)) def test_supports_longlong(self): - if sys.maxint > 2147483647: + if IS_64_BIT: assert not self.cpu.supports_longlong, ( "supports_longlong should be False on 64-bit platforms") diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1567,7 +1567,7 @@ else: assert 0, itemsize - def genop_read_timestamp(self, op, arglocs, resloc): + def genop_math_read_timestamp(self, op, arglocs, resloc): self.mc.RDTSC() if longlong.is_64_bit: self.mc.SHL_ri(edx.value, 32) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -801,6 +801,8 @@ return self._consider_get_errno(op) if oopspecindex == EffectInfo.OS_SET_ERRNO: return self._consider_set_errno(op) + if oopspecindex == EffectInfo.OS_MATH_READ_TIMESTAMP: + return self._consider_math_read_timestamp(op) self._consider_call(op) def consider_call_may_force(self, op, guard_op): @@ -1206,7 +1208,7 @@ else: raise AssertionError("bad unicode item size") - def consider_read_timestamp(self, op): + def _consider_math_read_timestamp(self, op): tmpbox_high = TempBox() self.rm.force_allocate_reg(tmpbox_high, selected_reg=eax) if longlong.is_64_bit: @@ -1214,7 +1216,7 @@ # result in rdx result_loc = self.rm.force_allocate_reg(op.result, selected_reg=edx) - self.perform(op, [], result_loc) + self.perform_math(op, [], result_loc) else: # on 32-bit, use both eax and edx as temporary registers, # use a temporary xmm register, and returns the result in @@ -1224,7 +1226,7 @@ xmmtmpbox = TempBox() xmmtmploc = 
self.xrm.force_allocate_reg(xmmtmpbox) result_loc = self.xrm.force_allocate_reg(op.result) - self.perform(op, [xmmtmploc], result_loc) + self.perform_math(op, [xmmtmploc], result_loc) self.xrm.possibly_free_var(xmmtmpbox) self.rm.possibly_free_var(tmpbox_low) self.rm.possibly_free_var(tmpbox_high) diff --git a/rpython/jit/codewriter/effectinfo.py b/rpython/jit/codewriter/effectinfo.py --- a/rpython/jit/codewriter/effectinfo.py +++ b/rpython/jit/codewriter/effectinfo.py @@ -79,6 +79,7 @@ OS_LLONG_U_TO_FLOAT = 94 # OS_MATH_SQRT = 100 + OS_MATH_READ_TIMESTAMP = 101 # OS_RAW_MALLOC_VARSIZE_CHAR = 110 OS_RAW_FREE = 111 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -1910,6 +1910,12 @@ else: raise NotImplementedError(oopspec_name) + def rewrite_op_ll_read_timestamp(self, op): + op1 = self.prepare_builtin_call(op, "ll_read_timestamp", []) + return self.handle_residual_call(op1, + oopspecindex=EffectInfo.OS_MATH_READ_TIMESTAMP, + extraeffect=EffectInfo.EF_CANNOT_RAISE) + def rewrite_op_jit_force_quasi_immutable(self, op): v_inst, c_fieldname = op.args descr1 = self.cpu.fielddescrof(v_inst.concretetype.TO, diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -289,6 +289,10 @@ # (which is a residual call right now in the x86 backend) return llop.cast_float_to_uint(lltype.Unsigned, x) +def _ll_0_ll_read_timestamp(): + from rpython.rlib import rtimer + return rtimer.read_timestamp() + # math support # ------------ diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -7,7 +7,6 @@ from rpython.rlib.debug import ll_assert, make_sure_not_resized from rpython.rlib.objectmodel import we_are_translated from 
rpython.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck -from rpython.rlib.rtimer import read_timestamp from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rffi from rpython.rtyper.lltypesystem.lloperation import llop @@ -1382,10 +1381,6 @@ def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length): cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length) - @arguments(returns=LONGLONG_TYPECODE) - def bhimpl_ll_read_timestamp(): - return read_timestamp() - def _libffi_save_result(self, cif_description, exchange_buffer, result): ARRAY = lltype.Ptr(rffi.CArray(lltype.typeOf(result))) cast_int_to_ptr = self.cpu.cast_int_to_ptr diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -3,7 +3,6 @@ from rpython.rtyper.lltypesystem import lltype, rstr from rpython.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int -from rpython.rlib.rtimer import read_timestamp from rpython.rlib.unroll import unrolling_iterable from rpython.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr @@ -268,15 +267,6 @@ length = lengthbox.getint() rstr.copy_unicode_contents(src, dst, srcstart, dststart, length) -def do_read_timestamp(cpu, _): - x = read_timestamp() - if longlong.is_64_bit: - assert is_valid_int(x) # 64-bit - return BoxInt(x) - else: - assert isinstance(x, r_longlong) # 32-bit - return BoxFloat(x) - def do_keepalive(cpu, _, x): pass diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1232,10 +1232,6 @@ metainterp.history.record(rop.VIRTUAL_REF_FINISH, [vrefbox, nullbox], None) - @arguments() - def opimpl_ll_read_timestamp(self): - return 
self.metainterp.execute_and_record(rop.READ_TIMESTAMP, None) - @arguments("box", "box", "box") def _opimpl_libffi_save_result(self, box_cif_description, box_exchange_buffer, box_result): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -489,7 +489,6 @@ '_MALLOC_LAST', 'FORCE_TOKEN/0', 'VIRTUAL_REF/2', # removed before it's passed to the backend - 'READ_TIMESTAMP/0', 'MARK_OPAQUE_PTR/1b', # this one has no *visible* side effect, since the virtualizable # must be forced, however we need to execute it anyway From noreply at buildbot.pypy.org Sun Aug 31 17:04:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 17:04:33 +0200 (CEST) Subject: [pypy-commit] pypy default: Update this comment Message-ID: <20140831150433.79FC11C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73233:1eb05eb26ae6 Date: 2014-08-31 17:04 +0200 http://bitbucket.org/pypy/pypy/changeset/1eb05eb26ae6/ Log: Update this comment diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -26,9 +26,9 @@ for method in ['append', 'pop']: loop, = log.loops_by_id(method) print loop.ops_by_id(method) - # on 32-bit, there is f1=read_timestamp(); ...; - # f2=read_timestamp(); f3=call(llong_sub,f1,f2) - # which should turn into a single PADDQ/PSUBQ + # on 32-bit, there is f1=call(read_timestamp); ...; + # f2=call(read_timestamp); f3=call(llong_sub,f1,f2) + # but all calls can be special-cased by the backend if supported if sys.maxint != 2147483647: assert ' call(' not in repr(loop.ops_by_id(method)) assert ' call_may_force(' not in repr(loop.ops_by_id(method)) From noreply at buildbot.pypy.org Sun Aug 31 17:13:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: 
Sun, 31 Aug 2014 17:13:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Avoid printing in the logs; use an unsigned output. Message-ID: <20140831151321.A63381C1036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73234:e9f0c13de06b Date: 2014-08-31 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e9f0c13de06b/ Log: Avoid printing in the logs; use an unsigned output. diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -4,6 +4,7 @@ from rpython.rlib.debug import (have_debug_prints, debug_start, debug_stop, debug_print) from rpython.rlib.objectmodel import we_are_translated, compute_unique_id +from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype, llmemory, rffi @@ -60,7 +61,7 @@ else: debug_start("jit-log-opt-bridge") debug_print("# bridge out of Guard", - "0x%x" % compute_unique_id(descr), + "0x%x" % r_uint(compute_unique_id(descr)), "with", len(operations), "ops") logops = self._log_operations(inputargs, operations, ops_offset) debug_stop("jit-log-opt-bridge") @@ -154,7 +155,7 @@ if op.getdescr() is not None: descr = op.getdescr() if is_guard and self.guard_number: - hash = compute_unique_id(descr) + hash = r_uint(compute_unique_id(descr)) r = "" % hash else: r = self.repr_of_descr(descr) From noreply at buildbot.pypy.org Sun Aug 31 17:27:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 17:27:50 +0200 (CEST) Subject: [pypy-commit] pypy default: longlong support in arm is mostly there, but incomplete so far Message-ID: <20140831152750.5949C1C34DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r73235:05af3fb5fc08 Date: 2014-08-31 17:27 +0200 http://bitbucket.org/pypy/pypy/changeset/05af3fb5fc08/ Log: longlong support in arm is mostly there, but incomplete so far diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- 
a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -20,7 +20,7 @@ IS_64_BIT = False supports_floats = True - supports_longlong = True + supports_longlong = False # incomplete, notably in callbuilder.py supports_singlefloats = True from rpython.jit.backend.arm.arch import JITFRAME_FIXED_SIZE From noreply at buildbot.pypy.org Sun Aug 31 17:32:51 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 31 Aug 2014 17:32:51 +0200 (CEST) Subject: [pypy-commit] pypy gc_no_cleanup_nursery: The jitframe is no longer zeroed, can have garbage there Message-ID: <20140831153251.0C6E61C34DA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: gc_no_cleanup_nursery Changeset: r73236:49185e690a83 Date: 2014-08-31 09:32 -0600 http://bitbucket.org/pypy/pypy/changeset/49185e690a83/ Log: The jitframe is no longer zeroed, can have garbage there diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -4191,9 +4191,6 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 23 assert self.cpu.get_int_value(deadframe, 0) == 42 - # make sure that force reads the registers from a zeroed piece of - # memory - assert values[0] == 0 def test_compile_bridge_while_running(self): def func(): From noreply at buildbot.pypy.org Sun Aug 31 18:35:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 18:35:51 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Branch to finish the longlong support on ARM Message-ID: <20140831163551.832581D392E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73237:24d232b4809c Date: 2014-08-31 18:35 +0200 http://bitbucket.org/pypy/pypy/changeset/24d232b4809c/ Log: Branch to finish the longlong support on ARM From noreply at buildbot.pypy.org Sun Aug 31 18:44:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 
2014 18:44:36 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: in-progress Message-ID: <20140831164436.166D61C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73238:7befb5f6d6db Date: 2014-08-31 19:39 +0300 http://bitbucket.org/pypy/pypy/changeset/7befb5f6d6db/ Log: in-progress diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -316,10 +316,12 @@ float_regs = [] stack_args = [] singlefloats = None + longlong_mask = 0 arglocs = self.arglocs argtypes = self.argtypes + r_register_count = 0 count = 0 # stack alignment counter on_stack = 0 for i in range(len(arglocs)): @@ -327,23 +329,51 @@ if i < len(argtypes) and argtypes[i] == 'S': argtype = argtypes[i] arg = arglocs[i] + if arg.is_float(): - argtype = FLOAT - reg = self.get_next_vfp(argtype) - if reg: - assert len(float_regs) < len(r.vfp_argument_regs) - float_locs.append(arg) - assert reg not in float_regs - float_regs.append(reg) - else: # float argument that needs to go on the stack - if count % 2 != 0: - stack_args.append(None) - count = 0 - on_stack += 1 - stack_args.append(arg) - on_stack += 2 + if i < len(argtypes) and argtypes[i] == 'L': + # A longlong argument. It uses two regular argument + # positions, but aligned to an even number. This is + # a bit strange, but it is the case even for registers: + # it can be in r0-r1 or in r2-r3 but not in r1-r2. + assert arg.is_float() + if r_register_count == 0: + # will temporarily load the register into d8 + float_locs.append(arg) + float_regs.append(r.d8) + longlong_mask |= 1 + r_register_count = 2 + continue + elif r_register_count <= 2: + # will temporarily load the register into d9 + float_locs.append(arg) + float_regs.append(r.d9) + longlong_mask |= 2 + r_register_count = 4 + continue + else: + # A 64-bit float argument. 
Goes into the next free v# + # register, or if none, to the stack aligned to an + # even number of words. + argtype = FLOAT + reg = self.get_next_vfp(argtype) + if reg: + assert len(float_regs) < len(r.vfp_argument_regs) + float_locs.append(arg) + assert reg not in float_regs + float_regs.append(reg) + continue + # float or longlong argument that needs to go on the stack + if count % 2 != 0: + stack_args.append(None) + count = 0 + on_stack += 1 + stack_args.append(arg) + on_stack += 2 + elif argtype == 'S': - # Singlefloat argument + # Singlefloat (32-bit) argument. Goes into the next free + # v# register, or if none, to the stack in a single word. if singlefloats is None: singlefloats = [] tgt = self.get_next_vfp(argtype) @@ -355,19 +385,24 @@ on_stack += 1 stack_args.append(arg) else: - if len(non_float_regs) < len(r.argument_regs): - reg = r.argument_regs[len(non_float_regs)] + # Regular one-word argument. Goes into the next register + # free from the list r0, r1, r2, r3, or to the stack. 
+ if r_register_count < len(r.argument_regs): + reg = r.argument_regs[r_register_count] + r_register_count += 1 non_float_locs.append(arg) non_float_regs.append(reg) else: # non-float argument that needs to go on the stack count += 1 on_stack += 1 stack_args.append(arg) + # align the stack if count % 2 != 0: stack_args.append(None) on_stack += 1 self._push_stack_args(stack_args, on_stack*WORD) + # Check that the address of the function we want to call is not # currently stored in one of the registers used to pass the arguments # or on the stack, which we can not access later @@ -377,6 +412,7 @@ non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 + # remap values stored in vfp registers remap_frame_layout(self.asm, float_locs, float_regs, r.vfp_ip) if singlefloats: @@ -392,8 +428,14 @@ src = r.ip if src.is_core_reg(): self.mc.VMOV_cs(dest.value, src.value) + # remap values stored in core registers remap_frame_layout(self.asm, non_float_locs, non_float_regs, r.ip) + if longlong_mask & 1: + self.mc.FMRRD(r.r0.value, r.r1.value, r.d8.value) + if longlong_mask & 2: + self.mc.FMRRD(r.r2.value, r.r3.value, r.d9.value) + def load_result(self): resloc = self.resloc diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -339,6 +339,20 @@ MOD = binary_helper_call('int_mod') UDIV = binary_helper_call('uint_div') + def FMDRR(self, dm, rd, rn, c=cond.AL): + self.write32(c << 28 + | 0x0c400b10 + | (dm & 0xF) + | (rd & 0xF) << 12 + | (rn & 0xF) << 16) + + def FMRRD(self, rd, rn, dm, c=cond.AL): + self.write32(c << 28 + | 0x0c500b10 + | (dm & 0xF) + | (rd & 0xF) << 12 + | (rn & 0xF) << 16) + def _encode_reg_list(self, instr, regs): for reg in regs: instr |= 0x1 << reg diff --git a/rpython/jit/backend/arm/test/test_instr_codebuilder.py b/rpython/jit/backend/arm/test/test_instr_codebuilder.py --- 
a/rpython/jit/backend/arm/test/test_instr_codebuilder.py +++ b/rpython/jit/backend/arm/test/test_instr_codebuilder.py @@ -199,6 +199,14 @@ self.cb.DMB() self.assert_equal('DMB') + def test_fmdrr(self): + self.cb.FMDRR(r.d11.value, r.r9.value, r.r14.value) + self.assert_equal('FMDRR d11, r9, r14') + + def test_fmrrd(self): + self.cb.FMRRD(r.r9.value, r.r14.value, r.d11.value) + self.assert_equal('FMRRD r9, r14, d11') + def test_size_of_gen_load_int(): for v, n in [(5, 4), (6, 4), (7, 2)]: From noreply at buildbot.pypy.org Sun Aug 31 18:44:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 18:44:37 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: hg merge default Message-ID: <20140831164437.46A0A1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73239:b7d61778b0c9 Date: 2014-08-31 19:40 +0300 http://bitbucket.org/pypy/pypy/changeset/b7d61778b0c9/ Log: hg merge default From noreply at buildbot.pypy.org Sun Aug 31 18:50:33 2014 From: noreply at buildbot.pypy.org (RonnyPfannschmidt) Date: Sun, 31 Aug 2014 18:50:33 +0200 (CEST) Subject: [pypy-commit] pypy refine-testrunner: merge from default Message-ID: <20140831165033.454221C06D8@cobra.cs.uni-duesseldorf.de> Author: Ronny Pfannschmidt Branch: refine-testrunner Changeset: r73240:a9df5830e867 Date: 2014-02-02 17:22 +0100 http://bitbucket.org/pypy/pypy/changeset/a9df5830e867/ Log: merge from default diff too long, truncating to 2000 out of 5591 lines diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. 
-In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -52,3 +52,8 @@ .. branch: annotator Remove FlowObjSpace. Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -232,9 +232,8 @@ raise operationerrfmt(space.w_TypeError, msg, w_result) def ord(self, space): - typename = space.type(self).getname(space) - msg = "ord() expected string of length 1, but %s found" - raise operationerrfmt(space.w_TypeError, msg, typename) + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) def __spacebind__(self, space): return self diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
@@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't @@ -371,8 +374,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). 
+ frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -744,6 +744,9 @@ else: raise OperationError(space.w_TypeError, space.wrap("raise: no active exception to re-raise")) + if operror.w_type is space.w_None: + raise OperationError(space.w_TypeError, + space.wrap("raise: the exception to re-raise was cleared")) # re-raise, no new traceback obj will be attached self.last_exception = operror raise RaiseWithExplicitTraceback(operror) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,73 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" 
+ + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = 
space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_get_buffer_type', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', '_Py_init_pycobject', + 'PyCObject_Type', '_Py_get_cobject_type', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_get_capsule_type', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -691,17 +691,25 @@ prefix = 'PyPy' else: prefix = 'cpyexttest' - init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, 
[], lltype.Void, - compilation_info=eci, releasegil=False) - INIT_FUNCTIONS.extend([ - lambda space: init_buffer(), - lambda space: init_pycobject(), - lambda space: init_capsule(), - ]) + # jump through hoops to avoid releasing the GIL during initialization + # of the cpyext module. The C functions are called with no wrapper, + # but must not do anything like calling back PyType_Ready(). We + # use them just to get a pointer to the PyTypeObjects defined in C. + get_buffer_type = rffi.llexternal('_%s_get_buffer_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_cobject_type = rffi.llexternal('_%s_get_cobject_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_capsule_type = rffi.llexternal('_%s_get_capsule_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + def init_types(space): + from pypy.module.cpyext.typeobject import py_type_ready + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) + INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -15,6 +15,8 @@ #define HAVE_UNICODE #define WITHOUT_COMPLEX #define HAVE_WCHAR_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_STAT_H 1 /* PyPy supposes Py_UNICODE == wchar_t */ #define HAVE_USABLE_WCHAR_T 1 diff --git a/pypy/module/cpyext/include/pyport.h b/pypy/module/cpyext/include/pyport.h --- a/pypy/module/cpyext/include/pyport.h +++ b/pypy/module/cpyext/include/pyport.h @@ -64,4 +64,45 @@ # error "Python needs a typedef for Py_uintptr_t in pyport.h." 
#endif /* HAVE_UINTPTR_T */ +/******************************* + * stat() and fstat() fiddling * + *******************************/ + +/* We expect that stat and fstat exist on most systems. + * It's confirmed on Unix, Mac and Windows. + * If you don't have them, add + * #define DONT_HAVE_STAT + * and/or + * #define DONT_HAVE_FSTAT + * to your pyconfig.h. Python code beyond this should check HAVE_STAT and + * HAVE_FSTAT instead. + * Also + * #define HAVE_SYS_STAT_H + * if exists on your platform, and + * #define HAVE_STAT_H + * if does. + */ +#ifndef DONT_HAVE_STAT +#define HAVE_STAT +#endif + +#ifndef DONT_HAVE_FSTAT +#define HAVE_FSTAT +#endif + +#ifdef RISCOS +#include +#include "unixstuff.h" +#endif + +#ifdef HAVE_SYS_STAT_H +#if defined(PYOS_OS2) && defined(PYCC_GCC) +#include +#endif +#include +#elif defined(HAVE_STAT_H) +#include +#else +#endif + #endif /* Py_PYPORT_H */ diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,9 +783,9 @@ return size; } -void _Py_init_bufferobject(void) +PyTypeObject *_Py_get_buffer_type(void) { - PyType_Ready(&PyBuffer_Type); + return &PyBuffer_Type; } static PySequenceMethods buffer_as_sequence = { diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,8 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void _Py_init_capsule() +PyTypeObject *_Py_get_capsule_type(void) { - PyType_Ready(&PyCapsule_Type); + return &PyCapsule_Type; } - diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void _Py_init_pycobject() +PyTypeObject *_Py_get_cobject_type(void) { - PyType_Ready(&PyCObject_Type); + return &PyCObject_Type; } diff --git 
a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -549,11 +549,14 @@ pto.c_tp_flags |= Py_TPFLAGS_READY return pto +def py_type_ready(space, pto): + if pto.c_tp_flags & Py_TPFLAGS_READY: + return + type_realize(space, rffi.cast(PyObject, pto)) + @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1) def PyType_Ready(space, pto): - if pto.c_tp_flags & Py_TPFLAGS_READY: - return 0 - type_realize(space, rffi.cast(PyObject, pto)) + py_type_ready(space, pto) return 0 def type_realize(space, py_obj): diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -12,8 +12,8 @@ cache = space.fromcache(MethodCache) cache.clear() if space.config.objspace.std.withmapdict: - from pypy.objspace.std.mapdict import IndexCache - cache = space.fromcache(IndexCache) + from pypy.objspace.std.mapdict import MapAttrCache + cache = space.fromcache(MapAttrCache) cache.clear() rgc.collect() return space.wrap(0) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -394,6 +394,9 @@ class W_Float64Box(W_FloatingBox, PrimitiveBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + def descr_as_integer_ratio(self, space): + return space.call_method(self.item(space), 'as_integer_ratio') + class W_ComplexFloatingBox(W_InexactBox): def descr_get_real(self, space): dtype = self._COMPONENTS_BOX._get_dtype(space) @@ -719,6 +722,7 @@ __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), + as_integer_ratio = interp2app(W_Float64Box.descr_as_integer_ratio), ) W_ComplexFloatingBox.typedef = TypeDef("complexfloating", W_InexactBox.typedef, diff --git a/pypy/module/micronumpy/interp_numarray.py 
b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -903,8 +903,8 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.get_dtype(), other.get_dtype()) + dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -912,25 +912,27 @@ out_shape, other_critical_dim = _match_dot_shapes(space, self, other) if out: matches = True - if len(out.get_shape()) != len(out_shape): + if dtype != out.get_dtype(): + matches = False + elif not out.implementation.order == "C": + matches = False + elif len(out.get_shape()) != len(out_shape): matches = False else: for i in range(len(out_shape)): if out.get_shape()[i] != out_shape[i]: matches = False break - if dtype != out.get_dtype(): - matches = False - if not out.implementation.order == "C": - matches = False if not matches: raise OperationError(space.w_ValueError, space.wrap( - 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + 'output array is not acceptable (must have the right type, ' + 'nr dimensions, and be a C-Array)')) w_res = out + w_res.fill(space, self.get_dtype().coerce(space, None)) else: w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, w_res, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) def descr_mean(self, space, __args__): diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -254,6 +254,13 @@ 
return out return res + def descr_outer(self, space, __args__): + return self._outer(space, __args__) + + def _outer(self, space, __args__): + raise OperationError(space.w_ValueError, + space.wrap("outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -432,6 +439,7 @@ nin = interp_attrproperty("argcount", cls=W_Ufunc), reduce = interp2app(W_Ufunc.descr_reduce), + outer = interp2app(W_Ufunc.descr_outer), ) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -146,8 +146,7 @@ while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, - calc_dtype=calc_dtype, - ) + calc_dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval @@ -172,8 +171,7 @@ shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=calc_dtype, - ) + dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) @@ -271,8 +269,7 @@ iter.next() shapelen = len(arr.get_shape()) while not iter.done(): - arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, - ) + arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem() new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): @@ -311,6 +308,7 @@ if i != right_critical_dim] right_skip = range(len(left_shape) - 1) result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert result.get_dtype() == dtype outi = result.create_dot_iter(broadcast_shape, result_skip) lefti = left.create_dot_iter(broadcast_shape, left_skip) righti = right.create_dot_iter(broadcast_shape, right_skip) @@ -318,10 +316,10 @@ 
dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem().convert_to(space, dtype) + outval = outi.getitem() v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(space, dtype) - outi.setitem(value) + v = dtype.itemtype.add(v, outval) + outi.setitem(v) outi.next() righti.next() lefti.next() @@ -652,8 +650,8 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), - decimals) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = dtype.itemtype.round(w_v, decimals) out_iter.setitem(w_v) arr_iter.next() out_iter.next() diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -56,6 +56,10 @@ b = arange(12).reshape(4, 3) c = a.dot(b) assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.dot(b.astype(float)) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.astype(float).dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() a = arange(24).reshape(2, 3, 4) raises(ValueError, "a.dot(a)") @@ -91,9 +95,11 @@ out = arange(9).reshape(3, 3) c = dot(a, b, out=out) assert (c == out).all() - out = arange(9,dtype=float).reshape(3, 3) + assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + out = arange(9, dtype=float).reshape(3, 3) exc = raises(ValueError, dot, a, b, out) - assert exc.value[0].find('not acceptable') > 0 + assert exc.value[0] == ('output array is not acceptable (must have the ' + 'right type, nr dimensions, and be a C-Array)') def test_choose_basic(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_scalar.py 
b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -181,6 +181,11 @@ s = np.dtype([('a', 'int64'), ('b', 'int64')]).type('a' * 16) assert s.view('S16') == 'a' * 16 + def test_as_integer_ratio(self): + import numpy as np + raises(AttributeError, 'np.float32(1.5).as_integer_ratio()') + assert np.float64(1.5).as_integer_ratio() == (3, 2) + def test_complex_scalar_complex_cast(self): import numpy as np for tp in [np.csingle, np.cdouble, np.clongdouble]: diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1052,3 +1052,9 @@ np.array([0, -1, -3, -6, -10])).all() assert (np.divide.accumulate(todivide) == np.array([2., 4., 16.])).all() + + def test_outer(self): + import numpy as np + from numpypy import absolute + exc = raises(ValueError, np.absolute.outer, [-1, -2]) + assert exc.value[0] == 'outer product only supported for binary functions' diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -7,9 +7,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -45,9 +45,9 @@ from rpython.rlib.test.test_clibffi import get_libm_name def main(libm_name): try: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types except ImportError: - sys.stderr.write('SKIP: cannot import _ffi\n') + sys.stderr.write('SKIP: cannot import _rawffi.alt\n') return 0 libm = CDLL(libm_name) @@ -82,12 +82,12 @@ from threading import Thread # 
if os.name == 'nt': - from _ffi import WinDLL, types + from _rawffi.alt import WinDLL, types libc = WinDLL('Kernel32.dll') sleep = libc.getfunc('Sleep', [types.uint], types.uint) delays = [0]*n + [1000] else: - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libc = CDLL(libc_name) sleep = libc.getfunc('sleep', [types.uint], types.uint) delays = [0]*n + [1] @@ -144,7 +144,7 @@ def test__ffi_struct(self): def main(): - from _ffi import _StructDescr, Field, types + from _rawffi.alt import _StructDescr, Field, types fields = [ Field('x', types.slong), ] diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -35,7 +35,7 @@ class A(object): pass a = A() - a.x = 2 + a.x = 1 def main(n): i = 0 while i < n: @@ -49,8 +49,7 @@ i9 = int_lt(i5, i6) guard_true(i9, descr=...) guard_not_invalidated(descr=...) - i10 = int_add_ovf(i5, i7) - guard_no_overflow(descr=...) + i10 = int_add(i5, 1) --TICK-- jump(..., descr=...) """) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -155,9 +155,7 @@ to exc_info() will return (None,None,None) until another exception is raised and caught in the current thread or the execution stack returns to a frame where another exception is being handled.""" - operror = space.getexecutioncontext().sys_exc_info() - if operror is not None: - operror.clear(space) + space.getexecutioncontext().clear_sys_exc_info() def settrace(space, w_func): """Set the global debug tracing function. 
It will be called on each diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -1,20 +1,21 @@ """The builtin bytearray implementation""" +from rpython.rlib.objectmodel import ( + import_from_mixin, newlist_hint, resizelist_hint) +from rpython.rlib.rstring import StringBuilder + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index -from rpython.rlib.objectmodel import newlist_hint, resizelist_hint, import_from_mixin -from rpython.rlib.rstring import StringBuilder +NON_HEX_MSG = "non-hexadecimal number found in fromhex() arg at position %d" -def _make_data(s): - return [s[i] for i in range(len(s))] class W_BytearrayObject(W_Root): import_from_mixin(StringMethods) @@ -23,7 +24,7 @@ w_self.data = data def __repr__(w_self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%s)" % (w_self.__class__.__name__, ''.join(w_self.data)) def _new(self, value): @@ -127,11 +128,6 @@ @staticmethod def descr_fromhex(space, w_bytearraytype, w_hexstring): - "bytearray.fromhex(string) -> bytearray\n" - "\n" - "Create a bytearray object from a string of hexadecimal numbers.\n" - "Spaces between two numbers are accepted.\n" - "Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\\xb9\\x01\\xef')." 
hexstring = space.str_w(w_hexstring) hexstring = hexstring.lower() data = [] @@ -143,18 +139,15 @@ i += 1 if i >= length: break - if i+1 == length: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + if i + 1 == length: + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) top = _hex_digit_to_int(hexstring[i]) if top == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % i)) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i) bot = _hex_digit_to_int(hexstring[i+1]) if bot == -1: - raise OperationError(space.w_ValueError, space.wrap( - "non-hexadecimal number found in fromhex() arg at position %d" % (i+1,))) + raise operationerrfmt(space.w_ValueError, NON_HEX_MSG, i + 1) data.append(chr(top*16 + bot)) # in CPython bytearray.fromhex is a staticmethod, so @@ -178,23 +171,25 @@ from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, encode_object ) - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) - # if w_source is an integer this correctly raises a TypeError - # the CPython error message is: "encoding or errors without a string argument" - # ours is: "expected unicode, got int object" + # if w_source is an integer this correctly raises a + # TypeError the CPython error message is: "encoding or + # errors without a string argument" ours is: "expected + # unicode, got int object" w_source = encode_object(space, w_source, encoding, errors) # Is it an int? 
try: count = space.int_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: if count < 0: - raise OperationError(space.w_ValueError, - space.wrap("bytearray negative count")) + raise operationerrfmt(space.w_ValueError, + "bytearray negative count") self.data = ['\0'] * count return @@ -224,8 +219,8 @@ elif not '\x20' <= c < '\x7f': n = ord(c) buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) else: buf.append(c) @@ -238,51 +233,60 @@ def descr_eq(self, space, w_other): try: - return space.newbool(self._val(space) == self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) == self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ne(self, space, w_other): try: - return space.newbool(self._val(space) != self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) != self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_lt(self, space, w_other): try: - return space.newbool(self._val(space) < self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) < self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_le(self, space, w_other): try: - return space.newbool(self._val(space) <= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) <= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_gt(self, space, w_other): 
try: - return space.newbool(self._val(space) > self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) > self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) def descr_ge(self, space, w_other): try: - return space.newbool(self._val(space) >= self._op_val(space, w_other)) - except OperationError, e: + res = self._val(space) >= self._op_val(space, w_other) + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise + return space.newbool(res) + + def descr_iter(self, space): + return space.newseqiter(self) def descr_buffer(self, space): return BytearrayBuffer(self.data) @@ -297,7 +301,7 @@ def descr_inplace_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -312,12 +316,13 @@ _setitem_slice_helper(space, self.data, start, step, slicelength, sequence2, empty_elem='\x00') else: - idx = space.getindex_w(w_index, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_index, space.w_IndexError, + "bytearray index") try: self.data[idx] = getbytevalue(space, w_other) except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray index out of range")) + raise operationerrfmt(space.w_IndexError, + "bytearray index out of range") def descr_delitem(self, space, w_idx): if isinstance(w_idx, W_SliceObject): @@ -325,12 +330,13 @@ len(self.data)) _delitem_slice_helper(space, self.data, start, step, slicelength) else: - idx = space.getindex_w(w_idx, space.w_IndexError, "bytearray index") + idx = space.getindex_w(w_idx, space.w_IndexError, + "bytearray index") try: del self.data[idx] except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("bytearray deletion index out of range")) + 
raise operationerrfmt(space.w_IndexError, + "bytearray deletion index out of range") def descr_append(self, space, w_item): self.data.append(getbytevalue(space, w_item)) @@ -357,10 +363,9 @@ result = self.data.pop(index) except IndexError: if not self.data: - raise OperationError(space.w_IndexError, space.wrap( - "pop from empty bytearray")) - raise OperationError(space.w_IndexError, space.wrap( - "pop index out of range")) + raise operationerrfmt(space.w_IndexError, + "pop from empty bytearray") + raise operationerrfmt(space.w_IndexError, "pop index out of range") return space.wrap(ord(result)) def descr_remove(self, space, w_char): @@ -368,27 +373,55 @@ try: self.data.remove(chr(char)) except ValueError: - raise OperationError(space.w_ValueError, space.wrap( - "value not found in bytearray")) + raise operationerrfmt(space.w_ValueError, + "value not found in bytearray") + + _StringMethods_descr_contains = descr_contains + def descr_contains(self, space, w_sub): + if space.isinstance_w(w_sub, space.w_int): + char = space.int_w(w_sub) + return _descr_contains_bytearray(self.data, space, char) + return self._StringMethods_descr_contains(space, w_sub) def descr_reverse(self, space): self.data.reverse() + +# ____________________________________________________________ +# helpers for slow paths, moved out because they contain loops + +def _make_data(s): + return [s[i] for i in range(len(s))] + + +def _descr_contains_bytearray(data, space, char): + if not 0 <= char < 256: + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") + for c in data: + if ord(c) == char: + return space.w_True + return space.w_False + +# ____________________________________________________________ + + def getbytevalue(space, w_value): if space.isinstance_w(w_value, space.w_str): string = space.str_w(w_value) if len(string) != 1: - raise OperationError(space.w_ValueError, space.wrap( - "string must be of size 1")) + raise operationerrfmt(space.w_ValueError, + "string must 
be of size 1") return string[0] value = space.getindex_w(w_value, None) if not 0 <= value < 256: # this includes the OverflowError in case the long is too large - raise OperationError(space.w_ValueError, space.wrap( - "byte must be in range(0, 256)")) + raise operationerrfmt(space.w_ValueError, + "byte must be in range(0, 256)") return chr(value) + def new_bytearray(space, w_bytearraytype, data): w_obj = space.allocate_instance(W_BytearrayObject, w_bytearraytype) W_BytearrayObject.__init__(w_obj, data) @@ -399,7 +432,7 @@ # String-like argument try: string = space.bufferstr_new_w(w_source) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_TypeError): raise else: @@ -413,7 +446,7 @@ while True: try: w_item = space.next(w_iter) - except OperationError, e: + except OperationError as e: if not e.match(space, space.w_StopIteration): raise break @@ -424,6 +457,7 @@ resizelist_hint(data, extended) return data + def _hex_digit_to_int(d): val = ord(d) if 47 < val < 58: @@ -560,12 +594,12 @@ def decode(): """B.decode(encoding=None, errors='strict') -> unicode - Decode B using the codec registered for encoding. encoding defaults - to the default encoding. errors may be given to set a different error - handling scheme. Default is 'strict' meaning that encoding errors raise - a UnicodeDecodeError. Other possible values are 'ignore' and 'replace' - as well as any other name registered with codecs.register_error that is - able to handle UnicodeDecodeErrors. + Decode B using the codec registered for encoding. encoding defaults to + the default encoding. errors may be given to set a different error + handling scheme. Default is 'strict' meaning that encoding errors + raise a UnicodeDecodeError. Other possible values are 'ignore' and + 'replace' as well as any other name registered with + codecs.register_error that is able to handle UnicodeDecodeErrors. 
""" def endswith(): @@ -602,7 +636,7 @@ """ def fromhex(): - """bytearray.fromhex(string) -> bytearray (static method) + r"""bytearray.fromhex(string) -> bytearray (static method) Create a bytearray object from a string of hexadecimal numbers. Spaces between two numbers are accepted. @@ -884,6 +918,8 @@ __ge__ = interp2app(W_BytearrayObject.descr_ge, doc=BytearrayDocstrings.__ge__.__doc__), + __iter__ = interp2app(W_BytearrayObject.descr_iter, + doc=BytearrayDocstrings.__iter__.__doc__), __len__ = interp2app(W_BytearrayObject.descr_len, doc=BytearrayDocstrings.__len__.__doc__), __contains__ = interp2app(W_BytearrayObject.descr_contains, @@ -1024,9 +1060,10 @@ _space_chars = ''.join([chr(c) for c in [9, 10, 11, 12, 13, 32]]) -#XXX share the code again with the stuff in listobject.py + +# XXX share the code again with the stuff in listobject.py def _delitem_slice_helper(space, items, start, step, slicelength): - if slicelength==0: + if slicelength == 0: return if step < 0: @@ -1056,6 +1093,7 @@ assert start >= 0 # annotator hint del items[start:] + def _setitem_slice_helper(space, items, start, step, slicelength, sequence2, empty_elem): assert slicelength >= 0 diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -1,19 +1,23 @@ """The builtin str implementation""" +from rpython.rlib.jit import we_are_jitted +from rpython.rlib.objectmodel import ( + compute_hash, compute_unique_id, import_from_mixin) +from rpython.rlib.rstring import StringBuilder, replace + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import StringBuffer from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app +from pypy.interpreter.gateway import ( + WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from 
pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods -from pypy.objspace.std.unicodeobject import (unicode_from_string, - decode_object, unicode_from_encoded_object, _get_encoding_and_errors) -from rpython.rlib.jit import we_are_jitted -from rpython.rlib.objectmodel import compute_hash, compute_unique_id, import_from_mixin -from rpython.rlib.rstring import StringBuilder, replace +from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_encoded_object, + unicode_from_string) class W_AbstractBytesObject(W_Root): @@ -184,8 +188,8 @@ def descr_format(self, space, __args__): """S.format(*args, **kwargs) -> string - Return a formatted version of S, using substitutions from args and kwargs. - The substitutions are identified by braces ('{' and '}'). + Return a formatted version of S, using substitutions from args and + kwargs. The substitutions are identified by braces ('{' and '}'). """ def descr_index(self, space, w_sub, w_start=None, w_end=None): @@ -319,8 +323,8 @@ """S.rpartition(sep) -> (head, sep, tail) Search for the separator sep in S, starting at the end of S, and return - the part before it, the separator itself, and the part after it. If the - separator is not found, return two empty strings and S. + the part before it, the separator itself, and the part after it. If + the separator is not found, return two empty strings and S. 
""" @unwrap_spec(maxsplit=int) @@ -432,7 +436,7 @@ self._value = str def __repr__(self): - """ representation for debugging purposes """ + """representation for debugging purposes""" return "%s(%r)" % (self.__class__.__name__, self._value) def unwrap(self, space): @@ -521,7 +525,7 @@ return space.newlist_bytes(lst) @staticmethod - @unwrap_spec(w_object = WrappedDefault("")) + @unwrap_spec(w_object=WrappedDefault("")) def descr_new(space, w_stringtype, w_object): # NB. the default value of w_object is really a *wrapped* empty string: # there is gateway magic at work @@ -624,7 +628,8 @@ _StringMethods_descr_add = descr_add def descr_add(self, space, w_other): if space.isinstance_w(w_other, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) return space.add(self_as_unicode, w_other) elif space.isinstance_w(w_other, space.w_bytearray): # XXX: eliminate double-copy @@ -635,7 +640,7 @@ from pypy.objspace.std.strbufobject import W_StringBufferObject try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -648,24 +653,32 @@ _StringMethods__startswith = _startswith def _startswith(self, space, value, w_prefix, start, end): if space.isinstance_w(w_prefix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._startswith(space, self_as_unicode._value, w_prefix, start, end) - return self._StringMethods__startswith(space, value, w_prefix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._startswith(space, self_as_unicode._value, + w_prefix, start, end) + return self._StringMethods__startswith(space, value, w_prefix, start, + end) _StringMethods__endswith = _endswith def _endswith(self, space, value, w_suffix, start, end): 
if space.isinstance_w(w_suffix, space.w_unicode): - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return self_as_unicode._endswith(space, self_as_unicode._value, w_suffix, start, end) - return self._StringMethods__endswith(space, value, w_suffix, start, end) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return self_as_unicode._endswith(space, self_as_unicode._value, + w_suffix, start, end) + return self._StringMethods__endswith(space, value, w_suffix, start, + end) _StringMethods_descr_contains = descr_contains def descr_contains(self, space, w_sub): if space.isinstance_w(w_sub, space.w_unicode): from pypy.objspace.std.unicodeobject import W_UnicodeObject assert isinstance(w_sub, W_UnicodeObject) - self_as_unicode = unicode_from_encoded_object(space, self, None, None) - return space.newbool(self_as_unicode._value.find(w_sub._value) >= 0) + self_as_unicode = unicode_from_encoded_object(space, self, None, + None) + return space.newbool( + self_as_unicode._value.find(w_sub._value) >= 0) return self._StringMethods_descr_contains(space, w_sub) _StringMethods_descr_replace = descr_replace @@ -685,16 +698,19 @@ try: res = replace(input, sub, by, count) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("replace string is too long")) + raise operationerrfmt(space.w_OverflowError, + "replace string is too long") return self_as_uni._new(res) return self._StringMethods_descr_replace(space, w_old, w_new, count) - def descr_lower(self, space): - return W_BytesObject(self._value.lower()) - - def descr_upper(self, space): - return W_BytesObject(self._value.upper()) + _StringMethods_descr_join = descr_join + def descr_join(self, space, w_list): + l = space.listview_bytes(w_list) + if l is not None: + if len(l) == 1: + return space.wrap(l[0]) + return space.wrap(self._val(space).join(l)) + return self._StringMethods_descr_join(space, w_list) def _join_return_one(self, space, w_obj): return 
(space.is_w(space.type(w_obj), space.w_str) or @@ -714,6 +730,12 @@ w_u = space.call_function(space.w_unicode, self) return space.call_method(w_u, "join", w_list) + def descr_lower(self, space): + return W_BytesObject(self._value.lower()) + + def descr_upper(self, space): + return W_BytesObject(self._value.upper()) + def descr_formatter_parser(self, space): from pypy.objspace.std.newformat import str_template_formatter tformat = str_template_formatter(space, space.str_w(self)) @@ -751,6 +773,7 @@ return W_BytesObject.EMPTY return W_BytesObject(s) + def wrapchar(space, c): if space.config.objspace.std.withprebuiltchar and not we_are_jitted(): return W_BytesObject.PREBUILT[ord(c)] @@ -830,7 +853,8 @@ __format__ = interpindirect2app(W_BytesObject.descr__format__), __mod__ = interpindirect2app(W_BytesObject.descr_mod), __buffer__ = interpindirect2app(W_AbstractBytesObject.descr_buffer), - __getnewargs__ = interpindirect2app(W_AbstractBytesObject.descr_getnewargs), + __getnewargs__ = interpindirect2app( + W_AbstractBytesObject.descr_getnewargs), _formatter_parser = interp2app(W_BytesObject.descr_formatter_parser), _formatter_field_name_split = interp2app(W_BytesObject.descr_formatter_field_name_split), @@ -865,8 +889,8 @@ buf.append_slice(s, startslice, i) startslice = i + 1 buf.append('\\x') - buf.append("0123456789abcdef"[n>>4]) - buf.append("0123456789abcdef"[n&0xF]) + buf.append("0123456789abcdef"[n >> 4]) + buf.append("0123456789abcdef"[n & 0xF]) if use_bs_char: if i != startslice: diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -162,9 +162,9 @@ return self @staticmethod - def newlist_bytes(space, list_s): + def newlist_bytes(space, list_b): strategy = space.fromcache(BytesListStrategy) - storage = strategy.erase(list_s) + storage = strategy.erase(list_b) return W_ListObject.from_storage_and_strategy(space, storage, strategy) @staticmethod diff --git 
a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1,15 +1,16 @@ import weakref -from rpython.rlib import jit, objectmodel, debug + +from rpython.rlib import jit, objectmodel, debug, rerased from rpython.rlib.rarithmetic import intmask, r_uint -from rpython.rlib import rerased from pypy.interpreter.baseobjspace import W_Root -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, DictStrategy, ObjectDictStrategy -from pypy.objspace.std.dictmultiobject import BaseKeyIterator, BaseValueIterator, BaseItemIterator -from pypy.objspace.std.dictmultiobject import _never_equal_to_string -from pypy.objspace.std.objectobject import W_ObjectObject +from pypy.objspace.std.dictmultiobject import ( + W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator, + BaseValueIterator, BaseItemIterator, _never_equal_to_string +) from pypy.objspace.std.typeobject import TypeCell + # ____________________________________________________________ # attribute shapes @@ -19,7 +20,7 @@ # we want to propagate knowledge that the result cannot be negative class AbstractAttribute(object): - _immutable_fields_ = ['terminator'] + _immutable_fields_ = ['terminator', 'ever_mutated?'] cache_attrs = None _size_estimate = 0 @@ -27,46 +28,60 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator + self.ever_mutated = False def read(self, obj, selector): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._read_terminator(obj, selector) - return obj._mapdict_read_storage(index) + if ( + jit.isconstant(attr.storageindex) and + jit.isconstant(obj) and + not attr.ever_mutated + ): + return self._pure_mapdict_read_storage(obj, attr.storageindex) + else: + return obj._mapdict_read_storage(attr.storageindex) + + @jit.elidable + def _pure_mapdict_read_storage(self, obj, storageindex): + return 
obj._mapdict_read_storage(storageindex) def write(self, obj, selector, w_value): - index = self.index(selector) - if index < 0: + attr = self.find_map_attr(selector) + if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - obj._mapdict_write_storage(index, w_value) + if not attr.ever_mutated: + attr.ever_mutated = True + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): return None - def index(self, selector): + def find_map_attr(self, selector): if jit.we_are_jitted(): # hack for the jit: - # the _index method is pure too, but its argument is never + # the _find_map_attr method is pure too, but its argument is never # constant, because it is always a new tuple - return self._index_jit_pure(selector[0], selector[1]) + return self._find_map_attr_jit_pure(selector[0], selector[1]) else: - return self._index_indirection(selector) + return self._find_map_attr_indirection(selector) @jit.elidable - def _index_jit_pure(self, name, index): - return self._index_indirection((name, index)) + def _find_map_attr_jit_pure(self, name, index): + return self._find_map_attr_indirection((name, index)) @jit.dont_look_inside - def _index_indirection(self, selector): + def _find_map_attr_indirection(self, selector): if (self.space.config.objspace.std.withmethodcache): - return self._index_cache(selector) - return self._index(selector) + return self._find_map_attr_cache(selector) + return self._find_map_attr(selector) @jit.dont_look_inside - def _index_cache(self, selector): + def _find_map_attr_cache(self, selector): space = self.space - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp SHIFT1 = SHIFT2 - 5 attrs_as_int = objectmodel.current_object_addr_as_int(self) @@ -74,32 +89,32 @@ # _pure_lookup_where_with_method_cache() hash_selector = objectmodel.compute_hash(selector) product = intmask(attrs_as_int * 
hash_selector) - index_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 + attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2 # ^^^Note2: same comment too - cached_attr = cache.attrs[index_hash] + cached_attr = cache.attrs[attr_hash] if cached_attr is self: - cached_selector = cache.selectors[index_hash] + cached_selector = cache.selectors[attr_hash] if cached_selector == selector: - index = cache.indices[index_hash] + attr = cache.cached_attrs[attr_hash] if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.hits[name] = cache.hits.get(name, 0) + 1 - return index - index = self._index(selector) - cache.attrs[index_hash] = self - cache.selectors[index_hash] = selector - cache.indices[index_hash] = index + return attr + attr = self._find_map_attr(selector) + cache.attrs[attr_hash] = self + cache.selectors[attr_hash] = selector + cache.cached_attrs[attr_hash] = attr if space.config.objspace.std.withmethodcachecounter: name = selector[0] cache.misses[name] = cache.misses.get(name, 0) + 1 - return index + return attr - def _index(self, selector): + def _find_map_attr(self, selector): while isinstance(self, PlainAttribute): if selector == self.selector: - return self.position + return self self = self.back - return -1 + return None def copy(self, obj): raise NotImplementedError("abstract base class") @@ -155,7 +170,7 @@ # the order is important here: first change the map, then the storage, # for the benefit of the special subclasses obj._set_mapdict_map(attr) - obj._mapdict_write_storage(attr.position, w_value) + obj._mapdict_write_storage(attr.storageindex, w_value) def materialize_r_dict(self, space, obj, dict_w): raise NotImplementedError("abstract base class") @@ -261,11 +276,11 @@ return Terminator.set_terminator(self, obj, terminator) class PlainAttribute(AbstractAttribute): - _immutable_fields_ = ['selector', 'position', 'back'] + _immutable_fields_ = ['selector', 'storageindex', 'back'] def __init__(self, 
selector, back): AbstractAttribute.__init__(self, back.space, back.terminator) self.selector = selector - self.position = back.length() + self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 @@ -288,7 +303,7 @@ return new_obj def length(self): - return self.position + 1 + return self.storageindex + 1 def set_terminator(self, obj, terminator): new_obj = self.back.set_terminator(obj, terminator) @@ -304,7 +319,7 @@ new_obj = self.back.materialize_r_dict(space, obj, dict_w) if self.selector[1] == DICT: w_attr = space.wrap(self.selector[0]) - dict_w[w_attr] = obj._mapdict_read_storage(self.position) + dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex) else: self._copy_attr(obj, new_obj) return new_obj @@ -316,21 +331,21 @@ return new_obj def __repr__(self): - return "" % (self.selector, self.position, self.back) + return "" % (self.selector, self.storageindex, self.back) def _become(w_obj, new_obj): # this is like the _become method, really, but we cannot use that due to # RPython reasons w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map) -class IndexCache(object): +class MapAttrCache(object): def __init__(self, space): assert space.config.objspace.std.withmethodcache SIZE = 1 << space.config.objspace.std.methodcachesizeexp self.attrs = [None] * SIZE self._empty_selector = (None, INVALID) self.selectors = [self._empty_selector] * SIZE - self.indices = [0] * SIZE + self.cached_attrs = [None] * SIZE if space.config.objspace.std.withmethodcachecounter: self.hits = {} self.misses = {} @@ -340,6 +355,8 @@ self.attrs[i] = None for i in range(len(self.selectors)): self.selectors[i] = self._empty_selector + for i in range(len(self.cached_attrs)): + self.cached_attrs[i] = None # ____________________________________________________________ # object implementation @@ -416,16 +433,16 @@ self.typedef is W_InstanceObject.typedef) self._init_empty(w_subtype.terminator) - def getslotvalue(self, index): - key 
= ("slot", SLOTS_STARTING_FROM + index) + def getslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) return self._get_mapdict_map().read(self, key) - def setslotvalue(self, index, w_value): - key = ("slot", SLOTS_STARTING_FROM + index) + def setslotvalue(self, slotindex, w_value): + key = ("slot", SLOTS_STARTING_FROM + slotindex) self._get_mapdict_map().write(self, key, w_value) - def delslotvalue(self, index): - key = ("slot", SLOTS_STARTING_FROM + index) + def delslotvalue(self, slotindex): + key = ("slot", SLOTS_STARTING_FROM + slotindex) new_obj = self._get_mapdict_map().delete(self, key) if new_obj is None: return False @@ -460,11 +477,13 @@ self.map = map self.storage = make_sure_not_resized([None] * map.size_estimate()) - def _mapdict_read_storage(self, index): - assert index >= 0 - return self.storage[index] - def _mapdict_write_storage(self, index, value): - self.storage[index] = value + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + return self.storage[storageindex] + + def _mapdict_write_storage(self, storageindex, value): + self.storage[storageindex] = value + def _mapdict_storage_length(self): return len(self.storage) def _set_mapdict_storage_and_map(self, storage, map): @@ -519,7 +538,6 @@ rangenmin1 = unroll.unrolling_iterable(range(nmin1)) class subcls(BaseMapdictObject, supercls): def _init_empty(self, map): - from rpython.rlib.debug import make_sure_not_resized for i in rangen: setattr(self, "_value%s" % i, erase_item(None)) self.map = map @@ -531,26 +549,26 @@ erased = getattr(self, "_value%s" % nmin1) return unerase_list(erased) - def _mapdict_read_storage(self, index): - assert index >= 0 - if index < nmin1: + def _mapdict_read_storage(self, storageindex): + assert storageindex >= 0 + if storageindex < nmin1: for i in rangenmin1: - if index == i: + if storageindex == i: erased = getattr(self, "_value%s" % i) return unerase_item(erased) if self._has_storage_list(): - return 
self._mapdict_get_storage_list()[index - nmin1] + return self._mapdict_get_storage_list()[storageindex - nmin1] erased = getattr(self, "_value%s" % nmin1) return unerase_item(erased) - def _mapdict_write_storage(self, index, value): + def _mapdict_write_storage(self, storageindex, value): erased = erase_item(value) for i in rangenmin1: - if index == i: + if storageindex == i: setattr(self, "_value%s" % i, erased) return if self._has_storage_list(): - self._mapdict_get_storage_list()[index - nmin1] = value + self._mapdict_get_storage_list()[storageindex - nmin1] = value return setattr(self, "_value%s" % nmin1, erased) @@ -785,7 +803,7 @@ class CacheEntry(object): version_tag = None - index = 0 + storageindex = 0 w_method = None # for callmethod success_counter = 0 failure_counter = 0 @@ -818,14 +836,14 @@ pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, map, version_tag, index, w_method=None): +def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): entry = pycode._mapdict_caches[nameindex] if entry is INVALID_CACHE_ENTRY: entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry entry.map_wref = weakref.ref(map) entry.version_tag = version_tag - entry.index = index + entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: entry.failure_counter += 1 @@ -837,7 +855,7 @@ map = w_obj._get_mapdict_map() if entry.is_valid_for_map(map) and entry.w_method is None: # everything matches, it's incredibly fast - return w_obj._mapdict_read_storage(entry.index) + return w_obj._mapdict_read_storage(entry.storageindex) return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map) LOAD_ATTR_caching._always_inline_ = True @@ -871,19 +889,19 @@ selector = ("slot", SLOTS_STARTING_FROM + w_descr.index) else: # There is a non-data descriptor in the class. 
If there is - # also a dict attribute, use the latter, caching its position. + # also a dict attribute, use the latter, caching its storageindex. # If not, we loose. We could do better in this case too, # but we don't care too much; the common case of a method # invocation is handled by LOOKUP_METHOD_xxx below. selector = (name, DICT) # if selector[1] != INVALID: - index = map.index(selector) - if index >= 0: + attr = map.find_map_attr(selector) + if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, - # map.index() will always return -1 if selector[1]==DICT. - _fill_cache(pycode, nameindex, map, version_tag, index) - return w_obj._mapdict_read_storage(index) + # map.find_map_attr will always return None if selector[1]==DICT. + _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) + return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 return space.getattr(w_obj, w_name) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -1,18 +1,22 @@ -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +"""Functionality shared between bytes/bytearray/unicode""" + from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import ovfcheck -from rpython.rlib.rstring import split, rsplit, replace, startswith, endswith +from rpython.rlib.rstring import endswith, replace, rsplit, split, startswith + +from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.gateway import WrappedDefault, unwrap_spec +from pypy.objspace.std import slicetype +from 
pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice class StringMethods(object): def _sliced(self, space, s, start, stop, orig_obj): assert start >= 0 assert stop >= 0 - #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str): + #if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), + # space.w_str): # return orig_obj return self._new(s[start:stop]) @@ -21,7 +25,7 @@ value = self._val(space) lenself = len(value) start, end = slicetype.unwrap_start_stop( - space, lenself, w_start, w_end, upper_bound=upper_bound) + space, lenself, w_start, w_end, upper_bound=upper_bound) return (value, start, end) def descr_len(self, space): @@ -31,17 +35,14 @@ # pass def descr_contains(self, space, w_sub): - from pypy.objspace.std.bytearrayobject import W_BytearrayObject - if (isinstance(self, W_BytearrayObject) and - space.isinstance_w(w_sub, space.w_int)): - char = space.int_w(w_sub) - return _descr_contains_bytearray(self.data, space, char) - return space.newbool(self._val(space).find(self._op_val(space, w_sub)) >= 0) + value = self._val(space) + other = self._op_val(space, w_sub) + return space.newbool(value.find(other) >= 0) def descr_add(self, space, w_other): try: other = self._op_val(space, w_other) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -50,7 +51,7 @@ def descr_mul(self, space, w_times): try: times = space.getindex_w(w_times, space.w_OverflowError) - except OperationError, e: + except OperationError as e: if e.match(space, space.w_TypeError): return space.w_NotImplemented raise @@ -82,12 +83,11 @@ if index < 0: index += selflen if index < 0 or index >= selflen: - raise OperationError(space.w_IndexError, - space.wrap("string index out of range")) + raise operationerrfmt(space.w_IndexError, + "string index out of range") from pypy.objspace.std.bytearrayobject import W_BytearrayObject if isinstance(self, 
W_BytearrayObject): return space.wrap(ord(selfvalue[index])) - #return wrapchar(space, selfvalue[index]) return self._new(selfvalue[index]) def descr_getslice(self, space, w_start, w_stop): @@ -115,35 +115,39 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("center() argument 2 must be a single character")) + raise operationerrfmt(space.w_TypeError, + "center() argument 2 must be a single " + "character") d = width - len(value) - if d>0: + if d > 0: offset = d//2 + (d & width & 1) fillchar = fillchar[0] # annotator hint: it's a single character - u_centered = offset * fillchar + value + (d - offset) * fillchar + centered = offset * fillchar + value + (d - offset) * fillchar else: - u_centered = value + centered = value - return self._new(u_centered) + return self._new(centered) def descr_count(self, space, w_sub, w_start=None, w_end=None): value, start, end = self._convert_idx_params(space, w_start, w_end) - return space.newint(value.count(self._op_val(space, w_sub), start, end)) + return space.newint(value.count(self._op_val(space, w_sub), start, + end)) def descr_decode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - unicode_from_string, decode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, decode_object, unicode_from_string) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) if encoding is None and errors is None: return unicode_from_string(space, self) return decode_object(space, self, encoding, errors) def descr_encode(self, space, w_encoding=None, w_errors=None): - from pypy.objspace.std.unicodeobject import _get_encoding_and_errors, \ - encode_object - encoding, errors = _get_encoding_and_errors(space, w_encoding, w_errors) + from 
pypy.objspace.std.unicodeobject import ( + _get_encoding_and_errors, encode_object) + encoding, errors = _get_encoding_and_errors(space, w_encoding, + w_errors) return encode_object(space, self, encoding, errors) @unwrap_spec(tabsize=int) @@ -156,18 +160,19 @@ try: ovfcheck(len(splitted) * tabsize) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("new string is too long")) + raise operationerrfmt(space.w_OverflowError, + "new string is too long") expanded = oldtoken = splitted.pop(0) for token in splitted: - expanded += self._chr(' ') * self._tabindent(oldtoken, tabsize) + token + expanded += self._chr(' ') * self._tabindent(oldtoken, + tabsize) + token oldtoken = token return self._new(expanded) def _tabindent(self, token, tabsize): - "calculates distance behind the token to the next tabstop" + """calculates distance behind the token to the next tabstop""" distance = tabsize if token: @@ -203,8 +208,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.find(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.index")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.index") return space.wrap(res) @@ -212,8 +217,8 @@ (value, start, end) = self._convert_idx_params(space, w_start, w_end) res = value.rfind(self._op_val(space, w_sub), start, end) if res < 0: - raise OperationError(space.w_ValueError, - space.wrap("substring not found in string.rindex")) + raise operationerrfmt(space.w_ValueError, + "substring not found in string.rindex") return space.wrap(res) @@ -307,22 +312,6 @@ return space.newbool(cased) def descr_join(self, space, w_list): - from pypy.objspace.std.bytesobject import W_BytesObject - from pypy.objspace.std.unicodeobject import W_UnicodeObject - - if isinstance(self, W_BytesObject): - l = space.listview_bytes(w_list) - if l is not None: - if len(l) == 1: - return 
space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - elif isinstance(self, W_UnicodeObject): - l = space.listview_unicode(w_list) - if l is not None: - if len(l) == 1: - return space.wrap(l[0]) - return space.wrap(self._val(space).join(l)) - list_w = space.listview(w_list) size = len(list_w) @@ -349,8 +338,7 @@ if check_item == 1: raise operationerrfmt( space.w_TypeError, - "sequence item %d: expected string, %s " - "found", i, space.type(w_s).getname(space)) + "sequence item %d: expected string, %T found", i, w_s) elif check_item == 2: return self._join_autoconvert(space, list_w) prealloc_size += len(self._op_val(space, w_s)) @@ -370,9 +358,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("ljust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "ljust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -385,9 +373,9 @@ value = self._val(space) fillchar = self._op_val(space, w_fillchar) if len(fillchar) != 1: - raise OperationError(space.w_TypeError, - space.wrap("rjust() argument 2 must be a single character")) - + raise operationerrfmt(space.w_TypeError, + "rjust() argument 2 must be a single " + "character") d = width - len(value) if d > 0: fillchar = fillchar[0] # annotator hint: it's a single character @@ -406,8 +394,7 @@ value = self._val(space) From noreply at buildbot.pypy.org Sun Aug 31 18:51:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 18:51:04 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Returning a long long Message-ID: <20140831165104.196201C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73242:a1f2e41e252b Date: 2014-08-31 19:49 +0300 http://bitbucket.org/pypy/pypy/changeset/a1f2e41e252b/ Log: Returning a long long diff --git 
a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -441,6 +441,9 @@ resloc = self.resloc if self.restype == 'S': self.mc.VMOV_sc(resloc.value, r.s0.value) + elif self.restype == 'L': + assert resloc.is_vfp_reg() + self.mc.FMDRR(resloc.value, r.r0.value, r.r1.value) # ensure the result is wellformed and stored in the correct location if resloc is not None and resloc.is_core_reg(): self._ensure_result_bit_extension(resloc, @@ -450,7 +453,10 @@ if self.resloc is None: return [], [] if self.resloc.is_vfp_reg(): - return [], [r.d0] + if self.restype == 'L': # long long + return [r.r0, r.r1], [] + else: + return [], [r.d0] assert self.resloc.is_core_reg() return [r.r0], [] From noreply at buildbot.pypy.org Sun Aug 31 19:23:50 2014 From: noreply at buildbot.pypy.org (dripton) Date: Sun, 31 Aug 2014 19:23:50 +0200 (CEST) Subject: [pypy-commit] pypy fix_find_executable_bug: issue #1856, find_executable should only find executable files. Message-ID: <20140831172350.74F521D38CE@cobra.cs.uni-duesseldorf.de> Author: David Ripton Branch: fix_find_executable_bug Changeset: r73243:8ff323403724 Date: 2014-08-31 11:20 -0400 http://bitbucket.org/pypy/pypy/changeset/8ff323403724/ Log: issue #1856, find_executable should only find executable files. We use os.access to check whether the file is executable by the current user. os.access compares against the user's real uid and gid, not the effective uid and gid. I think that's okay because nobody should be running pypy setuid/setgid. diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -35,8 +35,12 @@ for dir in path.split(os.pathsep): fn = os.path.join(dir, executable) if os.path.isfile(fn): - executable = fn - break + # os.access checks using the user's real uid and gid. 
+ # Since pypy should not be run setuid/setgid, this + # should be sufficient. + if os.access(fn, os.X_OK): + executable = fn + break executable = rpath.rabspath(executable) # 'sys.executable' should not end up being an non-existing file; diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -57,6 +57,7 @@ a.join('pypy').ensure(file=True) b.join('pypy').ensure(file=True) # + monkeypatch.setattr(os, 'access', lambda x, y: True) # if there is already a slash, don't do anything monkeypatch.chdir(tmpdir) assert find_executable('a/pypy') == a.join('pypy') @@ -82,7 +83,11 @@ # if pypy is found but it's not a file, ignore it c.join('pypy').ensure(dir=True) assert find_executable('pypy') == a.join('pypy') + # if pypy is found but it's not executable, ignore it + monkeypatch.setattr(os, 'access', lambda x, y: False) + assert find_executable('pypy') == '' # + monkeypatch.setattr(os, 'access', lambda x, y: True) monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) From noreply at buildbot.pypy.org Sun Aug 31 19:23:51 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Sun, 31 Aug 2014 19:23:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in dripton/pypy/fix_find_executable_bug (pull request #276) Message-ID: <20140831172351.9C1761D38CE@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r73244:b4f58b28e57d Date: 2014-08-31 10:23 -0700 http://bitbucket.org/pypy/pypy/changeset/b4f58b28e57d/ Log: Merged in dripton/pypy/fix_find_executable_bug (pull request #276) issue #1856, find_executable should only find executable files. 
diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -35,8 +35,12 @@ for dir in path.split(os.pathsep): fn = os.path.join(dir, executable) if os.path.isfile(fn): - executable = fn - break + # os.access checks using the user's real uid and gid. + # Since pypy should not be run setuid/setgid, this + # should be sufficient. + if os.access(fn, os.X_OK): + executable = fn + break executable = rpath.rabspath(executable) # 'sys.executable' should not end up being an non-existing file; diff --git a/pypy/module/sys/test/test_initpath.py b/pypy/module/sys/test/test_initpath.py --- a/pypy/module/sys/test/test_initpath.py +++ b/pypy/module/sys/test/test_initpath.py @@ -57,6 +57,7 @@ a.join('pypy').ensure(file=True) b.join('pypy').ensure(file=True) # + monkeypatch.setattr(os, 'access', lambda x, y: True) # if there is already a slash, don't do anything monkeypatch.chdir(tmpdir) assert find_executable('a/pypy') == a.join('pypy') @@ -82,7 +83,11 @@ # if pypy is found but it's not a file, ignore it c.join('pypy').ensure(dir=True) assert find_executable('pypy') == a.join('pypy') + # if pypy is found but it's not executable, ignore it + monkeypatch.setattr(os, 'access', lambda x, y: False) + assert find_executable('pypy') == '' # + monkeypatch.setattr(os, 'access', lambda x, y: True) monkeypatch.setattr(initpath, 'we_are_translated', lambda: True) monkeypatch.setattr(initpath, '_WIN32', True) monkeypatch.setenv('PATH', str(a)) From noreply at buildbot.pypy.org Sun Aug 31 19:35:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 19:35:06 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Rewrite this test to no longer depend on (possibly buggy) ctypes Message-ID: <20140831173506.972661D38CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73245:9e40d9efd4d2 Date: 2014-08-31 19:34 +0200 http://bitbucket.org/pypy/pypy/changeset/9e40d9efd4d2/ 
Log: Rewrite this test to no longer depend on (possibly buggy) ctypes callbacks. diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2718,12 +2718,11 @@ assert r == result def test_call_release_gil_variable_function_and_arguments(self): - # NOTE NOTE NOTE - # This also works as a test for ctypes and libffi. - # On some platforms, one of these is buggy... + from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.libffi import types from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat + from rpython.translator.c import primitive cpu = self.cpu rnd = random.Random(525) @@ -2752,24 +2751,65 @@ (types.float, rffi.FLOAT), ] * 4 - for k in range(100): + NB_TESTS = 100 + c_source = [] + all_tests = [] + export_symbols = [] + + def prepare_c_source(): POSSIBLE_TYPES = [rnd.choice(ALL_TYPES) for i in range(random.randrange(2, 5))] load_factor = rnd.random() keepalive_factor = rnd.random() # - def pseudo_c_function(*args): - seen.append(list(args)) - # ffitypes = [] ARGTYPES = [] for i in range(rnd.randrange(4, 20)): ffitype, TP = rnd.choice(POSSIBLE_TYPES) ffitypes.append(ffitype) ARGTYPES.append(TP) + fn_name = 'vartest%d' % k + all_tests.append((ARGTYPES, ffitypes, fn_name)) # - FPTR = self.Ptr(self.FuncType(ARGTYPES, lltype.Void)) - func_ptr = llhelper(FPTR, pseudo_c_function) + fn_args = [] + for i, ARG in enumerate(ARGTYPES): + arg_decl = primitive.cdecl(primitive.PrimitiveType[ARG], + 'x%d' % i) + fn_args.append(arg_decl) + var_name = 'argcopy_%s_x%d' % (fn_name, i) + var_decl = primitive.cdecl(primitive.PrimitiveType[ARG], + var_name) + c_source.append('static %s;' % var_decl) + getter_name = '%s_get%d' % (fn_name, i) + export_symbols.append(getter_name) + c_source.append('void %s(%s) { *p = %s; }' % ( + getter_name, + 
primitive.cdecl(primitive.PrimitiveType[ARG], '*p'), + var_name)) + export_symbols.append(fn_name) + c_source.append('') + c_source.append('void %s(%s)' % (fn_name, ', '.join(fn_args))) + c_source.append('{') + for i in range(len(ARGTYPES)): + c_source.append(' argcopy_%s_x%d = x%d;' % (fn_name, i, i)) + c_source.append('}') + c_source.append('') + + for k in range(NB_TESTS): + prepare_c_source() + + eci = ExternalCompilationInfo( + separate_module_sources=['\n'.join(c_source)], + export_symbols=export_symbols) + + for k in range(NB_TESTS): + ARGTYPES, ffitypes, fn_name = all_tests[k] + func_ptr = rffi.llexternal(fn_name, ARGTYPES, lltype.Void, + compilation_info=eci, _nowrapper=True) + load_factor = rnd.random() + keepalive_factor = rnd.random() + # + FPTR = lltype.typeOf(func_ptr) funcbox = self.get_funcbox(cpu, func_ptr) calldescr = cpu._calldescr_dynamic_for_tests(ffitypes, types.void) faildescr = BasicFailDescr(1) @@ -2840,12 +2880,21 @@ looptoken = JitCellToken() self.cpu.compile_loop(argboxes, ops, looptoken) # - seen = [] deadframe = self.cpu.execute_token(looptoken, *argvalues_normal) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 0 expected = argvalues[1:] - [got] = seen + got = [] + for i, ARG in enumerate(ARGTYPES): + PARG = rffi.CArrayPtr(ARG) + getter_name = '%s_get%d' % (fn_name, i) + getter_ptr = rffi.llexternal(getter_name, [PARG], lltype.Void, + compilation_info=eci, + _nowrapper=True) + my_arg = lltype.malloc(PARG.TO, 1, zero=True, flavor='raw') + getter_ptr(my_arg) + got.append(my_arg[0]) + lltype.free(my_arg, flavor='raw') different_values = ['%r != %r' % (a, b) for a, b in zip(got, expected) if a != b] From noreply at buildbot.pypy.org Sun Aug 31 19:38:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 19:38:42 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Comments Message-ID: <20140831173842.61A9F1D38CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: 
r73246:9b92c5b09665 Date: 2014-08-31 19:38 +0200 http://bitbucket.org/pypy/pypy/changeset/9b92c5b09665/ Log: Comments diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2757,6 +2757,12 @@ export_symbols = [] def prepare_c_source(): + """Pick a random choice of argument types and length, + and build a C function with these arguments. The C + function will simply copy them all into static global + variables. There are then additional functions to fetch + them, one per argument, with a signature 'void(ARG *)'. + """ POSSIBLE_TYPES = [rnd.choice(ALL_TYPES) for i in range(random.randrange(2, 5))] load_factor = rnd.random() From noreply at buildbot.pypy.org Sun Aug 31 19:50:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 19:50:36 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: better error message Message-ID: <20140831175036.E63461C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73247:02f54f076c4f Date: 2014-08-31 19:50 +0200 http://bitbucket.org/pypy/pypy/changeset/02f54f076c4f/ Log: better error message diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2901,10 +2901,11 @@ getter_ptr(my_arg) got.append(my_arg[0]) lltype.free(my_arg, flavor='raw') - different_values = ['%r != %r' % (a, b) - for a, b in zip(got, expected) - if a != b] - assert got == expected, ', '.join(different_values) + different_values = ['x%d: got %r, expected %r' % (i, a, b) + for i, (a, b) in enumerate(zip(got, expected)) + if a != b] + assert got == expected, '\n'.join( + ['bad args, signature %r' % codes[1:]] + different_values) def test_guard_not_invalidated(self): From noreply at buildbot.pypy.org Sun Aug 31 20:09:10 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 20:09:10 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: in-progress Message-ID: <20140831180910.401B21C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73248:c7bc3237fdaa Date: 2014-08-31 20:08 +0200 http://bitbucket.org/pypy/pypy/changeset/c7bc3237fdaa/ Log: in-progress diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -80,15 +80,6 @@ self.mc.gen_load_int(r.ip.value, n) self.mc.SUB_rr(r.sp.value, r.sp.value, r.ip.value) - def _must_remap_fnloc(self): - fnloc = self.fnloc - if fnloc.is_stack(): - return True - if self.is_call_release_gil: - if fnloc is r.r5 or fnloc is r.r6 or fnloc is r.r7: - return True - return False - def call_releasegil_addr_and_move_real_arguments(self, fastgil): assert self.is_call_release_gil assert not self.asm._is_asmgcc() @@ -121,7 +112,7 @@ self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ) # try to claim the lock self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed? - self.mc.DMB(c=c.EQ) + self.mc.DMB() # the success of the lock acquisition is defined by # 'EQ is true', or equivalently by 'r3 == 0'. 
# @@ -268,7 +259,7 @@ # or on the stack, which we can not access later # If this happens to be the case we remap the register to r4 and use r4 # to call the function - if self.fnloc in r.argument_regs or self._must_remap_fnloc(): + if not self.fnloc.is_imm(): non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 @@ -358,7 +349,6 @@ argtype = FLOAT reg = self.get_next_vfp(argtype) if reg: - assert len(float_regs) < len(r.vfp_argument_regs) float_locs.append(arg) assert reg not in float_regs float_regs.append(reg) @@ -408,7 +398,7 @@ # or on the stack, which we can not access later # If this happens to be the case we remap the register to r4 and use r4 # to call the function - if self.fnloc in non_float_regs or self._must_remap_fnloc(): + if not self.fnloc.is_imm(): non_float_locs.append(self.fnloc) non_float_regs.append(r.r4) self.fnloc = r.r4 diff --git a/rpython/jit/backend/arm/codebuilder.py b/rpython/jit/backend/arm/codebuilder.py --- a/rpython/jit/backend/arm/codebuilder.py +++ b/rpython/jit/backend/arm/codebuilder.py @@ -332,8 +332,9 @@ | (rd & 0xF) << 12 | (rn & 0xF) << 16) - def DMB(self, c=cond.AL): - self.write32(c << 28 | 0x157ff05f) + def DMB(self): + # note: 'cond' is only permitted on Thumb here + self.write32(0xf57ff05f) DIV = binary_helper_call('int_div') MOD = binary_helper_call('int_mod') From noreply at buildbot.pypy.org Sun Aug 31 20:40:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 20:40:00 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: bug fix: the signature 'iiiIi' used to think the last 'i' goes into r3, Message-ID: <20140831184000.8FA791C31B7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73249:373572aa281c Date: 2014-08-31 20:39 +0200 http://bitbucket.org/pypy/pypy/changeset/373572aa281c/ Log: bug fix: the signature 'iiiIi' used to think the last 'i' goes into r3, but actually nothing goes into r3. 
diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -313,8 +313,8 @@ argtypes = self.argtypes r_register_count = 0 - count = 0 # stack alignment counter on_stack = 0 + for i in range(len(arglocs)): argtype = INT if i < len(argtypes) and argtypes[i] == 'S': @@ -342,6 +342,8 @@ longlong_mask |= 2 r_register_count = 4 continue + elif r_register_count == 3: + r_register_count = 4 else: # A 64-bit float argument. Goes into the next free v# # register, or if none, to the stack aligned to an @@ -354,9 +356,8 @@ float_regs.append(reg) continue # float or longlong argument that needs to go on the stack - if count % 2 != 0: + if on_stack & 1: # odd: realign stack_args.append(None) - count = 0 on_stack += 1 stack_args.append(arg) on_stack += 2 @@ -371,9 +372,8 @@ singlefloats.append((arg, tgt)) else: # Singlefloat argument that needs to go on the stack # treated the same as a regular core register argument - count += 1 + stack_args.append(arg) on_stack += 1 - stack_args.append(arg) else: # Regular one-word argument. Goes into the next register # free from the list r0, r1, r2, r3, or to the stack. 
@@ -383,12 +383,11 @@ non_float_locs.append(arg) non_float_regs.append(reg) else: # non-float argument that needs to go on the stack - count += 1 + stack_args.append(arg) on_stack += 1 - stack_args.append(arg) # align the stack - if count % 2 != 0: + if on_stack & 1: # odd: realign stack_args.append(None) on_stack += 1 self._push_stack_args(stack_args, on_stack*WORD) From noreply at buildbot.pypy.org Sun Aug 31 21:15:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 21:15:25 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Fix for the strange interleaving possible with a mixture of d and s Message-ID: <20140831191525.3193F1C06D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: arm-longlong Changeset: r73250:3463d820c160 Date: 2014-08-31 21:14 +0200 http://bitbucket.org/pypy/pypy/changeset/3463d820c160/ Log: Fix for the strange interleaving possible with a mixture of d and s registers diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -276,29 +276,22 @@ def get_next_vfp(self, tp): assert tp in 'fS' - if self.next_arg_vfp == -1: - return None - if tp == 'S': + if tp == 'f': + # 64bit double + i = max(self.next_arg_vfp, (self.next_arg_svfp + 1) >> 1) + if i >= len(r.vfp_argument_regs): + return None + self.next_arg_vfp = i + 1 + return r.vfp_argument_regs[i] + else: + # 32bit float i = self.next_arg_svfp - next_vfp = (i >> 1) + 1 - if not (i + 1) & 1: # i is even - self.next_arg_vfp = max(self.next_arg_vfp, next_vfp) - self.next_arg_svfp = self.next_arg_vfp << 1 - else: - self.next_arg_svfp += 1 - self.next_arg_vfp = next_vfp - lst = r.svfp_argument_regs - else: # 64bit double - i = self.next_arg_vfp - self.next_arg_vfp += 1 - if self.next_arg_svfp >> 1 == i: - self.next_arg_svfp = self.next_arg_vfp << 1 - lst = r.vfp_argument_regs - try: - return lst[i] - except IndexError: - self.next_arg_vfp = 
self.next_arg_svfp = -1 - return None + if not (i & 1): # if i is even + i = max(i, self.next_arg_vfp << 1) + if i >= len(r.svfp_argument_regs): + return None + self.next_arg_svfp = i + 1 + return r.svfp_argument_regs[i] def prepare_arguments(self): non_float_locs = [] diff --git a/rpython/jit/backend/arm/test/test_callbuilder.py b/rpython/jit/backend/arm/test/test_callbuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/arm/test/test_callbuilder.py @@ -0,0 +1,34 @@ +from rpython.jit.backend.arm.callbuilder import HardFloatCallBuilder +from rpython.jit.backend.arm import registers as r + + + +def test_hf_vfp_registers_all_singlefloat(): + hf = HardFloatCallBuilder.__new__(HardFloatCallBuilder) + got = [hf.get_next_vfp('S') for i in range(18)] + assert got == [r.s0, r.s1, r.s2, r.s3, r.s4, r.s5, r.s6, r.s7, + r.s8, r.s9, r.s10, r.s11, r.s12, r.s13, r.s14, r.s15, + None, None] + +def test_hf_vfp_registers_all_doublefloat(): + hf = HardFloatCallBuilder.__new__(HardFloatCallBuilder) + got = [hf.get_next_vfp('f') for i in range(10)] + assert got == [r.d0, r.d1, r.d2, r.d3, r.d4, r.d5, r.d6, r.d7, + None, None] + +def test_hf_vfp_registers_mixture(): + hf = HardFloatCallBuilder.__new__(HardFloatCallBuilder) + got = [hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f'), + hf.get_next_vfp('S'), hf.get_next_vfp('f')] + assert got == [r.s0, r.d1, + r.s1, r.d2, + r.s6, r.d4, + r.s7, r.d5, + r.s12, r.d7, + r.s13, None, + None, None] From noreply at buildbot.pypy.org Sun Aug 31 22:17:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 22:17:28 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: More fixes to avoid ctypes/libffi interference Message-ID: <20140831201728.ABE271C06D8@cobra.cs.uni-duesseldorf.de> Author: 
Armin Rigo Branch: arm-longlong Changeset: r73251:026488ee760e Date: 2014-08-31 22:17 +0200 http://bitbucket.org/pypy/pypy/changeset/026488ee760e/ Log: More fixes to avoid ctypes/libffi interference diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2794,11 +2794,16 @@ var_name)) export_symbols.append(fn_name) c_source.append('') - c_source.append('void %s(%s)' % (fn_name, ', '.join(fn_args))) + c_source.append('static void real%s(%s)' % ( + fn_name, ', '.join(fn_args))) c_source.append('{') for i in range(len(ARGTYPES)): c_source.append(' argcopy_%s_x%d = x%d;' % (fn_name, i, i)) c_source.append('}') + c_source.append('void *%s(void)' % fn_name) + c_source.append('{') + c_source.append(' return (void *)&real%s;' % fn_name) + c_source.append('}') c_source.append('') for k in range(NB_TESTS): @@ -2810,13 +2815,12 @@ for k in range(NB_TESTS): ARGTYPES, ffitypes, fn_name = all_tests[k] - func_ptr = rffi.llexternal(fn_name, ARGTYPES, lltype.Void, - compilation_info=eci, _nowrapper=True) + func_getter_ptr = rffi.llexternal(fn_name, [], lltype.Signed, + compilation_info=eci, _nowrapper=True) load_factor = rnd.random() keepalive_factor = rnd.random() # - FPTR = lltype.typeOf(func_ptr) - funcbox = self.get_funcbox(cpu, func_ptr) + func_raw = func_getter_ptr() calldescr = cpu._calldescr_dynamic_for_tests(ffitypes, types.void) faildescr = BasicFailDescr(1) # @@ -2836,7 +2840,7 @@ print print codes # - argvalues = [funcbox.getint()] + argvalues = [func_raw] for TP in ARGTYPES: r = (rnd.random() - 0.5) * 999999999999.9 r = rffi.cast(TP, r) From noreply at buildbot.pypy.org Sun Aug 31 22:35:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 22:35:13 +0200 (CEST) Subject: [pypy-commit] pypy arm-longlong: Test and fix Message-ID: <20140831203513.2C5C01C328B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
arm-longlong Changeset: r73252:31554946e159 Date: 2014-08-31 23:30 +0300 http://bitbucket.org/pypy/pypy/changeset/31554946e159/ Log: Test and fix diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py --- a/rpython/jit/backend/arm/callbuilder.py +++ b/rpython/jit/backend/arm/callbuilder.py @@ -280,6 +280,7 @@ # 64bit double i = max(self.next_arg_vfp, (self.next_arg_svfp + 1) >> 1) if i >= len(r.vfp_argument_regs): + self.next_arg_svfp = 1000 # stop that sequence too return None self.next_arg_vfp = i + 1 return r.vfp_argument_regs[i] diff --git a/rpython/jit/backend/arm/test/test_callbuilder.py b/rpython/jit/backend/arm/test/test_callbuilder.py --- a/rpython/jit/backend/arm/test/test_callbuilder.py +++ b/rpython/jit/backend/arm/test/test_callbuilder.py @@ -32,3 +32,16 @@ r.s12, r.d7, r.s13, None, None, None] + +def test_hf_vfp_registers_mixture_2(): + hf = HardFloatCallBuilder.__new__(HardFloatCallBuilder) + got = [hf.get_next_vfp('f'), hf.get_next_vfp('f'), + hf.get_next_vfp('f'), hf.get_next_vfp('f'), + hf.get_next_vfp('f'), hf.get_next_vfp('f'), + hf.get_next_vfp('f'), hf.get_next_vfp('S'), + hf.get_next_vfp('f'), hf.get_next_vfp('S')] + assert got == [r.d0, r.d1, + r.d2, r.d3, + r.d4, r.d5, + r.d6, r.s14, + None, None] # <- and not r.s15 for the last item From noreply at buildbot.pypy.org Sun Aug 31 23:07:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 31 Aug 2014 23:07:29 +0200 (CEST) Subject: [pypy-commit] buildbot default: increase the timeout even more Message-ID: <20140831210729.E8E411D283E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r915:3c03276b825f Date: 2014-08-31 23:07 +0200 http://bitbucket.org/pypy/buildbot/changeset/3c03276b825f/ Log: increase the timeout even more diff --git a/bot2/pypybuildbot/arm_master.py b/bot2/pypybuildbot/arm_master.py --- a/bot2/pypybuildbot/arm_master.py +++ b/bot2/pypybuildbot/arm_master.py @@ -15,7 +15,7 @@ "jit/backend/llsupport", 
"jit/backend/test", # kill this one in case it is too slow ]), - timeout=6 * 3600) + timeout=36000) pypyJitOnlyOwnTestFactoryARM = pypybuilds.Own(cherrypick="jit", timeout=2 * 3600) pypyOwnTestFactoryARM = pypybuilds.Own(timeout=2 * 3600)