From noreply at buildbot.pypy.org Thu Oct 1 00:55:02 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Thu, 1 Oct 2015 00:55:02 +0200 (CEST) Subject: [pypy-commit] pypy py3k: hg merge 5345333d8dcd (last changeset in default branch that was merged into PyPy2 release branch). Message-ID: <20150930225502.6E68A1C1311@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r79914:865002a1287a Date: 2015-10-01 00:51 +0200 http://bitbucket.org/pypy/pypy/changeset/865002a1287a/ Log: hg merge 5345333d8dcd (last changeset in default branch that was merged into PyPy2 release branch). diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -352,8 +352,7 @@ Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files in the 'lib-python/2.7' directory are all copyrighted by the Python Software Foundation and licensed -under the Python Software License of which you can find a copy here: -http://www.python.org/doc/Copyright.html +under the terms that you can find here: https://docs.python.org/2/license.html License for 'pypy/module/unicodedata/' ====================================== @@ -435,4 +434,4 @@ The code is based on gperftools. You may see a copy of the License for it at - https://code.google.com/p/gperftools/source/browse/COPYING + https://github.com/gperftools/gperftools/blob/master/COPYING diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.2.1 +Version: 1.3.0 Summary: Foreign Function Interface for Python calling C code. 
Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.2.1" -__version_info__ = (1, 2, 1) +__version__ = "1.3.0" +__version_info__ = (1, 3, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -214,6 +214,12 @@ (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ _CFFI__UNKNOWN_PRIM) +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? 
_CFFI__UNKNOWN_LONG_DOUBLE : \ + _CFFI__UNKNOWN_FLOAT_PRIM) + #define _cffi_check_int(got, got_nonpos, expected) \ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -106,7 +106,9 @@ PRIM_UINTMAX = 47 _NUM_PRIM = 48 -_UNKNOWN_PRIM = -1 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -648,10 +648,21 @@ assert typenames[-1] == '__dotdotdot__' if len(typenames) == 1: return model.unknown_type(decl.name) - for t in typenames[:-1]: - if t not in ['int', 'short', 'long', 'signed', 'unsigned', 'char']: - raise api.FFIError(':%d: bad usage of "..."' % decl.coord.line) + + if (typenames[:-1] == ['float'] or + typenames[:-1] == ['double']): + # not for 'long double' so far + result = model.UnknownFloatType(decl.name) + else: + for t in typenames[:-1]: + if t not in ['int', 'short', 'long', 'signed', + 'unsigned', 'char']: + raise api.FFIError(':%d: bad usage of "..."' % + decl.coord.line) + result = model.UnknownIntegerType(decl.name) + if self._uses_new_feature is None: self._uses_new_feature = "'typedef %s... 
%s'" % ( ' '.join(typenames[:-1]), decl.name) - return model.UnknownIntegerType(decl.name) + + return result diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -158,12 +158,23 @@ self.c_name_with_marker = name + '&' def is_integer_type(self): - return True # for now + return True def build_backend_type(self, ffi, finishlist): raise NotImplementedError("integer type '%s' can only be used after " "compilation" % self.name) +class UnknownFloatType(BasePrimitiveType): + _attrs_ = ('name', ) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("float type '%s' can only be used after " + "compilation" % self.name) + class BaseFunctionType(BaseType): _attrs_ = ('args', 'result', 'ellipsis') diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -79,7 +79,9 @@ #define _CFFI_PRIM_UINTMAX 47 #define _CFFI__NUM_PRIM 48 -#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_FLOAT_PRIM (-2) +#define _CFFI__UNKNOWN_LONG_DOUBLE (-3) struct _cffi_global_s { diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -468,6 +468,10 @@ if tp.is_integer_type() and tp.name != '_Bool': converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name + elif isinstance(tp, model.UnknownFloatType): + # don't check with is_float_type(): it may be a 'long + # double' here, and _cffi_to_c_double would loose precision + converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) else: converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), tp.name.replace(' ', '_')) @@ -522,6 +526,8 @@ if isinstance(tp, model.BasePrimitiveType): if tp.is_integer_type(): return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif 
isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -1107,6 +1113,12 @@ ' ) <= 0)' % (tp.name, tp.name, tp.name)) self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + def _emit_bytecode_RawFunctionType(self, tp, index): self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) index += 1 diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -28,7 +28,7 @@ def _where_is_errno(): return standard_c_lib.__errno_location() -elif sys.platform in ('darwin', 'freebsd7', 'freebsd8', 'freebsd9'): +elif sys.platform == 'darwin' or sys.platform.startswith('freebsd'): standard_c_lib.__error.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__error.argtypes = None def _where_is_errno(): diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -31,15 +31,14 @@ and add the new file to pypy/doc/index-of-whatsnew.rst * go to pypy/tool/release and run ``force-builds.py `` - The following binaries should be built, however, we need more buildbots - - JIT: windows, linux, os/x, armhf, armel - - no JIT: windows, linux, os/x - - sandbox: linux, os/x + The following JIT binaries should be built, however, we need more buildbots + windows, linux-32, linux-64, osx64, armhf-raring, armhf-raspberrian, armel, + freebsd64 * wait for builds to complete, make sure there are no failures * download the builds, repackage binaries. Tag the release version and download and repackage source from bitbucket. 
You may find it - convenient to use the ``repackage.sh`` script in pypy/tools to do this. + convenient to use the ``repackage.sh`` script in pypy/tool/release to do this. Otherwise repackage and upload source "-src.tar.bz2" to bitbucket and to cobra, as some packagers prefer a clearly labeled source package diff --git a/pypy/doc/whatsnew-2.6.1.rst b/pypy/doc/whatsnew-2.6.1.rst --- a/pypy/doc/whatsnew-2.6.1.rst +++ b/pypy/doc/whatsnew-2.6.1.rst @@ -70,3 +70,7 @@ .. branch: vmprof-review Clean up of vmprof, notably to handle correctly multiple threads + +.. branch: no_boehm_dl + +Remove extra link library from Boehm GC diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ======================= .. this is a revision shortly after release-2.6.1 -.. startrev: 83ebc73d4fcb +.. startrev: 07769be4057b diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload -VERSION = "1.2.1" +VERSION = "1.3.0" class Module(MixedModule): diff --git a/pypy/module/_cffi_backend/cffi_opcode.py b/pypy/module/_cffi_backend/cffi_opcode.py --- a/pypy/module/_cffi_backend/cffi_opcode.py +++ b/pypy/module/_cffi_backend/cffi_opcode.py @@ -9,16 +9,16 @@ assert isinstance(self.arg, str) return '(_cffi_opcode_t)(%s)' % (self.arg,) classname = CLASS_NAME[self.op] - return '_CFFI_OP(_CFFI_OP_%s, %d)' % (classname, self.arg) + return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg) def as_python_bytes(self): - if self.op is None: - if self.arg.isdigit(): - value = int(self.arg) # non-negative: '-' not in self.arg - if value >= 2**31: - raise OverflowError("cannot emit %r: limited to 2**31-1" - % (self.arg,)) - return format_four_bytes(value) + if self.op is None and 
self.arg.isdigit(): + value = int(self.arg) # non-negative: '-' not in self.arg + if value >= 2**31: + raise OverflowError("cannot emit %r: limited to 2**31-1" + % (self.arg,)) + return format_four_bytes(value) + if isinstance(self.arg, str): from .ffiplatform import VerificationError raise VerificationError("cannot emit to Python: %r" % (self.arg,)) return format_four_bytes((self.arg << 8) | self.op) @@ -106,7 +106,9 @@ PRIM_UINTMAX = 47 _NUM_PRIM = 48 -_UNKNOWN_PRIM = -1 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -81,6 +81,13 @@ if num == cffi_opcode._UNKNOWN_PRIM: raise oefmt(ffi.w_FFIError, "primitive integer type with an " "unexpected size (or not an integer type at all)") + elif num == cffi_opcode._UNKNOWN_FLOAT_PRIM: + raise oefmt(ffi.w_FFIError, "primitive floating-point type with an " + "unexpected size (or not a float type at all)") + elif num == cffi_opcode._UNKNOWN_LONG_DOUBLE: + raise oefmt(ffi.w_FFIError, "primitive floating-point type is " + "'long double', not supported for now with " + "the syntax 'typedef double... 
xxx;'") else: raise oefmt(space.w_NotImplementedError, "prim=%d", num) realize_cache = space.fromcache(RealizeCache) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,6 +1,9 @@ # ____________________________________________________________ import sys +assert __version__ == "1.3.0", ("This test_c.py file is for testing a version" + " of cffi that differs from the one that we" + " get from 'import _cffi_backend'") if sys.version_info < (3,): type_or_class = "type" mandatory_b_prefix = '' @@ -3424,7 +3427,3 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") - -def test_version(): - # this test is here mostly for PyPy - assert __version__ == "1.2.1" diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,8 +1,12 @@ +import sys + class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', '_rawffi', - 'signal', 'select', 'fcntl', + 'signal', 'select', 'binascii')) + if sys.platform != 'win32': + spaceconfig['usemodules'] += ('fcntl',) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -1,12 +1,19 @@ +import sys + from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) class AppTestSemaphore: spaceconfig = dict(usemodules=('_multiprocessing', 'thread', - 'signal', 'select', 'fcntl', + 'signal', 'select', 'binascii', 'struct')) + if sys.platform == 'win32': + 
spaceconfig['usemodules'] += ('_rawffi',) + else: + spaceconfig['usemodules'] += ('fcntl',) + def setup_class(cls): cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE) cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1,7 +1,7 @@ # Generated by pypy/tool/import_cffi.py import sys, os, py -from cffi import FFI, VerificationError +from cffi import FFI, VerificationError, FFIError from cffi import recompiler from pypy.module.test_lib_pypy.cffi_tests.udir import udir from pypy.module.test_lib_pypy.cffi_tests.support import u @@ -1057,14 +1057,54 @@ assert lib.nu == 20 def test_some_float_type(): - py.test.skip("later") ffi = FFI() - ffi.cdef("typedef double... foo_t; foo_t sum(foo_t[]);") + ffi.cdef(""" + typedef double... foo_t; + typedef float... bar_t; + foo_t sum(foo_t[]); + bar_t neg(bar_t); + """) lib = verify(ffi, 'test_some_float_type', """ typedef float foo_t; static foo_t sum(foo_t x[]) { return x[0] + x[1]; } + typedef double bar_t; + static double neg(double x) { return -x; } """) assert lib.sum([40.0, 2.25]) == 42.25 + assert lib.sum([12.3, 45.6]) != 12.3 + 45.6 # precision loss + assert lib.neg(12.3) == -12.3 # no precision loss + assert ffi.sizeof("foo_t") == ffi.sizeof("float") + assert ffi.sizeof("bar_t") == ffi.sizeof("double") + +def test_some_float_invalid_1(): + ffi = FFI() + py.test.raises(FFIError, ffi.cdef, "typedef long double... foo_t;") + +def test_some_float_invalid_2(): + ffi = FFI() + ffi.cdef("typedef double... 
foo_t; foo_t neg(foo_t);") + lib = verify(ffi, 'test_some_float_invalid_2', """ + typedef unsigned long foo_t; + foo_t neg(foo_t x) { return -x; } + """) + e = py.test.raises(ffi.error, getattr, lib, 'neg') + assert str(e.value) == ("primitive floating-point type with an unexpected " + "size (or not a float type at all)") + +def test_some_float_invalid_3(): + ffi = FFI() + ffi.cdef("typedef double... foo_t; foo_t neg(foo_t);") + lib = verify(ffi, 'test_some_float_invalid_3', """ + typedef long double foo_t; + foo_t neg(foo_t x) { return -x; } + """) + if ffi.sizeof("long double") == ffi.sizeof("double"): + assert lib.neg(12.3) == -12.3 + else: + e = py.test.raises(ffi.error, getattr, lib, 'neg') + assert str(e.value) == ("primitive floating-point type is " + "'long double', not supported for now with " + "the syntax 'typedef double... xxx;'") def test_issue200(): ffi = FFI() diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -175,9 +175,6 @@ class AppTestLockSignals(GenericTestThread): pytestmark = py.test.mark.skipif("os.name != 'posix'") - def setup_class(cls): - cls.w_using_pthread_cond = cls.space.wrap(sys.platform == 'freebsd6') - def w_acquire_retries_on_intr(self, lock): import _thread, os, signal, time self.sig_recvd = False @@ -222,8 +219,6 @@ raise KeyboardInterrupt def test_lock_acquire_interruption(self): - if self.using_pthread_cond: - skip('POSIX condition variables cannot be interrupted') import _thread, signal, time # Mimic receiving a SIGINT (KeyboardInterrupt) with SIGALRM while stuck # in a deadlock. 
diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -28,6 +28,7 @@ # 'pypy-c-app-level-win-x86-32', 'pypy-c-jit-linux-x86-32', 'pypy-c-jit-linux-x86-64', + 'pypy-c-jit-freebsd-9-x86-64', 'pypy-c-jit-macosx-x86-64', 'pypy-c-jit-win-x86-32', 'build-pypy-c-jit-linux-armhf-raring', @@ -42,7 +43,7 @@ import pwd return pwd.getpwuid(os.getuid())[0] -def main(branch, server): +def main(branch, server, user): #XXX: handle release tags #XXX: handle validity checks lock = defer.DeferredLock() @@ -56,7 +57,7 @@ print 'Forcing', builder, '...' url = "http://" + server + "/builders/" + builder + "/force" args = [ - ('username', get_user()), + ('username', user), ('revision', ''), ('forcescheduler', 'Force Scheduler'), ('submit', 'Force Build'), @@ -78,7 +79,8 @@ parser = optparse.OptionParser() parser.add_option("-b", "--branch", help="branch to build", default='') parser.add_option("-s", "--server", help="buildbot server", default="buildbot.pypy.org") + parser.add_option("-u", "--user", help="user name to report", default=get_user()) (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") - main(options.branch, options.server) + main(options.branch, options.server, user=options.user) diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,12 +1,12 @@ # Edit these appropriately before running this script maj=2 min=6 -rev=0 +rev=1 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. 
-for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 do wget http://buildbot.pypy.org/nightly/release-$maj.$min.x/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -631,9 +631,11 @@ raise Exception("TreeLoop.token is killed") token = property(_token, _token) - # This is the jitcell where the trace starts. Labels within the trace might - # belong to some other jitcells in the sens that jumping to this other - # jitcell will result in a jump to the label. + # This is the jitcell where the trace starts. Labels within the + # trace might belong to some other jitcells, i.e. they might have + # TargetTokens with a different value for 'targeting_jitcell_token'. + # But these TargetTokens also have a 'original_jitcell_token' field, + # which must be equal to this one. original_jitcell_token = None def __init__(self, name): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8445,6 +8445,7 @@ self.optimize_loop(ops, expected) def test_issue1080_infinitie_loop_virtual(self): + # Same comment as the following test_issue1080_infinitie_loop_simple ops = """ [p10] p52 = getfield_gc(p10, descr=nextdescr) # inst_storage @@ -8467,6 +8468,10 @@ self.raises(InvalidLoop, self.optimize_loop, ops, ops) def test_issue1080_infinitie_loop_simple(self): + # 'quasiimmutdescr' is a QuasiImmutDescr initialized with the + # 'quasibox' as the quasi-immutable instance. We close the loop + # with ConstPtr(myptr), i.e. a different pointer. 
The test checks + # that the resulting loop is invalid. ops = """ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -32,7 +32,6 @@ _WIN32 = _MSVC or _MINGW _WIN64 = _WIN32 and is_emulated_long _MAC_OS = platform.name == "darwin" -_FREEBSD_7 = platform.name == "freebsd7" _LITTLE_ENDIAN = sys.byteorder == 'little' _BIG_ENDIAN = sys.byteorder == 'big' diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -46,6 +46,7 @@ _POSIX = os.name == "posix" _MS_WINDOWS = os.name == "nt" +_FREEBSD = sys.platform.startswith('freebsd') _64BIT = "64bit" in host_platform.architecture()[0] @@ -1080,8 +1081,11 @@ return ctypes.util.find_library('c') libc_name = get_libc_name() # Make sure the name is determined during import, not at runtime + if _FREEBSD: + RTLD_DEFAULT = -2 # see + rtld_default_lib = ctypes.CDLL("RTLD_DEFAULT", handle=RTLD_DEFAULT, **load_library_kwargs) # XXX is this always correct??? 
- standard_c_lib = ctypes.CDLL(get_libc_name(), **load_library_kwargs) + standard_c_lib = ctypes.CDLL(libc_name, **load_library_kwargs) # ____________________________________________ @@ -1174,7 +1178,10 @@ not_found.append(libname) if cfunc is None: - cfunc = get_on_lib(standard_c_lib, funcname) + if _FREEBSD and funcname in ('dlopen', 'fdlopen', 'dlsym', 'dlfunc', 'dlerror', 'dlclose'): + cfunc = get_on_lib(rtld_default_lib, funcname) + else: + cfunc = get_on_lib(standard_c_lib, funcname) # XXX magic: on Windows try to load the function from 'kernel32' too if cfunc is None and hasattr(ctypes, 'windll'): cfunc = get_on_lib(ctypes.windll.kernel32, funcname) @@ -1493,18 +1500,17 @@ _where_is_errno().contents.value = TLS.errno if ctypes: - if sys.platform == 'win32': + if _MS_WINDOWS: standard_c_lib._errno.restype = ctypes.POINTER(ctypes.c_int) def _where_is_errno(): return standard_c_lib._errno() - elif sys.platform.startswith('linux') or sys.platform == 'freebsd6': + elif sys.platform.startswith('linux'): standard_c_lib.__errno_location.restype = ctypes.POINTER(ctypes.c_int) def _where_is_errno(): return standard_c_lib.__errno_location() - elif any(plat in sys.platform - for plat in ('darwin', 'freebsd7', 'freebsd8', 'freebsd9')): + elif sys.platform == 'darwin' or _FREEBSD: standard_c_lib.__error.restype = ctypes.POINTER(ctypes.c_int) def _where_is_errno(): return standard_c_lib.__error() diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -854,7 +854,7 @@ platform = None else: library_dir = '' - libraries = ['gc', 'dl'] + libraries = ['gc'] includes=['gc/gc.h'] eci = ExternalCompilationInfo( platform=platform, From noreply at buildbot.pypy.org Thu Oct 1 00:55:04 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Thu, 1 Oct 2015 00:55:04 +0200 (CEST) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: 
<20150930225504.993EC1C1311@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r79915:6029baccadac Date: 2015-09-30 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/6029baccadac/ Log: 2to3 diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -40,7 +40,7 @@ count += 1 i += 2 * WORD + size else: - raise AssertionError(ord(s[i])) + raise AssertionError(s[i]) return count import _vmprof From noreply at buildbot.pypy.org Thu Oct 1 12:04:52 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 1 Oct 2015 12:04:52 +0200 (CEST) Subject: [pypy-commit] pypy default: test, skip pickling object dtype ndarrays Message-ID: <20151001100452.C41181C130A@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r79916:044ec4c45280 Date: 2015-10-01 12:20 +0300 http://bitbucket.org/pypy/pypy/changeset/044ec4c45280/ Log: test, skip pickling object dtype ndarrays diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1308,6 +1308,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = 
cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + From noreply at buildbot.pypy.org Thu Oct 1 13:21:50 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Thu, 1 Oct 2015 13:21:50 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Fix test. Message-ID: <20151001112150.590AD1C0EBB@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r79917:fff65eb52236 Date: 2015-10-01 13:21 +0200 http://bitbucket.org/pypy/pypy/changeset/fff65eb52236/ Log: Fix test. diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -248,8 +248,6 @@ signal.signal(signal.SIGALRM, oldalrm) def test_rlock_acquire_interruption(self): - if self.using_pthread_cond: - skip('POSIX condition variables cannot be interrupted') import _thread, signal, time # Mimic receiving a SIGINT (KeyboardInterrupt) with SIGALRM while stuck # in a deadlock. 
From noreply at buildbot.pypy.org Thu Oct 1 13:27:04 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Oct 2015 13:27:04 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: work on it Message-ID: <20151001112704.4D1531C0EBB@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5559:a010b0057b87 Date: 2015-10-01 13:26 +0200 http://bitbucket.org/pypy/extradoc/changeset/a010b0057b87/ Log: work on it diff --git a/talk/pyconza2015/talk.rst b/talk/pyconza2015/talk.rst --- a/talk/pyconza2015/talk.rst +++ b/talk/pyconza2015/talk.rst @@ -29,6 +29,8 @@ * different base, not written in C +* ~7x faster than cpython + PyPy - the wider angle ---------------------- @@ -122,9 +124,31 @@ Guidos points about optimizing python ------------------------------------- -xxx find the link - Why we're here? --------------- -yyyyy +* because the points above don't really work + +* once you fix obvious mistakes, profiles tend to look flat + +* let's look at some of them + +the basics +---------- + +* have a metric + +a word about timeit +------------------- + +xxxxx + +introducing vmprof +------------------ + +* low-overhead profiler + +* statistical + +* visualization tools (work in progress) + From noreply at buildbot.pypy.org Thu Oct 1 13:30:07 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Oct 2015 13:30:07 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: update Message-ID: <20151001113007.A0D991C0EBB@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: extradoc Changeset: r5560:b80c359f91b5 Date: 2015-10-01 13:31 +0200 http://bitbucket.org/pypy/extradoc/changeset/b80c359f91b5/ Log: update diff --git a/talk/pyconza2015/Makefile b/talk/pyconza2015/Makefile new file mode 100644 --- /dev/null +++ b/talk/pyconza2015/Makefile @@ -0,0 +1,18 @@ +# you can find rst2beamer.py here: +# http://codespeak.net/svn/user/antocuni/bin/rst2beamer.py + +# WARNING: to work, it needs this patch for docutils +# 
https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 + +talk.pdf: talk.rst author.latex stylesheet.latex + python ../bin/rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 talk.rst talk.latex || exit + #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf & + +xpdf: talk.pdf + xpdf talk.pdf & diff --git a/talk/pyconza2015/author.latex b/talk/pyconza2015/author.latex new file mode 100644 --- /dev/null +++ b/talk/pyconza2015/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} + +\title[Python and PyPy performance]{Python and PyPy performance\\(not) for dummies} +\author[antocuni,fijal] +{Antonio Cuni and Maciej Fijałkowski} + +\institute{EuroPython 2015} +\date{July 21, 2015} diff --git a/talk/pyconza2015/stylesheet.latex b/talk/pyconza2015/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/pyconza2015/stylesheet.latex @@ -0,0 +1,10 @@ +\usetheme{Boadilla} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} From noreply at buildbot.pypy.org Thu Oct 1 13:51:18 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Oct 2015 13:51:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: small updates Message-ID: <20151001115118.A1BB71C0369@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5561:538c841b193b Date: 
2015-10-01 13:52 +0200 http://bitbucket.org/pypy/extradoc/changeset/538c841b193b/ Log: small updates diff --git a/talk/pyconza2015/Makefile b/talk/pyconza2015/Makefile --- a/talk/pyconza2015/Makefile +++ b/talk/pyconza2015/Makefile @@ -5,7 +5,7 @@ # https://sourceforge.net/tracker/?func=detail&atid=422032&aid=1459707&group_id=38414 talk.pdf: talk.rst author.latex stylesheet.latex - python ../bin/rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 talk.rst talk.latex || exit + python ../bin/rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt --theme=Warsaw --input-encoding=utf8 --output-encoding=utf8 talk.rst talk.latex || exit #/home/antocuni/.virtualenvs/rst2beamer/bin/python `which rst2beamer.py` --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit diff --git a/talk/pyconza2015/author.latex b/talk/pyconza2015/author.latex --- a/talk/pyconza2015/author.latex +++ b/talk/pyconza2015/author.latex @@ -1,8 +1,8 @@ \definecolor{rrblitbackground}{rgb}{0.0, 0.0, 0.0} -\title[Python and PyPy performance]{Python and PyPy performance\\(not) for dummies} -\author[antocuni,fijal] -{Antonio Cuni and Maciej Fijałkowski} +\title[How PyPy runs your programs]{How PyPy runs your programs} +\author[fijal] +{Maciej Fijałkowski} -\institute{EuroPython 2015} -\date{July 21, 2015} +\institute{PyCon ZA 2015} +\date{Oct 2nd, 2015} diff --git a/talk/pyconza2015/talk.rst b/talk/pyconza2015/talk.rst --- a/talk/pyconza2015/talk.rst +++ b/talk/pyconza2015/talk.rst @@ -1,6 +1,6 @@ How PyPy runs your program -================== +========================== About me -------- @@ -9,6 +9,10 @@ * running consulting business baroquesoftware.com +* always interested in tooling and improving experience + +* originally from Poland, living in Cape Town + This talk --------- @@ 
-27,7 +31,7 @@ * uses magic to run code faster (most of the time) -* different base, not written in C +* completely different codebase, not written in C * ~7x faster than cpython From noreply at buildbot.pypy.org Thu Oct 1 14:32:57 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Oct 2015 14:32:57 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: work on talk Message-ID: <20151001123257.851441C0748@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5562:a514b256a26f Date: 2015-10-01 14:33 +0200 http://bitbucket.org/pypy/extradoc/changeset/a514b256a26f/ Log: work on talk diff --git a/talk/pyconza2015/talk.rst b/talk/pyconza2015/talk.rst --- a/talk/pyconza2015/talk.rst +++ b/talk/pyconza2015/talk.rst @@ -7,7 +7,7 @@ * PyPy core developer for 8 years -* running consulting business baroquesoftware.com +* running consulting business ``http://baroquesoftware.com`` * always interested in tooling and improving experience @@ -20,9 +20,9 @@ * how pypy runs your programs -* how to assess the performance of your program +* how to assess the **performance** of your program -* additionally, why a lot of common folklore is not true +* additionally, why a lot of **common folklore** is not true The basics of PyPy ------------------ @@ -33,7 +33,7 @@ * completely different codebase, not written in C -* ~7x faster than cpython +* **~7x faster** than cpython PyPy - the wider angle ---------------------- @@ -133,6 +133,8 @@ * because the points above don't really work +* tradeoffs between productivity and performance + * once you fix obvious mistakes, profiles tend to look flat * let's look at some of them @@ -142,10 +144,22 @@ * have a metric +* a number, the shorter iteration the better + +* use science! 
+ a word about timeit ------------------- -xxxxx +* don't use it + +* really, never + +* no, not even that time + +* minimum is a terrible thing + +* disables the GC introducing vmprof ------------------ @@ -156,3 +170,37 @@ * visualization tools (work in progress) +vmprofdemo +---------- + +* optimize for yourself + +* cpython, pypy + +more complicated example +------------------------ + +* django admin + +* 30 req/s + +* in 2015, that's 90mln operations to show you a simple website + +baroquesoftware.com +------------------- + + +* monetizing open source is a difficult question, warrants another talk + +* small consultancy on execution of programs + +* talk to me + +Questions? +---------- + +* http://pypy.org + +* http://baroquesoftware.com + +* http://vmprof.com From noreply at buildbot.pypy.org Thu Oct 1 14:54:23 2015 From: noreply at buildbot.pypy.org (mjacob) Date: Thu, 1 Oct 2015 14:54:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Remove this test because it doesn't make sense anymore. Message-ID: <20151001125423.B50D21C0162@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: py3k Changeset: r79918:4d306d6f722e Date: 2015-10-01 14:54 +0200 http://bitbucket.org/pypy/pypy/changeset/4d306d6f722e/ Log: Remove this test because it doesn't make sense anymore. In the default branch (implementing 2.7), the test checks that, even though the script first writes to sys.stdout, stderr is output first. This works because both streams are buffered and, at interpreter shutdown, sys.stderr is flushed first. The order in which sys.stdout and sys.stderr are flushed isn't determistic if running on Python 3.x. Because of this, the test was changed in the py3k branch (at the time trying to implement Python 3.2) to check that both streams are always line buffered. However this assumption is false. Since Python 3.0 sys.stdout and sys.stderr are line buffered by default iff connected to a TTY or set otherwise unbuffered. 
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -808,31 +808,6 @@ data = self.run('-S -i', expect_prompt=True, expect_banner=True) assert 'copyright' not in data - def test_non_interactive_stdout_fully_buffered(self): - if os.name == 'nt': - try: - import __pypy__ - except: - py.test.skip('app_main cannot run on non-pypy for windows') - path = getscript(r""" - import sys, time - sys.stdout.write('\x00(STDOUT)\n\x00') # stays in buffers - time.sleep(1) - sys.stderr.write('\x00[STDERR]\n\x00') - time.sleep(1) - # stdout flushed automatically here - """) - cmdline = '%s -u "%s" %s' % (python3, app_main, path) - print 'POPEN:', cmdline - child_in, child_out_err = os.popen4(cmdline) - data = child_out_err.read(11) - # Py3 is always at least line buffered - assert data == '\x00(STDOUT)\n\x00' # from stdout - child_in.close() - data = child_out_err.read(11) - assert data == '\x00[STDERR]\n\x00' # from stderr - child_out_err.close() - def test_non_interactive_stdout_unbuffered(self, monkeypatch): monkeypatch.setenv('PYTHONUNBUFFERED', '1') if os.name == 'nt': From noreply at buildbot.pypy.org Thu Oct 1 14:57:10 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 1 Oct 2015 14:57:10 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: first version of pdf Message-ID: <20151001125710.E193A1C0162@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5563:8419bb0a7f83 Date: 2015-10-01 14:57 +0200 http://bitbucket.org/pypy/extradoc/changeset/8419bb0a7f83/ Log: first version of pdf diff --git a/talk/pyconza2015/talk.pdf b/talk/pyconza2015/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c78cad48408ee797dfb60f4a2077a482a3c83756 GIT binary patch [cut] From noreply at buildbot.pypy.org Thu Oct 1 16:32:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Oct 2015 16:32:41 +0200 
(CEST) Subject: [pypy-commit] pypy default: Allow RWeakValueDictionaries to be mixed with None Message-ID: <20151001143241.BEA6A1C0748@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79919:3a3f357b1c84 Date: 2015-10-01 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/3a3f357b1c84/ Log: Allow RWeakValueDictionaries to be mixed with None diff --git a/rpython/rlib/_rweakvaldict.py b/rpython/rlib/_rweakvaldict.py --- a/rpython/rlib/_rweakvaldict.py +++ b/rpython/rlib/_rweakvaldict.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rdict from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref from rpython.rtyper import rclass +from rpython.rtyper.error import TyperError from rpython.rtyper.rclass import getinstancerepr from rpython.rtyper.rmodel import Repr from rpython.rlib.rweakref import RWeakValueDictionary @@ -60,6 +61,8 @@ self.dict_cache = {} def convert_const(self, weakdict): + if weakdict is None: + return lltype.nullptr(self.WEAKDICT) if not isinstance(weakdict, RWeakValueDictionary): raise TyperError("expected an RWeakValueDictionary: %r" % ( weakdict,)) diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -25,6 +25,9 @@ """A dictionary containing weak values.""" def __init__(self, keyclass, valueclass): + """'keyclass' can be an RPython class or a type like 'int' or 'str'. + On the other hand, 'valueclass' must be an RPython class. 
+ """ self._dict = weakref.WeakValueDictionary() self._keyclass = keyclass self._valueclass = valueclass @@ -99,6 +102,12 @@ self.s_key = s_key self.valueclassdef = valueclassdef + def can_be_none(self): + return True + + def noneify(self): + return self + def rtyper_makerepr(self, rtyper): from rpython.rlib import _rweakvaldict return _rweakvaldict.WeakValueDictRepr(rtyper, diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -146,6 +146,25 @@ py.test.raises(Exception, interpret, g, [1]) +def test_rpython_RWeakValueDictionary_or_None(): + def g(d, key): + if d is None: + return None + return d.get(key) + def f(n): + x = X() + if n: + d = None + else: + d = RWeakValueDictionary(str, X) + d.set("a", x) + return g(d, "a") is x + assert f(0) + assert interpret(f, [0]) + assert not f(1) + assert not interpret(f, [1]) + + def test_bogus_makekey(): class X: pass class Y: pass From noreply at buildbot.pypy.org Thu Oct 1 16:32:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Oct 2015 16:32:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Change in cffi 1.3: allow ctype objects to die Message-ID: <20151001143243.E4F281C0748@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79920:438df7ee1d33 Date: 2015-10-01 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/438df7ee1d33/ Log: Change in cffi 1.3: allow ctype objects to die diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it 
can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' 
type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: 
'%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. 
+ unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py 
b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) From noreply at buildbot.pypy.org Thu Oct 1 16:32:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Oct 2015 16:32:46 +0200 (CEST) Subject: [pypy-commit] pypy default: import cffi 059aca3cb3dc Message-ID: <20151001143246.0E2381C0748@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79921:a1369d833047 Date: 2015-10-01 16:27 +0200 http://bitbucket.org/pypy/pypy/changeset/a1369d833047/ Log: import cffi 059aca3cb3dc diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . 
import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + 
tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def 
_get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if 
isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = 
[self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. # This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = 
tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals 
= (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 
+773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git 
a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = _get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int 
*a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1636,11 +1636,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1650,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2248,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... 
t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, ffiplatform, VerificationMissing from 
pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... };") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_restrict_fields(): + if sys.platform == 'win32': + py.test.skip("'__restrict__' probably not recognized") + ffi = FFI() + ffi.cdef("""struct foo_s { void * restrict b; };""") + lib = verify(ffi, 'test_restrict_fields', """ + struct foo_s { void * __restrict__ b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'b' + assert foo_s.fields[0][1].type is ffi.typeof("void *") + +def test_const_array_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[4]; };""") + lib = verify(ffi, 'test_const_array_fields', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 
'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_array_fields_varlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_varlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[]") + +def test_const_array_fields_unknownlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[...]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_unknownlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_function_args(): + ffi = FFI() + ffi.cdef("""int foobar(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_args', """ + int foobar(const int a, const int *b, const int c[]) { + return a + *b + *c; + } + """) + assert lib.foobar(100, ffi.new("int *", 40), ffi.new("int *", 2)) == 142 + +def test_const_function_type_args(): + ffi = FFI() + ffi.cdef("""int (*foobar)(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_type_args', """ + int (*foobar)(const int a, const int *b, const int c[]); + """) + t = ffi.typeof(lib.foobar) + assert t.args[0] is ffi.typeof("int") + assert t.args[1] is ffi.typeof("int *") + assert t.args[2] is ffi.typeof("int *") + +def test_const_constant(): + ffi = FFI() + ffi.cdef("""struct foo_s { int x,y; }; const struct foo_s myfoo;""") + lib = verify(ffi, 'test_const_constant', """ + struct foo_s { int x,y; }; const struct foo_s myfoo = { 40, 2 }; + """) + assert lib.myfoo.x == 40 + assert lib.myfoo.y == 2 + +def test_const_via_typedef(): + ffi = FFI() + ffi.cdef("""typedef const int const_t; const_t aaa;""") + lib = verify(ffi, 'test_const_via_typedef', """ + typedef const int const_t; + #define aaa 42 + """) 
+ assert lib.aaa == 42 + py.test.raises(AttributeError, "lib.aaa = 43") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1623,11 +1623,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1637,7 +1637,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -1923,7 +1923,7 @@ assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): - ffi = FFI_warnings_not_error() # ignore warnings + ffi = FFI() ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "" From noreply at buildbot.pypy.org Thu Oct 1 16:32:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Oct 2015 16:32:48 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20151001143248.4ADAB1C0748@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79922:bb9eab0bf5eb Date: 2015-10-01 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/bb9eab0bf5eb/ Log: merge heads diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1308,6 +1308,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not 
supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + From noreply at buildbot.pypy.org Thu Oct 1 20:27:43 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 1 Oct 2015 20:27:43 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: unpacking just one element now generates a scalar variable Message-ID: <20151001182743.EF2C41C1D98@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79923:66ad64b27bdb Date: 2015-10-01 20:27 +0200 http://bitbucket.org/pypy/pypy/changeset/66ad64b27bdb/ Log: unpacking just one element now generates a scalar variable diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -477,6 +477,11 @@ return self.getopnum() == rop.LABEL def is_vector(self): + if self.getopnum() in (rop.VEC_UNPACK_I, rop.VEC_UNPACK_F): + arg = 
self.getarg(2) + from rpython.jit.metainterp.history import ConstInt + assert isinstance(arg, ConstInt) + return arg.value > 1 return self.vector == -2 def returns_void(self): diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -107,6 +107,12 @@ if 'cast_to' in kwargs: assert op.cast_to() == kwargs['cast_to'] +def test_unpack_1(): + op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(1)]) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 8, False) + op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(2)]) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 8, True) + def test_types(): op = rop.ResOperation(rop.rop.INT_ADD, [ConstInt(0),ConstInt(1)]) assert op.type == 'i' From noreply at buildbot.pypy.org Thu Oct 1 20:57:25 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 1 Oct 2015 20:57:25 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: added llgraph impl for int_and int_or (vector ops), added a test to ensure the assembler to work correctly with the scalar unpacked value Message-ID: <20151001185725.75CD61C130A@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79924:7499f3065b10 Date: 2015-10-01 20:57 +0200 http://bitbucket.org/pypy/pypy/changeset/7499f3065b10/ Log: added llgraph impl for int_and int_or (vector ops), added a test to ensure the assembler to work correctly with the scalar unpacked value diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -799,6 +799,8 @@ exec py.code.Source(vector_arith_code.format('int','add','+')).compile() exec 
py.code.Source(vector_arith_code.format('int','sub','-')).compile() exec py.code.Source(vector_arith_code.format('int','mul','*')).compile() + exec py.code.Source(vector_arith_code.format('int','and','&')).compile() + exec py.code.Source(vector_arith_code.format('int','or','|')).compile() exec py.code.Source(vector_arith_code.format('float','add','+')).compile() exec py.code.Source(vector_arith_code.format('float','sub','-')).compile() exec py.code.Source(vector_arith_code.format('float','mul','*')).compile() diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -386,5 +386,28 @@ res = self.meta_interp(f, [size], vec_all=True) assert res == f(size) + def test_max_byte(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + T = lltype.Array(rffi.SIGNEDCHAR, hints={'nolength': True}) + def f(size): + vector_a = malloc(T, size) + for i in range(size): + vector_a[i] = rffi.r_signedchar(1) + for i in range(size/2,size): + vector_a[i] = rffi.r_signedchar(i) + i = 0 + max = -127 + while i < size: + myjitdriver.jit_merge_point() + a = intmask(vector_a[i]) + a = a & 255 + if a > max: + max = a + i += 1 + free(vector_a) + return max + res = self.meta_interp(f, [128], vec_all=True) + assert res == f(128) + class TestLLtype(LLJitMixin, VectorizeTests): pass From noreply at buildbot.pypy.org Thu Oct 1 23:57:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 1 Oct 2015 23:57:30 +0200 (CEST) Subject: [pypy-commit] cffi default: Fix tests to run on more 32-bit x86es Message-ID: <20151001215730.DA3751C0369@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2298:1a73afff26e2 Date: 2015-10-01 23:58 +0200 http://bitbucket.org/cffi/cffi/changeset/1a73afff26e2/ Log: Fix tests to run on more 32-bit x86es diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- 
a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -208,6 +208,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -188,6 +188,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 From noreply at buildbot.pypy.org Fri Oct 2 09:59:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Oct 2015 09:59:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #2137: try harder to avoid infinite recursion in some cases of Message-ID: <20151002075912.964D11C1453@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79925:fed018f3c786 Date: 2015-10-02 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/fed018f3c786/ Log: Issue #2137: try harder to avoid infinite recursion in some cases of __coerce__() with old-style instances diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,27 +253,30 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): 
+ w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: - return getattr(space, objspacename)(w_b, w_a) + # here, if coerce returns a non-W_Instance object as first + # argument, then give up. The idea is that this strange + # case should already have been handled by the binaryop() + # called from descroperation first. + return space.w_NotImplemented rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +286,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +526,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +635,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) 
- if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) + else: return space.w_NotImplemented - else: - return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,14 @@ 
pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + def test_binaryop(self): class A: def __add__(self, other): From noreply at buildbot.pypy.org Fri Oct 2 10:51:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Oct 2015 10:51:53 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: PPC Backend #6 step 1 Message-ID: <20151002085153.C05131C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r79926:7d9d9d7d1398 Date: 2015-10-02 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7d9d9d7d1398/ Log: PPC Backend #6 step 1 diff too long, truncating to 2000 out of 4756 lines diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -225,6 +225,10 @@ if not for_frame: self._push_all_regs_to_jitframe(mc, [], withfloats, callee_only=True) else: + # NOTE: don't save registers on the jitframe here! It might + # override already-saved values that will be restored + # later... 
+ # # we're possibly called from the slowpath of malloc # save the caller saved registers # assuming we do not collect here diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -1259,18 +1259,6 @@ self.possibly_free_vars(guard_op.getfailargs()) return locs + [resloc, tmploc] - def _prepare_args_for_new_op(self, new_args): - gc_ll_descr = self.cpu.gc_ll_descr - args = gc_ll_descr.args_for_new(new_args) - arglocs = [] - for i in range(len(args)): - arg = args[i] - t = TempInt() - l = self.force_allocate_reg(t, selected_reg=r.all_regs[i]) - self.assembler.load(l, imm(arg)) - arglocs.append(t) - return arglocs - prepare_op_float_add = prepare_float_op(name='prepare_op_float_add') prepare_op_float_sub = prepare_float_op(name='prepare_op_float_sub') prepare_op_float_mul = prepare_float_op(name='prepare_op_float_mul') diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -303,7 +303,7 @@ for line in open(str(logfile)): if 'guard_class' in line: guard_class += 1 - # if we get many more guard_classes, it means that we generate + # if we get many more guard_classes (~93), it means that we generate # guards that always fail (the following assert's original purpose # is to catch the following case: each GUARD_CLASS is misgenerated # and always fails with "gcremovetypeptr") diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py --- a/rpython/jit/backend/ppc/callbuilder.py +++ b/rpython/jit/backend/ppc/callbuilder.py @@ -126,8 +126,8 @@ if gcrootmap.is_shadow_stack and self.is_call_release_gil: # in this mode, 'ebx' happens to contain the shadowstack # top at this point, so reuse it instead of loading it again - ssreg = ebx - 
self.asm._reload_frame_if_necessary(self.mc) + ssreg = self.RSHADOWPTR + self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) def emit_raw_call(self): self.mc.raw_call() @@ -151,9 +151,10 @@ # Save this thread's shadowstack pointer into r29, for later comparison gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap if gcrootmap: - rst = gcrootmap.get_root_stack_top_addr() - self.mc.load_imm(RSHADOWPTR, rst) - self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0) + if gcrootmap.is_shadow_stack: + rst = gcrootmap.get_root_stack_top_addr() + self.mc.load_imm(RSHADOWPTR, rst) + self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0) # # change 'rpy_fastgil' to 0 (it should be non-zero right now) self.mc.load_imm(RFASTGILPTR, fastgil) @@ -184,7 +185,8 @@ self.mc.cmpdi(0, r.r10.value, 0) b1_location = self.mc.currpos() - self.mc.trap() # patched with a BEQ: jump if r10 is zero + self.mc.trap() # boehm: patched with a BEQ: jump if r10 is zero + # shadowstack: patched with BNE instead if self.asm.cpu.gc_ll_descr.gcrootmap: # When doing a call_release_gil with shadowstack, there @@ -192,20 +194,23 @@ # current shadowstack can be the one of a different # thread. So here we check if the shadowstack pointer # is still the same as before we released the GIL (saved - # in 'r7'), and if not, we fall back to 'reacqgil_addr'. - XXXXXXXXXXXXXXXXXXX - self.mc.LDR_ri(r.ip.value, r.r5.value, cond=c.EQ) - self.mc.CMP_rr(r.ip.value, r.r7.value, cond=c.EQ) + # in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'. + self.mc.load(r.r9.value, RSHADOWPTR.value, 0) + self.mc.cmpdi(0, r.r9.value, RSHADOWOLD.value) + bne_location = b1_location b1_location = self.mc.currpos() - self.mc.BKPT() # BEQ below - # there are two cases here: either EQ was false from - # the beginning, or EQ was true at first but the CMP - # made it false. In the second case we need to - # release the fastgil here. We know which case it is - # by checking again r3. 
- self.mc.CMP_ri(r.r3.value, 0) - self.mc.STR_ri(r.r3.value, r.r6.value, cond=c.EQ) + self.mc.trap() + + # revert the rpy_fastgil acquired above, so that the + # general 'reacqgil_addr' below can acquire it again... + # (here, r10 is conveniently zero) + self.mc.std(r.r10.value, RFASTGILPTR.value, 0) + + pmc = OverwritingBuilder(self.mc, bne_location, 1) + pmc.bne(self.mc.currpos() - bne_location) + pmc.overwrite() # + # Yes, we need to call the reacqgil() function. # save the result we just got RSAVEDRES = RFASTGILPTR # can reuse this reg here reg = self.resloc @@ -225,9 +230,8 @@ PARAM_SAVE_AREA_OFFSET + 7 * WORD) # replace b1_location with BEQ(here) - jmp_target = self.mc.currpos() pmc = OverwritingBuilder(self.mc, b1_location, 1) - pmc.beq(jmp_target - b1_location) + pmc.beq(self.mc.currpos() - b1_location) pmc.overwrite() if not we_are_translated(): # for testing: now we can access diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py --- a/rpython/jit/backend/ppc/codebuilder.py +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -17,11 +17,15 @@ from rpython.jit.backend.ppc.rassemblermaker import make_rassembler -# these are the *forbidden* encodings that don't accept register r0: -# addi rX, r0, immed -# subi rX, r0, immed -# addis rX, r0, immed -# subis rX, r0, immed +# the following instructions can't accept "r0" as the second argument +# (i.e. the base address): it is recognized as "0" instead, or is +# even invalid (load-with-update, store-with-update). +# +# any load or store instruction +# addi rD, r0, immed +# subi rD, r0, immed +# addis rD, r0, immed +# subis rD, r0, immed A = Form("frD", "frA", "frB", "XO3", "Rc") @@ -1000,12 +1004,23 @@ if word & 0xFFFF != 0: self.ori(rD, rD, lo(word)) + def load_imm_plus(self, dest_reg, word): + """Like load_imm(), but with one instruction less, and + leaves the loaded value off by some signed 16-bit difference. 
+ Returns that difference.""" + diff = rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, word)) + word -= diff + assert word & 0xFFFF == 0 + self.load_imm(dest_reg, word) + return diff + def load_from_addr(self, rD, addr): - self.load_imm(rD, addr) + assert rD is not r.r0 + diff = self.load_imm_plus(rD, addr) if IS_PPC_32: - self.lwzx(rD.value, 0, rD.value) + self.lwz(rD.value, rD.value, diff) else: - self.ldx(rD.value, 0, rD.value) + self.ld(rD.value, rD.value, diff) def b_offset(self, target): curpos = self.currpos() @@ -1073,60 +1088,6 @@ # Call the function self.bctrl() - ## def call(self, address): - ## """ do a call to an absolute address - ## """ - ## with scratch_reg(self): - ## if IS_PPC_32: - ## self.load_imm(r.SCRATCH, address) - ## else: - ## self.store(r.TOC.value, r.SP.value, 5 * WORD) - ## self.load_imm(r.r11, address) - ## self.load(r.SCRATCH.value, r.r11.value, 0) - ## self.load(r.TOC.value, r.r11.value, WORD) - ## self.load(r.r11.value, r.r11.value, 2 * WORD) - ## self.mtctr(r.SCRATCH.value) - ## self.bctrl() - - ## if IS_PPC_64: - ## self.load(r.TOC.value, r.SP.value, 5 * WORD) - - ## def call_register(self, call_reg): - ## """ do a call to an address given in a register - ## """ - ## assert isinstance(call_reg, RegisterLocation) - ## with scratch_reg(self): - ## if IS_PPC_32: - ## self.mr(r.SCRATCH.value, call_reg.value) - ## else: - ## self.store(r.TOC.value, r.SP.value, 5 * WORD) - ## self.mr(r.r11.value, call_reg.value) - ## self.load(r.SCRATCH.value, r.r11.value, 0) - ## self.load(r.TOC.value, r.r11.value, WORD) - ## self.load(r.r11.value, r.r11.value, 2 * WORD) - ## self.mtctr(r.SCRATCH.value) - ## self.bctrl() - - ## if IS_PPC_64: - ## self.load(r.TOC.value, r.SP.value, 5 * WORD) - - ## def make_function_prologue(self, frame_size): - ## """ Build a new stackframe of size frame_size - ## and store the LR in the previous frame. 
- ## """ - ## with scratch_reg(self): - ## self.store_update(r.SP.value, r.SP.value, -frame_size) - ## self.mflr(r.SCRATCH.value) - ## self.store(r.SCRATCH.value, r.SP.value, frame_size + LR_BC_OFFSET) - - def restore_LR_from_caller_frame(self, frame_size): - """ Restore the LR from the calling frame. - frame_size is the size of the current frame. - """ - with scratch_reg(self): - lr_offset = frame_size + LR_BC_OFFSET - self.load(r.SCRATCH.value, r.SP.value, lr_offset) - self.mtlr(r.SCRATCH.value) def load(self, target_reg, base_reg, offset): if IS_PPC_32: @@ -1266,6 +1227,22 @@ #assert self.r0_in_use #self.r0_in_use = False + def get_assembler_function(self): + "NOT_RPYTHON: tests only" + from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager + class FakeCPU: + HAS_CODEMAP = False + asmmemmgr = AsmMemoryManager() + addr = self.materialize(FakeCPU(), []) + if IS_BIG_ENDIAN: + mc = PPCBuilder() + mc.write64(addr) # the 3-words descriptor + mc.write64(0) + mc.write64(0) + addr = mc.materialize(FakeCPU(), []) + return rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), addr) + + class scratch_reg(object): def __init__(self, mc): self.mc = mc diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -59,23 +59,23 @@ else: self.mc.mulld(res.value, l0.value, l1.value) - def do_emit_int_binary_ovf(self, op, arglocs, emit): + def do_emit_int_binary_ovf(self, op, arglocs): l0, l1, res = arglocs[0], arglocs[1], arglocs[2] self.mc.load_imm(r.SCRATCH, 0) self.mc.mtxer(r.SCRATCH.value) - emit(res.value, l0.value, l1.value) + return (res.value, l0.value, l1.value) def emit_int_add_ovf(self, op, arglocs, regalloc): - self.do_emit_int_binary_ovf(op, arglocs, self.mc.addox) + self.mc.addox(*self.do_emit_int_binary_ovf(op, arglocs)) def emit_int_sub_ovf(self, op, arglocs, regalloc): - self.do_emit_int_binary_ovf(op, arglocs, 
self.mc.subox) + self.mc.subox(*self.do_emit_int_binary_ovf(op, arglocs)) def emit_int_mul_ovf(self, op, arglocs, regalloc): if IS_PPC_32: - self.do_emit_int_binary_ovf(op, arglocs, self.mc.mullwox) + self.mc.mullwox(*self.do_emit_int_binary_ovf(op, arglocs)) else: - self.do_emit_int_binary_ovf(op, arglocs, self.mc.mulldox) + self.mc.mulldox(*self.do_emit_int_binary_ovf(op, arglocs)) def emit_int_floordiv(self, op, arglocs, regalloc): l0, l1, res = arglocs @@ -343,12 +343,11 @@ # this half-word is at offset 0 on a little-endian machine; # but it is at offset 2 (32 bit) or 4 (64 bit) on a # big-endian machine. - with scratch_reg(self.mc): - if IS_PPC_32: - self.mc.lhz(r.SCRATCH.value, locs[0].value, 2) - else: - self.mc.lwz(r.SCRATCH.value, locs[0].value, 4) - self.mc.cmp_op(0, r.SCRATCH.value, typeid.value, imm=typeid.is_imm()) + if IS_PPC_32: + self.mc.lhz(r.SCRATCH.value, locs[0].value, 2 * IS_BIG_ENDIAN) + else: + self.mc.lwz(r.SCRATCH.value, locs[0].value, 4 * IS_BIG_ENDIAN) + self.mc.cmp_op(0, r.SCRATCH.value, typeid.value, imm=typeid.is_imm()) def emit_guard_not_invalidated(self, op, arglocs, regalloc): self._emit_guard(op, arglocs, is_guard_not_invalidated=True) @@ -461,23 +460,24 @@ pmc.overwrite() def emit_guard_exception(self, op, arglocs, regalloc): - # XXX FIXME - # XXX pos_exc_value and pos_exception are 8 bytes apart, don't need both - loc, loc1, resloc, pos_exc_value, pos_exception = arglocs[:5] - failargs = arglocs[5:] - self.mc.load_imm(loc1, pos_exception.value) - self.mc.load(r.SCRATCH.value, loc1.value, 0) - self.mc.cmp_op(0, r.SCRATCH.value, loc.value) + loc, resloc = arglocs[:2] + failargs = arglocs[2:] + + mc = self.mc + mc.load_imm(r.SCRATCH2, self.cpu.pos_exc_value()) + diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + assert _check_imm_arg(diff) + + mc.load(r.SCRATCH.value, r.SCRATCH2.value, diff) + mc.cmp_op(0, r.SCRATCH.value, loc.value) self.guard_success_cc = c.EQ self._emit_guard(op, failargs, save_exc=True) - 
self.mc.load_imm(loc, pos_exc_value.value) if resloc: - self.mc.load(resloc.value, loc.value, 0) - - self.mc.load_imm(r.SCRATCH, 0) - self.mc.store(r.SCRATCH.value, loc.value, 0) - self.mc.store(r.SCRATCH.value, loc1.value, 0) + mc.load(resloc.value, r.SCRATCH2.value, 0) + mc.load_imm(r.SCRATCH, 0) + mc.store(r.SCRATCH.value, r.SCRATCH2.value, 0) + mc.store(r.SCRATCH.value, r.SCRATCH2.value, diff) class CallOpAssembler(object): @@ -687,7 +687,7 @@ if _check_imm_arg(multiply_by): self.mc.mulli(scratch_loc.value, loc.value, multiply_by) else: - self.mc.load_imm(scratch_loc.value, multiply_by) + self.mc.load_imm(scratch_loc, multiply_by) if IS_PPC_32: self.mc.mullw(scratch_loc.value, loc.value, scratch_loc.value) @@ -766,6 +766,23 @@ self.mc.mr(r.SCRATCH2.value, loc.value) return r.SCRATCH2 + # RPythonic workaround for emit_zero_array() + def eza_stXux(self, a, b, c, itemsize): + if itemsize & 1: self.mc.stbux(a, b, c) + elif itemsize & 2: self.mc.sthux(a, b, c) + elif (itemsize & 4) or IS_PPC_32: self.mc.stwux(a, b, c) + else: self.mc.stdux(a, b, c) + def eza_stXu(self, a, b, c, itemsize): + if itemsize & 1: self.mc.stbu(a, b, c) + elif itemsize & 2: self.mc.sthu(a, b, c) + elif (itemsize & 4) or IS_PPC_32: self.mc.stwu(a, b, c) + else: self.mc.stdu(a, b, c) + def eza_stX(self, a, b, c, itemsize): + if itemsize & 1: self.mc.stb(a, b, c) + elif itemsize & 2: self.mc.sth(a, b, c) + elif (itemsize & 4) or IS_PPC_32: self.mc.stw(a, b, c) + else: self.mc.std(a, b, c) + def emit_zero_array(self, op, arglocs, regalloc): base_loc, startindex_loc, length_loc, ofs_loc, itemsize_loc = arglocs @@ -774,26 +791,10 @@ # * if N % 4 == 0, then all items are aligned to a multiple of 4 # * if N % 8 == 0, then all items are aligned to a multiple of 8 itemsize = itemsize_loc.getint() - if itemsize & 1: - stepsize = 1 - stXux = self.mc.stbux - stXu = self.mc.stbu - stX = self.mc.stb - elif itemsize & 2: - stepsize = 2 - stXux = self.mc.sthux - stXu = self.mc.sthu - stX = self.mc.sth - 
elif (itemsize & 4) or IS_PPC_32: - stepsize = 4 - stXux = self.mc.stwux - stXu = self.mc.stwu - stX = self.mc.stw - else: - stepsize = WORD - stXux = self.mc.stdux - stXu = self.mc.stdu - stX = self.mc.std + if itemsize & 1: stepsize = 1 + elif itemsize & 2: stepsize = 2 + elif (itemsize & 4) or IS_PPC_32: stepsize = 4 + else: stepsize = WORD repeat_factor = itemsize // stepsize if repeat_factor != 1: @@ -816,9 +817,11 @@ if unroll > 0: assert repeat_factor == 1 self.mc.li(r.SCRATCH.value, 0) - stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value) + self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, + itemsize) for i in range(1, unroll): - stX(r.SCRATCH.value, ofs_loc.value, i * stepsize) + self.eza_stX(r.SCRATCH.value, ofs_loc.value, i * stepsize, + itemsize) else: if length_loc.is_imm(): @@ -836,12 +839,14 @@ self.mc.mtctr(length_loc.value) self.mc.li(r.SCRATCH.value, 0) - stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value) + self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, + itemsize) bdz_location = self.mc.currpos() self.mc.trap() loop_location = self.mc.currpos() - stXu(r.SCRATCH.value, ofs_loc.value, stepsize) + self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, + itemsize) self.mc.bdnz(loop_location - self.mc.currpos()) pmc = OverwritingBuilder(self.mc, bdz_location, 1) @@ -958,10 +963,13 @@ def emit_call_malloc_nursery_varsize(self, op, arglocs, regalloc): # registers r.RES and r.RSZ are allocated for this call + gc_ll_descr = self.cpu.gc_ll_descr + if not hasattr(gc_ll_descr, 'max_size_of_young_obj'): + raise Exception("unreachable code") + # for boehm, this function should never be called [lengthloc] = arglocs arraydescr = op.getdescr() itemsize = op.getarg(1).getint() - gc_ll_descr = self.cpu.gc_ll_descr maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) / itemsize gcmap = regalloc.get_gcmap([r.RES, r.RSZ]) self.malloc_cond_varsize( @@ -976,6 +984,12 @@ emit_jit_debug = emit_debug_merge_point emit_keepalive = 
emit_debug_merge_point + def emit_enter_portal_frame(self, op, arglocs, regalloc): + self.enter_portal_frame(op) + + def emit_leave_portal_frame(self, op, arglocs, regalloc): + self.leave_portal_frame(op) + def _write_barrier_fastpath(self, mc, descr, arglocs, regalloc, array=False, is_frame=False): # Write code equivalent to write_barrier() in the GC: it checks @@ -1212,6 +1226,7 @@ StrOpAssembler, CallOpAssembler, UnicodeOpAssembler, ForceOpAssembler, AllocOpAssembler, FloatOpAssembler): + _mixin_ = True def nop(self): self.mc.ori(0, 0, 0) diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -127,20 +127,20 @@ def _call_header_shadowstack(self, gcrootmap): # we need to put one word into the shadowstack: the jitframe (SPP) mc = self.mc - mc.load_imm(r.RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(r.RCS2.value, r.RCS1.value, 0) # ld RCS2, [rootstacktop] + diff = mc.load_imm_plus(r.RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(r.RCS2.value, r.RCS1.value, diff) # ld RCS2, [rootstacktop] # mc.addi(r.RCS3.value, r.RCS2.value, WORD) # add RCS3, RCS2, WORD mc.store(r.SPP.value, r.RCS2.value, 0) # std SPP, RCS2 # - mc.store(r.RCS3.value, r.RCS1.value, 0) # std RCS3, [rootstacktop] + mc.store(r.RCS3.value, r.RCS1.value, diff)# std RCS3, [rootstacktop] def _call_footer_shadowstack(self, gcrootmap): mc = self.mc - mc.load_imm(r.RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(r.RCS2.value, r.RCS1.value, 0) # ld RCS2, [rootstacktop] - mc.addi(r.RCS2.value, r.RCS2.value, WORD) # sub RCS2, RCS2, WORD - mc.store(r.RCS2.value, r.RCS1.value, 0) # std RCS2, [rootstacktop] + diff = mc.load_imm_plus(r.RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(r.RCS2.value, r.RCS1.value, diff) # ld RCS2, [rootstacktop] + mc.subi(r.RCS2.value, r.RCS2.value, WORD) # sub RCS2, RCS2, WORD + mc.store(r.RCS2.value, r.RCS1.value, diff) # std 
RCS2, [rootstacktop] def new_stack_loc(self, i, tp): base_ofs = self.cpu.get_baseofs_of_frame_field() @@ -248,8 +248,8 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - mc.load_imm(r.r5, gcrootmap.get_root_stack_top_addr()) - mc.load(r.r5.value, r.r5.value, 0) + diff = mc.load_imm_plus(r.r5, gcrootmap.get_root_stack_top_addr()) + mc.load(r.r5.value, r.r5.value, diff) mc.store(r.r3.value, r.r5.value, -WORD) mc.mtlr(r.RCS1.value) # restore LR @@ -283,13 +283,16 @@ mc.store(excvalloc.value, r.r2.value, 0) mc.store(exctploc.value, r.r2.value, diff) - def _reload_frame_if_necessary(self, mc): + def _reload_frame_if_necessary(self, mc, shadowstack_reg=None): gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: - mc.load_imm(r.SPP, gcrootmap.get_root_stack_top_addr()) - mc.load(r.SPP.value, r.SPP.value, 0) - mc.load(r.SPP.value, r.SPP.value, -WORD) + if shadowstack_reg is None: + diff = mc.load_imm_plus(r.SPP, + gcrootmap.get_root_stack_top_addr()) + mc.load(r.SPP.value, r.SPP.value, diff) + shadowstack_reg = r.SPP + mc.load(r.SPP.value, shadowstack_reg.value, -WORD) wbdescr = self.cpu.gc_ll_descr.write_barrier_descr if gcrootmap and wbdescr: # frame never uses card marking, so we enforce this is not @@ -430,100 +433,38 @@ if slowpathaddr == 0 or not self.cpu.propagate_exception_descr: return # no stack check (for tests, or non-translated) # - # make a "function" that is called immediately at the start of - # an assembler function. In particular, the stack looks like: + # make a regular function that is called from a point near the start + # of an assembler function (after it adjusts the stack and saves + # registers). 
+ mc = PPCBuilder() # - # | | - # | OLD BACKCHAIN | - # | | - # =============================== - - # | | | - # | BACKCHAIN | | > MINI FRAME (BACHCHAIN SIZE * WORD) - # | | | - # =============================== - - # | | - # | SAVED PARAM REGS | - # | | - # ------------------------------- - # | | - # | BACKCHAIN | - # | | - # =============================== <- SP + # Save away the LR inside r30 + mc.mflr(r.RCS1.value) # - mc = PPCBuilder() - - # make small frame to store data (parameter regs + LR + SCRATCH) in - # there. Allocate additional fixed save area for PPC64. - PARAM_AREA = len(r.PARAM_REGS) - FIXED_AREA = BACKCHAIN_SIZE - if IS_PPC_64: - FIXED_AREA += MAX_REG_PARAMS - frame_size = (FIXED_AREA + PARAM_AREA) * WORD - - # align the SP - MINIFRAME_SIZE = BACKCHAIN_SIZE * WORD - while (frame_size + MINIFRAME_SIZE) % (4 * WORD) != 0: - frame_size += WORD - - # write function descriptor - if IS_PPC_64 and IS_BIG_ENDIAN: - for _ in range(3): - mc.write64(0) - - # build frame - mc.make_function_prologue(frame_size) - - # save parameter registers - for i, reg in enumerate(r.PARAM_REGS): - mc.store(reg.value, r.SP.value, (i + FIXED_AREA) * WORD) - + # Do the call # use SP as single parameter for the call mc.mr(r.r3.value, r.SP.value) - - # stack still aligned - mc.call(slowpathaddr) - - with scratch_reg(mc): - mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) - mc.loadx(r.SCRATCH.value, 0, r.SCRATCH.value) - # if this comparison is true, then everything is ok, - # else we have an exception - mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) - - jnz_location = mc.currpos() - mc.trap() - - # restore parameter registers - for i, reg in enumerate(r.PARAM_REGS): - mc.load(reg.value, r.SP.value, (i + FIXED_AREA) * WORD) - - # restore LR - mc.restore_LR_from_caller_frame(frame_size) - - # reset SP - mc.addi(r.SP.value, r.SP.value, frame_size) - #mc.blr() - mc.b(self.propagate_exception_path) - - pmc = OverwritingBuilder(mc, jnz_location, 1) - pmc.bne(mc.currpos() - jnz_location) - 
pmc.overwrite() - - # restore link register out of preprevious frame - offset_LR = frame_size + MINIFRAME_SIZE + LR_BC_OFFSET - - with scratch_reg(mc): - mc.load(r.SCRATCH.value, r.SP.value, offset_LR) - mc.mtlr(r.SCRATCH.value) - - # remove this frame and the miniframe - both_framesizes = frame_size + MINIFRAME_SIZE - mc.addi(r.SP.value, r.SP.value, both_framesizes) - mc.blr() - + mc.load_imm(mc.RAW_CALL_REG, slowpathaddr) + mc.raw_call() + # + # Restore LR + mc.mtlr(r.RCS1.value) + # + # Check if it raised StackOverflow + mc.load_imm(r.SCRATCH, self.cpu.pos_exception()) + mc.loadx(r.SCRATCH.value, 0, r.SCRATCH.value) + # if this comparison is true, then everything is ok, + # else we have an exception + mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) + # + # So we return to LR back to our caller, conditionally if "EQ" + mc.beqlr() + # + # Else, jump to propagate_exception_path + assert self.propagate_exception_path + mc.b_abs(self.propagate_exception_path) + # rawstart = mc.materialize(self.cpu, []) - if IS_PPC_64: - self.write_64_bit_func_descr(rawstart, rawstart+3*WORD) self.stack_check_slowpath = rawstart def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): @@ -553,6 +494,10 @@ self.mc = mc if for_frame: + # NOTE: don't save registers on the jitframe here! It might + # override already-saved values that will be restored + # later... + # # This 'for_frame' version is called after a CALL. It does not # need to save many registers: the registers that are anyway # destroyed by the call can be ignored (VOLATILES), and the @@ -560,8 +505,19 @@ # to save r.RCS1 (used below), r3 and f1 (possible results of # the call), and two more non-volatile registers (used to store # the RPython exception that occurred in the CALL, if any). - saved_regs = [r.r3, r.RCS1, r.RCS2, r.RCS3] - saved_fp_regs = [r.f1] + # + # We need to increase our stack frame size a bit to store them. 
+ # + self.mc.load(r.SCRATCH.value, r.SP.value, 0) # SP back chain + self.mc.store_update(r.SCRATCH.value, r.SP.value, -6 * WORD) + self.mc.std(r.RCS1.value, r.SP.value, 1 * WORD) + self.mc.std(r.RCS2.value, r.SP.value, 2 * WORD) + self.mc.std(r.RCS3.value, r.SP.value, 3 * WORD) + self.mc.std(r.r3.value, r.SP.value, 4 * WORD) + self.mc.stfd(r.f1.value, r.SP.value, 5 * WORD) + saved_regs = None + saved_fp_regs = None + else: # push all volatile registers, push RCS1, and sometimes push RCS2 if withcards: @@ -573,8 +529,8 @@ else: saved_fp_regs = [] - self._push_core_regs_to_jitframe(mc, saved_regs) - self._push_fp_regs_to_jitframe(mc, saved_fp_regs) + self._push_core_regs_to_jitframe(mc, saved_regs) + self._push_fp_regs_to_jitframe(mc, saved_fp_regs) if for_frame: # note that it's safe to store the exception in register, @@ -608,8 +564,18 @@ mc.lbz(r.RCS2.value, r.RCS2.value, descr.jit_wb_if_flag_byteofs) mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) - self._pop_core_regs_from_jitframe(mc, saved_regs) - self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) + if for_frame: + self.mc.ld(r.RCS1.value, r.SP.value, 1 * WORD) + self.mc.ld(r.RCS2.value, r.SP.value, 2 * WORD) + self.mc.ld(r.RCS3.value, r.SP.value, 3 * WORD) + self.mc.ld(r.r3.value, r.SP.value, 4 * WORD) + self.mc.lfd(r.f1.value, r.SP.value, 5 * WORD) + self.mc.addi(r.SP.value, r.SP.value, 6 * WORD) + + else: + self._pop_core_regs_from_jitframe(mc, saved_regs) + self._pop_fp_regs_from_jitframe(mc, saved_fp_regs) + mc.blr() self.mc = old_mc @@ -675,54 +641,19 @@ if self.stack_check_slowpath == 0: pass # not translated else: - XXXX - # this is the size for the miniframe - frame_size = BACKCHAIN_SIZE * WORD + endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + diff = lengthaddr - endaddr + assert _check_imm_arg(diff) - endaddr, lengthaddr, _ = self.cpu.insert_stack_check() - - # save r16 - self.mc.mtctr(r.r16.value) - - with scratch_reg(self.mc): - self.mc.load_imm(r.SCRATCH, endaddr) # load 
SCRATCH, [start] - self.mc.loadx(r.SCRATCH.value, 0, r.SCRATCH.value) - self.mc.subf(r.SCRATCH.value, r.SP.value, r.SCRATCH.value) - self.mc.load_imm(r.r16, lengthaddr) - self.mc.load(r.r16.value, r.r16.value, 0) - self.mc.cmp_op(0, r.SCRATCH.value, r.r16.value, signed=False) - - # restore r16 - self.mc.mfctr(r.r16.value) - - patch_loc = self.mc.currpos() - self.mc.trap() - - # make minimal frame which contains the LR - # - # | OLD FRAME | - # ============================== - # | | - # | BACKCHAIN | > BACKCHAIN_SIZE * WORD - # | | - # ============================== <- SP - - self.mc.make_function_prologue(frame_size) - - # make check - self.mc.call(self.stack_check_slowpath) - - # restore LR - self.mc.restore_LR_from_caller_frame(frame_size) - - # remove minimal frame - self.mc.addi(r.SP.value, r.SP.value, frame_size) - - offset = self.mc.currpos() - patch_loc - # - pmc = OverwritingBuilder(self.mc, patch_loc, 1) - pmc.ble(offset) # jump if SCRATCH <= r16, i. e. not(SCRATCH > r16) - pmc.overwrite() + mc = self.mc + mc.load_imm(r.SCRATCH, self.stack_check_slowpath) + mc.load_imm(r.SCRATCH2, endaddr) # li r2, endaddr + mc.mtctr(r.SCRATCH.value) + mc.load(r.SCRATCH.value, r.SCRATCH2.value, 0) # ld r0, [end] + mc.load(r.SCRATCH2.value, r.SCRATCH2.value, diff)# ld r2, [length] + mc.subf(r.SCRATCH.value, r.SP.value, r.SCRATCH.value) # sub r0, SP + mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value, signed=False) + mc.bgtctrl() def _call_footer(self): # the return value is the jitframe @@ -1012,8 +943,7 @@ addr = rawstart + tok.pos_jump_offset # # XXX see patch_jump_for_descr() - #tok.faildescr.adr_jump_offset = addr - tok.faildescr.adr_recovery_stub = rawstart + tok.pos_recovery_stub + tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub # relative_target = tok.pos_recovery_stub - tok.pos_jump_offset # @@ -1039,7 +969,9 @@ # --- XXX for now we always use the second solution --- mc = PPCBuilder() mc.b_abs(adr_new_target) - 
mc.copy_to_raw_memory(faildescr.adr_recovery_stub) + mc.copy_to_raw_memory(faildescr.adr_jump_offset) + assert faildescr.adr_jump_offset != 0 + faildescr.adr_jump_offset = 0 # means "patched" def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token @@ -1390,16 +1322,7 @@ with scratch_reg(self.mc): self.mc.load_imm(r.SCRATCH, fail_index) self.mc.store(r.SCRATCH.value, r.SPP.value, FORCE_INDEX_OFS) - - def load(self, loc, value): - assert (loc.is_reg() and value.is_imm() - or loc.is_fp_reg() and value.is_imm_float()) - if value.is_imm(): - self.mc.load_imm(loc, value.getint()) - elif value.is_imm_float(): - with scratch_reg(self.mc): - self.mc.load_imm(r.SCRATCH, value.getint()) - self.mc.lfdx(loc.value, 0, r.SCRATCH.value) + def notimplemented_op(self, op, arglocs, regalloc): print "[PPC/asm] %s not implemented" % op.getopname() diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -61,11 +61,15 @@ save_around_call_regs = r.VOLATILES_FLOAT assert set(save_around_call_regs).issubset(all_regs) - def convert_to_imm(self, c): + def convert_to_adr(self, c): assert isinstance(c, ConstFloat) adr = self.assembler.datablockwrapper.malloc_aligned(8, 8) x = c.getfloatstorage() rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x + return adr + + def convert_to_imm(self, c): + adr = self.convert_to_adr(c) return locations.ConstFloatLoc(adr) def __init__(self, longevity, frame_manager=None, assembler=None): @@ -77,8 +81,10 @@ def ensure_reg(self, box): if isinstance(box, Const): loc = self.get_scratch_reg() - immvalue = self.convert_to_imm(box) - self.assembler.load(loc, immvalue) + immadrvalue = self.convert_to_adr(box) + mc = self.assembler.mc + mc.load_imm(r.SCRATCH, immadrvalue) + mc.lfdx(loc.value, 0, r.SCRATCH.value) else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -134,19 +140,22 @@ def 
call_result_location(self, v): return r.r3 - def convert_to_imm(self, c): + def convert_to_int(self, c): if isinstance(c, ConstInt): - val = rffi.cast(lltype.Signed, c.value) - return locations.ImmLocation(val) + return rffi.cast(lltype.Signed, c.value) else: assert isinstance(c, ConstPtr) - return locations.ImmLocation(rffi.cast(lltype.Signed, c.value)) + return rffi.cast(lltype.Signed, c.value) + + def convert_to_imm(self, c): + val = self.convert_to_int(c) + return locations.ImmLocation(val) def ensure_reg(self, box): if isinstance(box, Const): loc = self.get_scratch_reg() - immvalue = self.convert_to_imm(box) - self.assembler.load(loc, immvalue) + immvalue = self.convert_to_int(box) + self.assembler.mc.load_imm(loc, immvalue) else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -593,15 +602,11 @@ def prepare_guard_exception(self, op): loc = self.ensure_reg(op.getarg(0)) - loc1 = r.SCRATCH2 if op.result in self.longevity: resloc = self.force_allocate_reg(op.result) else: resloc = None - pos_exc_value = imm(self.cpu.pos_exc_value()) - pos_exception = imm(self.cpu.pos_exception()) - arglocs = self._prepare_guard(op, - [loc, loc1, resloc, pos_exc_value, pos_exception]) + arglocs = self._prepare_guard(op, [loc, resloc]) return arglocs def prepare_guard_no_exception(self, op): @@ -644,7 +649,7 @@ # offset in type_info_group # - add 16/32 bytes, to go past the TYPE_INFO structure classptr = y_val - from pypy.rpython.memory.gctypelayout import GCData + from rpython.memory.gctypelayout import GCData sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) type_info_group = llop.gc_get_type_info_group(llmemory.Address) type_info_group = rffi.cast(lltype.Signed, type_info_group) @@ -962,10 +967,6 @@ return [sizeloc] def prepare_call_malloc_nursery_varsize(self, op): - gc_ll_descr = self.assembler.cpu.gc_ll_descr - if not hasattr(gc_ll_descr, 'max_size_of_young_obj'): - raise Exception("unreachable code") - # for boehm, this function should never be called # the 
result will be in r.RES self.rm.force_allocate_reg(op.result, selected_reg=r.RES) self.rm.temp_boxes.append(op.result) @@ -984,6 +985,8 @@ prepare_debug_merge_point = void prepare_jit_debug = void prepare_keepalive = void + prepare_enter_portal_frame = void + prepare_leave_portal_frame = void def prepare_cond_call_gc_wb(self, op): arglocs = [self.ensure_reg(op.getarg(0))] @@ -1019,9 +1022,8 @@ # # we need to make sure that no variable is stored in spp (=r31) for arg in inputargs: - if self.loc(arg) is r.SPP: - loc2 = self.fm.loc(arg) - self.assembler.mc.store(r.SPP, loc2) + assert self.loc(arg) is not r.SPP, ( + "variable stored in spp in prepare_label") self.rm.bindings_to_frame_reg.clear() # for i in range(len(inputargs)): @@ -1062,18 +1064,6 @@ resloc = self.after_call(op.result) return [resloc] + locs - def _prepare_args_for_new_op(self, new_args): - gc_ll_descr = self.cpu.gc_ll_descr - args = gc_ll_descr.args_for_new(new_args) - arglocs = [] - for i in range(len(args)): - arg = args[i] - t = TempInt() - l = self.force_allocate_reg(t, selected_reg=r.MANAGED_REGS[i]) - self.assembler.load(l, imm(arg)) - arglocs.append(t) - return arglocs - def prepare_force_spill(self, op): self.force_spill_var(op.getarg(0)) return [] diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.llinterp import LLInterpreter from rpython.rlib import rgc +from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU from rpython.jit.backend.ppc.ppc_assembler import AssemblerPPC from rpython.jit.backend.ppc.arch import WORD @@ -79,3 +80,7 @@ mc.copy_to_raw_memory(jmp) # positions invalidated looptoken.compiled_loop_token.invalidate_positions = [] + + def get_all_loop_runs(self): + # not implemented + return 
lltype.malloc(LOOP_RUN_CONTAINER, 0) diff --git a/rpython/jit/backend/ppc/test/autopath.py b/rpython/jit/backend/ppc/test/autopath.py deleted file mode 100644 --- a/rpython/jit/backend/ppc/test/autopath.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -self cloning, automatic path configuration - -copy this into any subdirectory of pypy from which scripts need -to be run, typically all of the test subdirs. -The idea is that any such script simply issues - - import autopath - -and this will make sure that the parent directory containing "pypy" -is in sys.path. - -If you modify the master "autopath.py" version (in pypy/tool/autopath.py) -you can directly run it which will copy itself on all autopath.py files -it finds under the pypy root directory. - -This module always provides these attributes: - - pypydir pypy root directory path - this_dir directory where this autopath.py resides - -""" - - -def __dirinfo(part): - """ return (partdir, this_dir) and insert parent of partdir - into sys.path. If the parent directories don't have the part - an EnvironmentError is raised.""" - - import sys, os - try: - head = this_dir = os.path.realpath(os.path.dirname(__file__)) - except NameError: - head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - - while head: - partdir = head - head, tail = os.path.split(head) - if tail == part: - break - else: - raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir) - - pypy_root = os.path.join(head, '') - try: - sys.path.remove(head) - except ValueError: - pass - sys.path.insert(0, head) - - munged = {} - for name, mod in sys.modules.items(): - if '.' 
in name: - continue - fn = getattr(mod, '__file__', None) - if not isinstance(fn, str): - continue - newname = os.path.splitext(os.path.basename(fn))[0] - if not newname.startswith(part + '.'): - continue - path = os.path.join(os.path.dirname(os.path.realpath(fn)), '') - if path.startswith(pypy_root) and newname != part: - modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep) - if newname != '__init__': - modpaths.append(newname) - modpath = '.'.join(modpaths) - if modpath not in sys.modules: - munged[modpath] = mod - - for name, mod in munged.iteritems(): - if name not in sys.modules: - sys.modules[name] = mod - if '.' in name: - prename = name[:name.rfind('.')] - postname = name[len(prename)+1:] - if prename not in sys.modules: - __import__(prename) - if not hasattr(sys.modules[prename], postname): - setattr(sys.modules[prename], postname, mod) - - return partdir, this_dir - -def __clone(): - """ clone master version of autopath.py into all subdirs """ - from os.path import join, walk - if not this_dir.endswith(join('pypy','tool')): - raise EnvironmentError("can only clone master version " - "'%s'" % join(pypydir, 'tool',_myname)) - - - def sync_walker(arg, dirname, fnames): - if _myname in fnames: - fn = join(dirname, _myname) - f = open(fn, 'rwb+') - try: - if f.read() == arg: - print "checkok", fn - else: - print "syncing", fn - f = open(fn, 'w') - f.write(arg) - finally: - f.close() - s = open(join(pypydir, 'tool', _myname), 'rb').read() - walk(pypydir, sync_walker, s) - -_myname = 'autopath.py' - -# set guaranteed attributes - -pypydir, this_dir = __dirinfo('pypy') - -if __name__ == '__main__': - __clone() diff --git a/rpython/jit/backend/ppc/test/test_call_assembler.py b/rpython/jit/backend/ppc/test/test_call_assembler.py deleted file mode 100644 --- a/rpython/jit/backend/ppc/test/test_call_assembler.py +++ /dev/null @@ -1,76 +0,0 @@ -import py -from rpython.jit.metainterp.history import BoxInt, ConstInt -from rpython.jit.metainterp.history 
import (BoxPtr, ConstPtr, BasicFailDescr, - BasicFinalDescr) -from rpython.jit.metainterp.history import JitCellToken -from rpython.jit.metainterp.resoperation import rop, ResOperation -from rpython.jit.codewriter import heaptracker -from rpython.jit.backend.llsupport.descr import GcCache -from rpython.jit.backend.llsupport.gc import GcLLDescription -from rpython.jit.backend.detect_cpu import getcpuclass -from rpython.jit.tool.oparser import parse -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import rclass, rstr -from rpython.jit.backend.llsupport.gc import GcLLDescr_framework - -from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.backend.ppc.runner import PPC_CPU -from rpython.jit.backend.ppc.test.test_runner import FakeStats - -class TestAssembler(object): - - type_system = 'lltype' - - def setup_class(cls): - cls.cpu = PPC_CPU(rtyper=None, stats=FakeStats()) - cls.cpu.setup_once() - - def interpret_direct_entry_point(self, ops, args, namespace): - loop = self.parse(ops, namespace) - looptoken = JitCellToken() - self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - param_sign_list = [] - for i, arg in enumerate(args): - if isinstance(arg, int): - param_sign_list.append(lltype.Signed) - elif isinstance(arg, float): - assert 0, "not implemented yet" - else: - assert 0, "not implemented yet" - - signature = lltype.FuncType(param_sign_list, lltype.Signed) - fail_descr = self.cpu.execute_token(looptoken, *args) - return fail_descr - - def parse(self, s, namespace, boxkinds=None): - return parse(s, self.cpu, namespace, - type_system=self.type_system, - boxkinds=boxkinds) - - # XXX this test should also be used by the other backends - def test_call_assembler_vary_arguments(self): - namespace = {} - numargs = 20 - - for i in range(numargs + 1): - namespace["fdescr%d" % i] = BasicFailDescr(i) - namespace["finishdescr"] = 
BasicFinalDescr(numargs + 1) - - for i in range(1, numargs + 1): - arglist = [] - guardlist = [] - - for k in range(i): - name = "i%d" % k - arglist.append(name) - guardlist.append("guard_value(%s, %d, descr=fdescr%d) [%s]" - % (name, k, k, name)) - - argstr = "".join(("[", ", ".join(arglist), "]\n")) - guardstr = "\n".join(guardlist) + "\n" - finish = "finish(descr=finishdescr)\n" - - trace = "".join((argstr, guardstr, finish)) - fail_descr = self.interpret_direct_entry_point(trace, range(i), namespace) - assert fail_descr.identifier == namespace["finishdescr"].identifier diff --git a/rpython/jit/backend/ppc/test/test_calling_convention.py b/rpython/jit/backend/ppc/test/test_calling_convention.py --- a/rpython/jit/backend/ppc/test/test_calling_convention.py +++ b/rpython/jit/backend/ppc/test/test_calling_convention.py @@ -1,5 +1,6 @@ from rpython.jit.backend.test.calling_convention_test import CallingConvTests from rpython.jit.backend.ppc.codebuilder import PPCBuilder +from rpython.rtyper.lltypesystem import lltype, rffi import rpython.jit.backend.ppc.register as r @@ -10,7 +11,7 @@ mc = PPCBuilder() mc.mr(r.r3.value, r.r1.value) mc.blr() - return mc.materialize(self.cpu, []) + return rffi.cast(lltype.Signed, mc.get_assembler_function()) def get_alignment_requirements(self): return 16 diff --git a/rpython/jit/backend/ppc/test/test_field.py b/rpython/jit/backend/ppc/test/test_field.py --- a/rpython/jit/backend/ppc/test/test_field.py +++ b/rpython/jit/backend/ppc/test/test_field.py @@ -1,5 +1,3 @@ -import autopath - from rpython.jit.backend.ppc.field import Field from py.test import raises diff --git a/rpython/jit/backend/ppc/test/test_form.py b/rpython/jit/backend/ppc/test/test_form.py --- a/rpython/jit/backend/ppc/test/test_form.py +++ b/rpython/jit/backend/ppc/test/test_form.py @@ -1,11 +1,11 @@ -import autopath from rpython.jit.backend.ppc.codebuilder import b import random import sys +from py.test import raises from rpython.jit.backend.ppc.form import Form, 
FormException from rpython.jit.backend.ppc.field import Field -from rpython.jit.backend.ppc.assembler import Assembler +from rpython.jit.backend.ppc.opassembler import OpAssembler as Assembler # 0 31 # +-------------------------------+ @@ -23,9 +23,9 @@ 'hh': Field('hh', 0, 7), } -def p(w): +def p(a): import struct - w = w.assemble() + w = a.insts[-1].assemble() return struct.pack('>i', w) class TestForm(Form): @@ -43,26 +43,28 @@ j = i(h=1) k = i(l=3) raises(FormException, k, l=0) + insts = [] a = T() a.i(5, 6) - assert p(a.assemble0()[0]) == '\000\005\000\006' + assert p(a) == '\000\005\000\006' a = T() a.j(2) - assert p(a.assemble0()[0]) == '\000\001\000\002' + assert p(a) == '\000\001\000\002' a = T() a.k(4) - assert p(a.assemble0()[0]) == '\000\004\000\003' + assert p(a) == '\000\004\000\003' def test_defdesc(self): class T(Assembler): i = TestForm('hh', 'hl', 'lh', 'll')() i.default(hl=0).default(hh=1) + insts = [] a = T() a.i(1, 2, 3, 4) - assert p(a.assemble0()[0]) == '\001\002\003\004' + assert p(a) == '\001\002\003\004' a = T() a.i(1, 3, 4) - assert p(a.assemble0()[0]) == '\001\000\003\004' + assert p(a) == '\001\000\003\004' a = T() a.i(3, 4) - assert p(a.assemble0()[0]) == '\001\000\003\004' + assert p(a) == '\001\000\003\004' diff --git a/rpython/jit/backend/ppc/test/test_generated.py b/rpython/jit/backend/ppc/test/test_generated.py deleted file mode 100644 --- a/rpython/jit/backend/ppc/test/test_generated.py +++ /dev/null @@ -1,525 +0,0 @@ -import py -from rpython.jit.metainterp.history import (AbstractFailDescr, - AbstractDescr, - BasicFailDescr, - BoxInt, Box, BoxPtr, - ConstInt, ConstPtr, - BoxObj, Const, - ConstObj, BoxFloat, ConstFloat) -from rpython.jit.metainterp.history import JitCellToken -from rpython.jit.metainterp.resoperation import ResOperation, rop -from rpython.rtyper.test.test_llinterp import interpret -from rpython.jit.backend.detect_cpu import getcpuclass - -CPU = getcpuclass() -class TestStuff(object): - - def test0(self): - 
faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_SUB, [ConstInt(-1073741824), v7], v11), - ResOperation(rop.INT_GE, [v3, ConstInt(23)], v12), - ResOperation(rop.GUARD_TRUE, [v12], None, descr=faildescr1), - ResOperation(rop.FINISH, [v9, v6, v10, v2, v8, v5, v1, v4], None, descr=faildescr2), - ] - looptoken = JitCellToken() - operations[2].setfailargs([v12, v8, v3, v2, v1, v11]) - cpu.compile_loop(inputargs, operations, looptoken) - args = [-12 , -26 , -19 , 7 , -5 , -24 , -37 , 62 , 9 , 12] - op = cpu.execute_token(looptoken, *args) - assert cpu.get_latest_value_int(0) == 0 - assert cpu.get_latest_value_int(1) == 62 - assert cpu.get_latest_value_int(2) == -19 - assert cpu.get_latest_value_int(3) == -26 - assert cpu.get_latest_value_int(4) == -12 - assert cpu.get_latest_value_int(5) == -1073741787 - - def test_overflow(self): - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - v16 = BoxInt() - v17 = BoxInt() - v18 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_SUB, [ConstInt(21), v5], v11), - ResOperation(rop.INT_MUL_OVF, [v8, v4], v12), - ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), - ResOperation(rop.UINT_LT, [v10, v3], v13), - ResOperation(rop.INT_IS_TRUE, [v3], v14), - ResOperation(rop.INT_XOR, [v9, v8], v15), 
- ResOperation(rop.INT_LE, [v12, v6], v16), - ResOperation(rop.UINT_GT, [v15, v5], v17), - ResOperation(rop.UINT_LE, [ConstInt(-9), v13], v18), - ResOperation(rop.GUARD_FALSE, [v13], None, descr=faildescr2), - ResOperation(rop.FINISH, [v7, v1, v2], None, descr=faildescr3), - ] - operations[2].setfailargs([v10, v6]) - operations[9].setfailargs([v15, v7, v10, v18, v4, v17, v1]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [16 , 5 , 5 , 16 , 46 , 6 , 63 , 39 , 78 , 0] - op = cpu.execute_token(looptoken, *args) - assert cpu.get_latest_value_int(0) == 105 - assert cpu.get_latest_value_int(1) == 63 - assert cpu.get_latest_value_int(2) == 0 - assert cpu.get_latest_value_int(3) == 0 - assert cpu.get_latest_value_int(4) == 16 - assert cpu.get_latest_value_int(5) == 1 - assert cpu.get_latest_value_int(6) == 16 - - def test_sub_with_neg_const_first_arg(self): - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - tmp13 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_EQ, [ConstInt(17), v9], v11), - ResOperation(rop.INT_SUB_OVF, [ConstInt(-32), v7], v12), - ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), - ResOperation(rop.INT_IS_ZERO, [v12], tmp13), - ResOperation(rop.GUARD_TRUE, [tmp13], None, descr=faildescr2), - ResOperation(rop.FINISH, [v5, v2, v1, v10, v3, v8, v4, v6], None, descr=faildescr3) - ] - operations[2].setfailargs([v8, v3]) - operations[4].setfailargs([v2, v12, v1, v3, v4]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [-5 , 24 , 46 , -15 , 13 , -8 , 0 , -6 , 6 , 6] - op = cpu.execute_token(looptoken, *args) - assert 
op.identifier == 2 - assert cpu.get_latest_value_int(0) == 24 - assert cpu.get_latest_value_int(1) == -32 - assert cpu.get_latest_value_int(2) == -5 - assert cpu.get_latest_value_int(3) == 46 - assert cpu.get_latest_value_int(4) == -15 - - def test_tempbox_spilling_in_sub(self): - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_LT, [v9, v9], v11), - ResOperation(rop.INT_ADD, [ConstInt(715827882), v4], v12), - ResOperation(rop.INT_NEG, [v11], v13), - ResOperation(rop.INT_IS_TRUE, [v3], v14), - ResOperation(rop.INT_SUB_OVF, [v3, ConstInt(-95)], v15), - ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), - ResOperation(rop.FINISH, [v8, v2, v6, v5, v7, v1, v10], None, descr=faildescr2), - ] - operations[5].setfailargs([]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [19 , -3 , -58 , -7 , 12 , 22 , -54 , -29 , -19 , -64] - op = cpu.execute_token(looptoken, *args) - assert cpu.get_latest_value_int(0) == -29 - assert cpu.get_latest_value_int(1) == -3 - assert cpu.get_latest_value_int(2) == 22 - assert cpu.get_latest_value_int(3) == 12 - assert cpu.get_latest_value_int(4) == -54 - assert cpu.get_latest_value_int(5) == 19 - assert cpu.get_latest_value_int(6) == -64 - - def test_tempbox2(self): - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - cpu = CPU(None, None) 
- cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_LT, [v5, ConstInt(-67)], v11), - ResOperation(rop.INT_INVERT, [v2], v12), - ResOperation(rop.INT_SUB, [ConstInt(-45), v2], v13), - ResOperation(rop.INT_SUB, [ConstInt(99), v6], v14), - ResOperation(rop.INT_MUL_OVF, [v6, v9], v15), - ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), - ResOperation(rop.FINISH, [v1, v4, v10, v8, v7, v3], None, descr=faildescr2), - ] - looptoken = JitCellToken() - operations[5].setfailargs([]) - cpu.compile_loop(inputargs, operations, looptoken) - args = [1073741824 , 95 , -16 , 5 , 92 , 12 , 32 , 17 , 37 , -63] - op = cpu.execute_token(looptoken, *args) - assert cpu.get_latest_value_int(0) == 1073741824 - assert cpu.get_latest_value_int(1) == 5 - assert cpu.get_latest_value_int(2) == -63 - assert cpu.get_latest_value_int(3) == 17 - assert cpu.get_latest_value_int(4) == 32 - assert cpu.get_latest_value_int(5) == -16 - - def test_wrong_guard(self): - # generated by: - # ../test/ test/test_zll_random.py -l -k arm -s --block-length=10 --random-seed=4338 - - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - faildescr4 = BasicFailDescr(4) - v1 = BoxInt(32) - v2 = BoxInt(41) - v3 = BoxInt(-9) - v4 = BoxInt(12) - v5 = BoxInt(-18) - v6 = BoxInt(46) - v7 = BoxInt(15) - v8 = BoxInt(17) - v9 = BoxInt(10) - v10 = BoxInt(12) - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - tmp15 = BoxInt() - tmp16 = BoxInt() - tmp17 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_IS_TRUE, [v1], tmp15), - ResOperation(rop.GUARD_TRUE, [tmp15], None, descr=faildescr1), - ResOperation(rop.INT_GT, [v4, v5], v11), - ResOperation(rop.INT_XOR, [ConstInt(-4), v7], v12), - ResOperation(rop.INT_MUL, [ConstInt(23), v11], v13), - ResOperation(rop.UINT_GE, [ConstInt(1), v13], v14), 
- ResOperation(rop.INT_IS_ZERO, [v14], tmp16), - ResOperation(rop.GUARD_TRUE, [tmp16], None, descr=faildescr2), - ResOperation(rop.INT_IS_TRUE, [v12], tmp17), - ResOperation(rop.GUARD_FALSE, [tmp17], None, descr=faildescr3), - ResOperation(rop.FINISH, [v8, v10, v6, v3, v2, v9], None, descr=faildescr4), - ] - looptoken = JitCellToken() - operations[1].setfailargs([v8, v6, v1]) - operations[7].setfailargs([v4]) - operations[9].setfailargs([v10, v13]) - args = [32 , 41 , -9 , 12 , -18 , 46 , 15 , 17 , 10 , 12] - cpu.compile_loop(inputargs, operations, looptoken) - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 3 - assert cpu.get_latest_value_int(0) == 12 - assert cpu.get_latest_value_int(1) == 23 - - def test_wrong_guard2(self): - # random seed: 8029 - # block length: 10 - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - v16 = BoxInt() - tmp17 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_ADD_OVF, [v8, ConstInt(-30)], v11), - ResOperation(rop.GUARD_NO_OVERFLOW, [], None, descr=faildescr1), - ResOperation(rop.UINT_LE, [v11, v1], v12), - ResOperation(rop.INT_AND, [v11, ConstInt(31)], tmp17), - ResOperation(rop.UINT_RSHIFT, [v12, tmp17], v13), - ResOperation(rop.INT_NE, [v3, v2], v14), - ResOperation(rop.INT_NE, [ConstInt(1), v11], v15), - ResOperation(rop.INT_NE, [ConstInt(23), v15], v16), - ResOperation(rop.GUARD_FALSE, [v15], None, descr=faildescr2), - ResOperation(rop.FINISH, [v4, v10, v6, v5, v9, v7], None, descr=faildescr3), - ] - operations[1].setfailargs([v6, v8, v1, v4]) - operations[8].setfailargs([v5, v9]) - looptoken = JitCellToken() - 
cpu.compile_loop(inputargs, operations, looptoken) - args = [-8 , 0 , 62 , 35 , 16 , 9 , 30 , 581610154 , -1 , 738197503] - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 2 - assert cpu.get_latest_value_int(0) == 16 - assert cpu.get_latest_value_int(1) == -1 - - def test_wrong_guard3(self): - # random seed: 8029 - # block length: 10 - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - faildescr4 = BasicFailDescr(4) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - v16 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.UINT_LT, [ConstInt(-11), v7], v11), - ResOperation(rop.INT_GE, [v3, v5], v12), - ResOperation(rop.INT_INVERT, [v9], v13), - ResOperation(rop.GUARD_VALUE, [v13, ConstInt(14)], None, descr=faildescr3), - ResOperation(rop.INT_IS_ZERO, [v12], v14), - ResOperation(rop.INT_SUB, [v2, v13], v15), - ResOperation(rop.GUARD_VALUE, [v15, ConstInt(-32)], None, descr=faildescr4), - ResOperation(rop.INT_FLOORDIV, [v3, ConstInt(805306366)], v16), - ResOperation(rop.GUARD_VALUE, [v15, ConstInt(0)], None, descr=faildescr1), - ResOperation(rop.FINISH, [v10, v8, v1, v6, v4], None, descr=faildescr2), - ] - operations[3].setfailargs([]) - operations[-4].setfailargs([v15]) - operations[-2].setfailargs([v9, v4, v10, v11, v14]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [-39 , -18 , 1588243114 , -9 , -4 , 1252698794 , 0 , 715827882 , -15 , 536870912] - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 1 - assert cpu.get_latest_value_int(0) == -15 - assert cpu.get_latest_value_int(1) == -9 - assert cpu.get_latest_value_int(2) == 536870912 - assert 
cpu.get_latest_value_int(3) == 0 - assert cpu.get_latest_value_int(4) == 0 - - def test_wrong_result(self): - # generated by: - # ../test/ test/test_zll_random.py -l -k arm -s --block-length=10 --random-seed=7389 - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - faildescr3 = BasicFailDescr(3) - faildescr4 = BasicFailDescr(4) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - tmp16 = BoxInt() - tmp17 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_IS_TRUE, [v3], tmp16), - ResOperation(rop.GUARD_TRUE, [tmp16], None, descr=faildescr1), - ResOperation(rop.INT_AND, [v7, ConstInt(31)], tmp17), - ResOperation(rop.INT_RSHIFT, [v5, tmp17], v11), - ResOperation(rop.INT_OR, [v6, v8], v12), - ResOperation(rop.GUARD_VALUE, [v11, ConstInt(-2)], None, descr=faildescr2), - ResOperation(rop.INT_LE, [ConstInt(1789569706), v10], v13), - ResOperation(rop.INT_IS_TRUE, [v4], v14), - ResOperation(rop.INT_XOR, [v14, v3], v15), - ResOperation(rop.GUARD_VALUE, [v8, ConstInt(-8)], None, descr=faildescr3), - ResOperation(rop.FINISH, [v1, v2, v9], None, descr=faildescr4), - ] - operations[1].setfailargs([v9, v1]) - operations[5].setfailargs([v10, v2, v11, v3]) - operations[9].setfailargs([v5, v7, v12, v14, v2, v13, v8]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [0 , -2 , 24 , 1 , -4 , 13 , -95 , 33 , 2 , -44] - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 3 - assert cpu.get_latest_value_int(0) == -4 - assert cpu.get_latest_value_int(1) == -95 - assert cpu.get_latest_value_int(2) == 45 - assert cpu.get_latest_value_int(3) == 1 - assert cpu.get_latest_value_int(4) == -2 - assert cpu.get_latest_value_int(5) == 0 - 
assert cpu.get_latest_value_int(6) == 33 - - def test_int_add(self): - # random seed: 1202 - # block length: 4 - # AssertionError: Got 1431655764, expected 357913940 for value #3 - faildescr1 = BasicFailDescr(1) - faildescr2 = BasicFailDescr(2) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - tmp12 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_ADD, [ConstInt(-1073741825), v3], v11), - ResOperation(rop.INT_IS_TRUE, [v1], tmp12), - ResOperation(rop.GUARD_FALSE, [tmp12], None, descr=faildescr1), - ResOperation(rop.FINISH, [v8, v2, v10, v6, v7, v9, v5, v4], None, descr=faildescr2), - ] - operations[2].setfailargs([v10, v3, v6, v11, v9, v2]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [3 , -5 , 1431655765 , 47 , 12 , 1789569706 , 15 , 939524096 , 16 , -43] - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 1 - assert cpu.get_latest_value_int(0) == -43 - assert cpu.get_latest_value_int(1) == 1431655765 - assert cpu.get_latest_value_int(2) == 1789569706 - assert cpu.get_latest_value_int(3) == 357913940 - assert cpu.get_latest_value_int(4) == 16 - assert cpu.get_latest_value_int(5) == -5 - - def test_wrong_result2(self): - # block length 10 - # random seed 1 - f1 = BasicFailDescr(1) - f2 = BasicFailDescr(2) - f3 = BasicFailDescr(3) - v1 = BoxInt() - v2 = BoxInt() - v3 = BoxInt() - v4 = BoxInt() - v5 = BoxInt() - v6 = BoxInt() - v7 = BoxInt() - v8 = BoxInt() - v9 = BoxInt() - v10 = BoxInt() - v11 = BoxInt() - v12 = BoxInt() - v13 = BoxInt() - v14 = BoxInt() - v15 = BoxInt() - cpu = CPU(None, None) - cpu.setup_once() - inputargs = [v1, v2, v3, v4, v5, v6, v7, v8, v9, v10] - operations = [ - ResOperation(rop.INT_LE, [v6, v1], v11), - ResOperation(rop.SAME_AS, [ConstInt(-14)], v12), 
- ResOperation(rop.INT_ADD, [ConstInt(24), v4], v13), - ResOperation(rop.UINT_RSHIFT, [v6, ConstInt(0)], v14), - ResOperation(rop.GUARD_VALUE, [v14, ConstInt(1)], None, descr=f3), - ResOperation(rop.INT_MUL, [v13, ConstInt(12)], v15), - ResOperation(rop.GUARD_FALSE, [v11], None, descr=f1), - ResOperation(rop.FINISH, [v2, v3, v5, v7, v10, v8, v9], None, descr=f2), - ] - operations[-2].setfailargs([v4, v10, v3, v9, v14, v2]) - operations[4].setfailargs([v14]) - looptoken = JitCellToken() - cpu.compile_loop(inputargs, operations, looptoken) - args = [14 , -20 , 18 , -2058005163 , 6 , 1 , -16 , 11 , 0 , 19] - op = cpu.execute_token(looptoken, *args) - assert op.identifier == 1 - assert cpu.get_latest_value_int(0) == -2058005163 - assert cpu.get_latest_value_int(1) == 19 - assert cpu.get_latest_value_int(2) == 18 - assert cpu.get_latest_value_int(3) == 0 - assert cpu.get_latest_value_int(4) == 1 - assert cpu.get_latest_value_int(5) == -20 diff --git a/rpython/jit/backend/ppc/test/test_ppc.py b/rpython/jit/backend/ppc/test/test_ppc.py --- a/rpython/jit/backend/ppc/test/test_ppc.py +++ b/rpython/jit/backend/ppc/test/test_ppc.py @@ -6,7 +6,8 @@ from rpython.jit.backend.ppc.register import * from rpython.jit.backend.ppc import form from rpython.jit.backend import detect_cpu -from rpython.jit.backend.ppc.arch import IS_PPC_32, IS_PPC_64, WORD +from rpython.jit.backend.ppc.arch import IS_PPC_32, IS_PPC_64, IS_BIG_ENDIAN +from rpython.jit.backend.ppc.arch import WORD from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.annlowlevel import llhelper @@ -15,7 +16,8 @@ class TestDisassemble(object): def test_match(self): - A = BasicPPCAssembler + class A(BasicPPCAssembler): + insts = [] a = A() a.add(1, 2, 3) inst = a.insts[-1] @@ -29,12 +31,11 @@ - Create a function and call it - Compare the return value with the expected result """ -def asmtest(expected=-1): +def asmtest(expected): def testmaker(test): def newtest(self): a = PPCBuilder() test(self, a) - #f = 
a.assemble() f = a.get_assembler_function() assert f() == expected return newtest @@ -196,10 +197,16 @@ a.li(3, 50) if IS_PPC_32: a.load_imm(r10, call_addr) - else: + elif IS_BIG_ENDIAN: + # load the 3-words descriptor a.load_from_addr(r10, call_addr) a.load_from_addr(r2, call_addr+WORD) a.load_from_addr(r11, call_addr+2*WORD) + else: + # no descriptor on little-endian, but the ABI says r12 must + # contain the function pointer + a.load_imm(r10, call_addr) + a.mr(12, 10) a.mtctr(10) a.bctr() a.blr() @@ -306,21 +313,6 @@ lltype.free(p, flavor="raw") -class AsmCode(object): - def __init__(self, size): - self.code = MachineCodeBlockWrapper() - - def emit(self, insn): - bytes = struct.pack("i", insn) - for byte in bytes: - self.code.writechar(byte) - - def get_function(self): - i = self.code.materialize(AsmMemoryManager(), []) - t = lltype.FuncType([], lltype.Signed) - return rffi.cast(lltype.Ptr(t), i) - - def func(arg): return arg + 15 diff --git a/rpython/jit/backend/ppc/test/test_rassemblermaker.py b/rpython/jit/backend/ppc/test/test_rassemblermaker.py deleted file mode 100644 --- a/rpython/jit/backend/ppc/test/test_rassemblermaker.py +++ /dev/null @@ -1,39 +0,0 @@ -from rpython.jit.backend.ppc.rassemblermaker import make_rassembler -from rpython.jit.backend.ppc.codebuilder import PPCAssembler - -RPPCAssembler = make_rassembler(PPCAssembler) - -_a = PPCAssembler() -_a.add(3, 3, 4) -add_r3_r3_r4 = _a.insts[0] - -def test_simple(): - ra = RPPCAssembler() - ra.add(3, 3, 4) - assert ra.insts == [add_r3_r3_r4] - -def test_rtyped(): - from rpython.rtyper.test.test_llinterp import interpret - def f(): - ra = RPPCAssembler() - ra.add(3, 3, 4) - ra.lwz(1, 1, 1) # ensure that high bit doesn't produce long but r_uint - return ra.insts[0] - res = interpret(f, []) - assert res == add_r3_r3_r4 - -def test_mnemonic(): - mrs = [] - for A in PPCAssembler, RPPCAssembler: - a = A() - a.mr(3, 4) - mrs.append(a.insts[0]) - assert mrs[0] == mrs[1] - -def test_spr_coding(): - mrs = [] - 
for A in PPCAssembler, RPPCAssembler: - a = A() - a.mtctr(3) - mrs.append(a.insts[0]) - assert mrs[0] == mrs[1] diff --git a/rpython/jit/backend/ppc/test/test_regalloc.py b/rpython/jit/backend/ppc/test/test_regalloc.py --- a/rpython/jit/backend/ppc/test/test_regalloc.py +++ b/rpython/jit/backend/ppc/test/test_regalloc.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.lltypesystem import rclass, rstr +from rpython.rtyper.lltypesystem import rstr +from rpython.rtyper import rclass from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import instantiate from rpython.jit.backend.ppc.locations import (imm, RegisterLocation, @@ -8,8 +9,6 @@ from rpython.jit.backend.ppc.codebuilder import hi, lo from rpython.jit.backend.ppc.ppc_assembler import AssemblerPPC from rpython.jit.backend.ppc.arch import WORD -from rpython.jit.backend.ppc.locations import get_spp_offset -from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.codewriter import longlong from rpython.jit.metainterp.history import BasicFailDescr, \ @@ -118,8 +117,8 @@ def test_mem_to_reg(self): self.asm.regalloc_mov(stack(5), reg(10)) self.asm.regalloc_mov(stack(0), reg(0)) - exp_instrs = [MI("load", r10.value, SPP.value, -(5 * WORD + WORD)), - MI("load", r0.value, SPP.value, -(WORD))] + exp_instrs = [MI("load", r10.value, SPP.value, get_spp_offset(5)), + MI("load", r0.value, SPP.value, get_spp_offset(0))] assert self.asm.mc.instrs == exp_instrs def test_mem_to_mem(self): @@ -141,143 +140,15 @@ def test_reg_to_mem(self): self.asm.regalloc_mov(reg(5), stack(10)) self.asm.regalloc_mov(reg(0), stack(2)) - exp_instrs = [MI("store", r5.value, SPP.value, -(10 * WORD + WORD)), - MI("store", r0.value, SPP.value, -(2 * WORD + WORD))] + exp_instrs = [MI("store", r5.value, SPP.value, get_spp_offset(10)), + MI("store", r0.value, SPP.value, get_spp_offset(2))] assert self.asm.mc.instrs 
== exp_instrs def reg(i): return RegisterLocation(i) def stack(i): - return StackLocation(i) + return StackLocation(i, get_spp_offset(i)) -CPU = getcpuclass() From noreply at buildbot.pypy.org Fri Oct 2 10:52:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Oct 2015 10:52:01 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: hg merge default Message-ID: <20151002085201.19DA51C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r79927:add2cdb7a761 Date: 2015-09-21 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/add2cdb7a761/ Log: hg merge default diff too long, truncating to 2000 out of 50549 lines diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -303,7 +303,7 @@ RegrTest('test_memoryio.py'), RegrTest('test_memoryview.py'), RegrTest('test_md5.py'), - RegrTest('test_mhlib.py'), + RegrTest('test_mhlib.py', usemodules='binascii struct'), RegrTest('test_mimetools.py'), RegrTest('test_mimetypes.py'), RegrTest('test_MimeWriter.py', core=False, usemodules='binascii'), diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1026,16 +1026,22 @@ def tigetflag(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetflag(capname) def tigetnum(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetnum(capname) def tigetstr(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') val = lib.tigetstr(capname) if int(ffi.cast("intptr_t", val)) in (0, -1): return None diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -92,6 +92,8 @@ if sys.platform == "win32": 
module_suggests["cpyext"].append(("translation.shared", True)) + +# NOTE: this dictionary is not used any more module_import_dependencies = { # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception @@ -108,6 +110,7 @@ } def get_module_validator(modname): + # NOTE: this function is not used any more if modname in module_import_dependencies: modlist = module_import_dependencies[modname] def validator(config): diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -20,10 +20,6 @@ It initializes the RPython/PyPy GC and does a bunch of necessary startup code. This function cannot fail. -.. function:: void pypy_init_threads(void); - - Initialize threads. Only need to be called if there are any threads involved - .. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given @@ -38,6 +34,11 @@ Function returns 0 on success or -1 on failure, can be called multiple times until the library is found. +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved. + *Must be called after pypy_setup_home()* + .. function:: int pypy_execute_source(char* source); Execute the Python source code given in the ``source`` argument. In case of diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -5,19 +5,8 @@ understanding what's pypy's JIT doing while running your program. There are three functions related to that coming from the ``pypyjit`` module: -.. function:: set_optimize_hook(callable) - Set a compiling hook that will be called each time a loop is optimized, - but before assembler compilation. This allows adding additional - optimizations on Python level. - - The callable will be called with the ``pypyjit.JitLoopInfo`` object. 
- Refer to it's documentation for details. - - Result value will be the resulting list of operations, or None - - -.. function:: set_compile_hook(callable) +.. function:: set_compile_hook(callable, operations=True) Set a compiling hook that will be called each time a loop is compiled. @@ -28,6 +17,9 @@ inside the jit hook is itself jitted, it will get compiled, but the jit hook won't be called for that. + if operations=False, no list of operations will be available. Useful + if the hook is supposed to be very lighweight. + .. function:: set_abort_hook(hook) Set a hook (callable) that will be called each time there is tracing @@ -66,3 +58,25 @@ * ``loop_run_times`` - counters for number of times loops are run, only works when ``enable_debug`` is called. + +.. class:: JitLoopInfo + + A class containing information about the compiled loop. Usable attributes: + + * ``operations`` - list of operations, if requested + + * ``jitdriver_name`` - the name of jitdriver associated with this loop + + * ``greenkey`` - a key at which the loop got compiled (e.g. code position, + is_being_profiled, pycode tuple for python jitdriver) + + * ``loop_no`` - loop cardinal number + + * ``bridge_no`` - id of the fail descr + + * ``type`` - "entry bridge", "loop" or "bridge" + + * ``asmaddr`` - an address in raw memory where assembler resides + + * ``asmlen`` - length of raw memory with assembler associated + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,31 @@ .. this is a revision shortly after release-2.6.1 .. startrev: 07769be4057b +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. + +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. 
Should improve warmup time +by 20% or so. + +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends + +.. branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports + +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. + +.. branch: remember-tracing-counts +Reenable jithooks diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -111,7 +111,6 @@ 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) - # try: from rpython.jit.backend import detect_cpu model = detect_cpu.autodetect() @@ -121,7 +120,7 @@ raise else: pass # ok fine to ignore in this case - # + if self.space.config.translation.jit: features = detect_cpu.getcpufeatures(model) self.extra_interpdef('jit_backend_features', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -19,13 +19,27 @@ # ____________________________________________________________ +class Closure(object): + """This small class is here to have a __del__ outside any cycle.""" + + ll_error = lltype.nullptr(rffi.CCHARP.TO) # set manually + + def __init__(self, ptr): + self.ptr = ptr + + def __del__(self): + clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self.ptr)) + if self.ll_error: + 
lltype.free(self.ll_error, flavor='raw') + + class W_CDataCallback(W_CData): #_immutable_fields_ = ... - ll_error = lltype.nullptr(rffi.CCHARP.TO) w_onerror = None def __init__(self, space, ctype, w_callable, w_error, w_onerror): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) + self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) # if not space.is_true(space.callable(w_callable)): @@ -44,10 +58,11 @@ if size > 0: if fresult.is_primitive_integer and size < SIZE_OF_FFI_ARG: size = SIZE_OF_FFI_ARG - self.ll_error = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw', - zero=True) + self._closure.ll_error = lltype.malloc(rffi.CCHARP.TO, size, + flavor='raw', zero=True) if not space.is_none(w_error): - convert_from_object_fficallback(fresult, self.ll_error, w_error) + convert_from_object_fficallback(fresult, self._closure.ll_error, + w_error) # self.unique_id = compute_unique_id(self) global_callback_mapping.set(self.unique_id, self) @@ -74,12 +89,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) - #@rgc.must_be_light_finalizer - def __del__(self): - clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self._ptr)) - if self.ll_error: - lltype.free(self.ll_error, flavor='raw') - def _repr_extra(self): space = self.space return 'calling ' + space.str_w(space.repr(self.w_callable)) @@ -114,8 +123,8 @@ def write_error_return_value(self, ll_res): fresult = self.getfunctype().ctitem if fresult.size > 0: - misc._raw_memcopy(self.ll_error, ll_res, fresult.size) - keepalive_until_here(self) # to keep self.ll_error alive + misc._raw_memcopy(self._closure.ll_error, ll_res, fresult.size) + keepalive_until_here(self) # to keep self._closure.ll_error alive global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ 
b/pypy/module/itertools/interp_itertools.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from rpython.rlib import jit class W_Count(W_Root): @@ -322,6 +323,11 @@ """) +islice_ignore_items_driver = jit.JitDriver(name='islice_ignore_items', + greens=['tp'], + reds=['num', 'w_islice', + 'w_iterator']) + class W_ISlice(W_Root): def __init__(self, space, w_iterable, w_startstop, args_w): self.iterable = space.iter(w_iterable) @@ -407,11 +413,18 @@ raise def _ignore_items(self, num): - if self.iterable is None: + w_iterator = self.iterable + if w_iterator is None: raise OperationError(self.space.w_StopIteration, self.space.w_None) + + tp = self.space.type(w_iterator) while True: + islice_ignore_items_driver.jit_merge_point(tp=tp, + num=num, + w_islice=self, + w_iterator=w_iterator) try: - self.space.next(self.iterable) + self.space.next(w_iterator) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): self.iterable = None diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -1085,3 +1085,18 @@ assert list(itertools.islice(c2, 3)) == expected c3 = pickle.loads(pickle.dumps(c)) assert list(itertools.islice(c3, 3)) == expected + + def test_islice_attack(self): + import itertools + class Iterator(object): + first = True + def __iter__(self): + return self + def next(self): + if self.first: + self.first = False + list(islice) + return 52 + myiter = Iterator() + islice = itertools.islice(myiter, 5, 8) + raises(StopIteration, islice.next) diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -8,8 +8,11 @@ 'set_param': 
'interp_jit.set_param', 'residual_call': 'interp_jit.residual_call', 'not_from_assembler': 'interp_jit.W_NotFromAssembler', + 'get_jitcell_at_key': 'interp_jit.get_jitcell_at_key', + 'dont_trace_here': 'interp_jit.dont_trace_here', + 'trace_next_iteration': 'interp_jit.trace_next_iteration', + 'trace_next_iteration_hash': 'interp_jit.trace_next_iteration_hash', 'set_compile_hook': 'interp_resop.set_compile_hook', - 'set_optimize_hook': 'interp_resop.set_optimize_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', 'enable_debug': 'interp_resop.enable_debug', @@ -17,7 +20,6 @@ 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', - 'Box': 'interp_resop.WrappedBox', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', } diff --git a/pypy/module/pypyjit/hooks.py b/pypy/module/pypyjit/hooks.py --- a/pypy/module/pypyjit/hooks.py +++ b/pypy/module/pypyjit/hooks.py @@ -35,10 +35,10 @@ self._compile_hook(debug_info, is_bridge=True) def before_compile(self, debug_info): - self._optimize_hook(debug_info, is_bridge=False) + pass def before_compile_bridge(self, debug_info): - self._optimize_hook(debug_info, is_bridge=True) + pass def _compile_hook(self, debug_info, is_bridge): space = self.space @@ -46,7 +46,8 @@ if cache.in_recursion: return if space.is_true(cache.w_compile_hook): - w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge) + w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge, + cache.compile_hook_with_ops) cache.in_recursion = True try: try: @@ -57,33 +58,4 @@ finally: cache.in_recursion = False - def _optimize_hook(self, debug_info, is_bridge=False): - space = self.space - cache = space.fromcache(Cache) - if cache.in_recursion: - return - if space.is_true(cache.w_optimize_hook): - w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge) - cache.in_recursion = True - try: - try: - w_res = 
space.call_function(cache.w_optimize_hook, - space.wrap(w_debug_info)) - if space.is_w(w_res, space.w_None): - return - l = [] - for w_item in space.listview(w_res): - item = space.interp_w(WrappedOp, w_item) - l.append(jit_hooks._cast_to_resop(item.op)) - del debug_info.operations[:] # modifying operations above is - # probably not a great idea since types may not work - # and we'll end up with half-working list and - # a segfault/fatal RPython error - for elem in l: - debug_info.operations.append(elem) - except OperationError, e: - e.write_unraisable(space, "jit hook ", cache.w_compile_hook) - finally: - cache.in_recursion = False - pypy_hooks = PyPyJitIface() diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -5,11 +5,14 @@ from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside -from rpython.rlib import jit -from rpython.rlib.jit import current_trace_length, unroll_parameters +from rpython.rlib import jit, jit_hooks +from rpython.rlib.jit import current_trace_length, unroll_parameters,\ + JitHookInterface +from rpython.rtyper.annlowlevel import cast_instance_to_gcref import pypy.interpreter.pyopcode # for side-effects from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pycode import CO_GENERATOR, PyCode +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from pypy.interpreter.baseobjspace import W_Root @@ -188,3 +191,100 @@ __call__ = interp2app(W_NotFromAssembler.descr_call), ) W_NotFromAssembler.typedef.acceptable_as_base_class = False + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def get_jitcell_at_key(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) 
+ return space.wrap(bool(jit_hooks.get_jitcell_at_key( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode))) + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def dont_trace_here(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) + jit_hooks.dont_trace_here( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode) + return space.w_None + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def trace_next_iteration(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) + jit_hooks.trace_next_iteration( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode) + return space.w_None + + at unwrap_spec(hash=r_uint) + at dont_look_inside +def trace_next_iteration_hash(space, hash): + jit_hooks.trace_next_iteration_hash('pypyjit', hash) + return space.w_None + +# class Cache(object): +# in_recursion = False + +# def __init__(self, space): +# self.w_compile_bridge = space.w_None +# self.w_compile_loop = space.w_None + +# def set_compile_bridge(space, w_hook): +# cache = space.fromcache(Cache) +# assert w_hook is not None +# cache.w_compile_bridge = w_hook + +# def set_compile_loop(space, w_hook): +# from rpython.rlib.nonconst import NonConstant + +# cache = space.fromcache(Cache) +# assert w_hook is not None +# cache.w_compile_loop = w_hook +# cache.in_recursion = NonConstant(False) + +# class PyPyJitHookInterface(JitHookInterface): +# def after_compile(self, debug_info): +# space = self.space +# cache = space.fromcache(Cache) +# if cache.in_recursion: +# return +# l_w = [] +# if not space.is_true(cache.w_compile_loop): +# return +# for i, op in enumerate(debug_info.operations): +# if op.is_guard(): +# w_t = space.newtuple([space.wrap(i), space.wrap(op.getopnum()), space.wrap(op.getdescr().get_jitcounter_hash())]) +# l_w.append(w_t) +# try: +# 
cache.in_recursion = True +# try: +# space.call_function(cache.w_compile_loop, space.newlist(l_w)) +# except OperationError, e: +# e.write_unraisable(space, "jit hook ", cache.w_compile_bridge) +# finally: +# cache.in_recursion = False + +# def after_compile_bridge(self, debug_info): +# space = self.space +# cache = space.fromcache(Cache) +# if cache.in_recursion: +# return +# if not space.is_true(cache.w_compile_bridge): +# return +# w_hash = space.wrap(debug_info.fail_descr.get_jitcounter_hash()) +# try: +# cache.in_recursion = True +# try: +# space.call_function(cache.w_compile_bridge, w_hash) +# except OperationError, e: +# e.write_unraisable(space, "jit hook ", cache.w_compile_bridge) +# finally: +# cache.in_recursion = False + +# def before_compile(self, debug_info): +# pass + +# def before_compile_bridge(self, debug_info): +# pass + +# pypy_hooks = PyPyJitHookInterface() + diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, hlstr from rpython.rtyper.rclass import OBJECT -from rpython.jit.metainterp.resoperation import rop +#from rpython.jit.metainterp.resoperation import rop from rpython.rlib.nonconst import NonConstant from rpython.rlib import jit_hooks from rpython.rlib.jit import Counters @@ -22,7 +22,6 @@ def __init__(self, space): self.w_compile_hook = space.w_None self.w_abort_hook = space.w_None - self.w_optimize_hook = space.w_None def getno(self): self.no += 1 @@ -43,8 +42,9 @@ else: return space.wrap(greenkey_repr) -def set_compile_hook(space, w_hook): - """ set_compile_hook(hook) + at unwrap_spec(operations=bool) +def set_compile_hook(space, w_hook, operations=True): + """ set_compile_hook(hook, operations=True) Set a compiling hook that will be called each time a loop is compiled. 
@@ -58,25 +58,9 @@ cache = space.fromcache(Cache) assert w_hook is not None cache.w_compile_hook = w_hook + cache.compile_hook_with_ops = operations cache.in_recursion = NonConstant(False) -def set_optimize_hook(space, w_hook): - """ set_optimize_hook(hook) - - Set a compiling hook that will be called each time a loop is optimized, - but before assembler compilation. This allows adding additional - optimizations on Python level. - - The hook will be called with the pypyjit.JitLoopInfo object. Refer to it's - docstring for details. - - Result value will be the resulting list of operations, or None - """ - cache = space.fromcache(Cache) - cache.w_optimize_hook = w_hook - cache.in_recursion = NonConstant(False) - - def set_abort_hook(space, w_hook): """ set_abort_hook(hook) @@ -96,6 +80,9 @@ cache.in_recursion = NonConstant(False) def wrap_oplist(space, logops, operations, ops_offset=None): + # this function is called from the JIT + from rpython.jit.metainterp.resoperation import rop + l_w = [] jitdrivers_sd = logops.metainterp_sd.jitdrivers_sd for op in operations: @@ -103,117 +90,58 @@ ofs = -1 else: ofs = ops_offset.get(op, 0) - if op.opnum == rop.DEBUG_MERGE_POINT: + num = op.getopnum() + name = op.getopname() + if num == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) - l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), + l_w.append(DebugMergePoint(space, name, logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), op.getarg(2).getint(), w_greenkey)) else: - l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, - logops.repr_of_resop(op))) + l_w.append(WrappedOp(name, ofs, logops.repr_of_resop(op))) return l_w + at unwrap_spec(offset=int, repr=str, name=str) +def descr_new_resop(space, w_tp, name, offset=-1, repr=''): + return WrappedOp(name, offset, repr) -class 
WrappedBox(W_Root): - """ A class representing a single box - """ - def __init__(self, llbox): - self.llbox = llbox - - def descr_getint(self, space): - if not jit_hooks.box_isint(self.llbox): - raise OperationError(space.w_NotImplementedError, - space.wrap("Box has no int value")) - return space.wrap(jit_hooks.box_getint(self.llbox)) - - at unwrap_spec(no=int) -def descr_new_box(space, w_tp, no): - return WrappedBox(jit_hooks.boxint_new(no)) - -WrappedBox.typedef = TypeDef( - 'Box', - __new__ = interp2app(descr_new_box), - getint = interp2app(WrappedBox.descr_getint), -) - - at unwrap_spec(num=int, offset=int, repr=str, w_res=W_Root) -def descr_new_resop(space, w_tp, num, w_args, w_res, offset=-1, - repr=''): - args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in - space.listview(w_args)] - if space.is_none(w_res): - llres = jit_hooks.emptyval() - else: - if not isinstance(w_res, WrappedBox): - raise OperationError(space.w_TypeError, space.wrap( - "expected box type, got %s" % space.type(w_res))) - llres = w_res.llbox - return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - - at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + at unwrap_spec(repr=str, name=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, name, repr, jd_name, call_depth, call_id, w_greenkey): - args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in - space.listview(w_args)] - num = rop.DEBUG_MERGE_POINT - return DebugMergePoint(space, - jit_hooks.resop_new(num, args, jit_hooks.emptyval()), + return DebugMergePoint(space, name, repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(W_Root): """ A class representing a single ResOperation, wrapped nicely """ - def __init__(self, op, offset, repr_of_resop): - self.op = op + def __init__(self, name, offset, repr_of_resop): self.offset = offset + self.name = name self.repr_of_resop = 
repr_of_resop def descr_repr(self, space): return space.wrap(self.repr_of_resop) - def descr_num(self, space): - return space.wrap(jit_hooks.resop_getopnum(self.op)) - def descr_name(self, space): - return space.wrap(hlstr(jit_hooks.resop_getopname(self.op))) - - @unwrap_spec(no=int) - def descr_getarg(self, space, no): - try: - box = jit_hooks.resop_getarg(self.op, no) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("Index out of range")) - return WrappedBox(box) - - @unwrap_spec(no=int, w_box=WrappedBox) - def descr_setarg(self, space, no, w_box): - jit_hooks.resop_setarg(self.op, no, w_box.llbox) - - def descr_getresult(self, space): - return WrappedBox(jit_hooks.resop_getresult(self.op)) - - def descr_setresult(self, space, w_box): - box = space.interp_w(WrappedBox, w_box) - jit_hooks.resop_setresult(self.op, box.llbox) + return space.wrap(self.name) class DebugMergePoint(WrappedOp): """ A class representing Debug Merge Point - the entry point to a jitted loop. 
""" - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, - w_greenkey): + def __init__(self, space, name, repr_of_resop, jd_name, call_depth, + call_id, w_greenkey): - WrappedOp.__init__(self, op, -1, repr_of_resop) + WrappedOp.__init__(self, name, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth self.call_id = call_id @@ -237,12 +165,7 @@ __doc__ = WrappedOp.__doc__, __new__ = interp2app(descr_new_resop), __repr__ = interp2app(WrappedOp.descr_repr), - num = GetSetProperty(WrappedOp.descr_num), name = GetSetProperty(WrappedOp.descr_name), - getarg = interp2app(WrappedOp.descr_getarg), - setarg = interp2app(WrappedOp.descr_setarg), - result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult), offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.typedef.acceptable_as_base_class = False @@ -278,14 +201,18 @@ asmaddr = 0 asmlen = 0 - def __init__(self, space, debug_info, is_bridge=False): - logops = debug_info.logger._make_log_operations() - if debug_info.asminfo is not None: - ofs = debug_info.asminfo.ops_offset + def __init__(self, space, debug_info, is_bridge=False, wrap_ops=True): + if wrap_ops: + memo = {} + logops = debug_info.logger._make_log_operations(memo) + if debug_info.asminfo is not None: + ofs = debug_info.asminfo.ops_offset + else: + ofs = {} + ops = debug_info.operations + self.w_ops = space.newlist(wrap_oplist(space, logops, ops, ofs)) else: - ofs = {} - self.w_ops = space.newlist( - wrap_oplist(space, logops, debug_info.operations, ofs)) + self.w_ops = space.w_None self.jd_name = debug_info.get_jitdriver().name self.type = debug_info.type diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -136,7 +136,6 @@ assert dmp.call_id == 0 assert dmp.offset == -1 assert int_add.name == 'int_add' - assert int_add.num == self.int_add_num 
assert int_add.offset == 0 self.on_compile_bridge() expected = (') + ticker0 = getfield_raw_i(#, descr=) ticker_cond0 = int_lt(ticker0, 0) guard_false(ticker_cond0, descr=...) """ @@ -335,7 +335,7 @@ # this is the ticker check generated if we have threads thread_ticker_check = """ guard_not_invalidated? - ticker0 = getfield_raw(#, descr=) + ticker0 = getfield_raw_i(#, descr=) ticker1 = int_sub(ticker0, #) setfield_raw(#, ticker1, descr=) ticker_cond0 = int_lt(ticker1, 0) @@ -345,7 +345,7 @@ # # this is the ticker check generated in PyFrame.handle_operation_error exc_ticker_check = """ - ticker2 = getfield_raw(#, descr=) + ticker2 = getfield_raw_i(#, descr=) ticker_cond1 = int_lt(ticker2, 0) guard_false(ticker_cond1, descr=...) """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -76,7 +76,7 @@ stdout = stdout.splitlines(True)[-1] # # parse the JIT log - rawlog = logparser.parse_log_file(str(logfile)) + rawlog = logparser.parse_log_file(str(logfile), verbose=False) rawtraces = logparser.extract_category(rawlog, 'jit-log-opt-') log = Log(rawtraces) log.result = eval(stdout) @@ -471,7 +471,7 @@ # this is the actual loop 'int_lt', 'guard_true', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw', 'int_lt', 'guard_false', + 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -536,7 +536,7 @@ # this is the actual loop 'int_lt', 'guard_true', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw', 'int_lt', 'guard_false', + 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -555,7 +555,7 @@ i8 = int_add(i4, 1) # signal checking stuff guard_not_invalidated(descr=...) 
- i10 = getfield_raw(..., descr=<.* pypysig_long_struct.c_value .*>) + i10 = getfield_raw_i(..., descr=<.* pypysig_long_struct.c_value .*>) i14 = int_lt(i10, 0) guard_false(i14, descr=...) jump(..., descr=...) @@ -609,13 +609,13 @@ log = self.run(f, import_site=True) loop, = log.loops_by_id('ntohs') assert loop.match_by_id('ntohs', """ - p12 = call(ConstClass(ntohs), 1, descr=...) + i12 = call_i(ConstClass(ntohs), 1, descr=...) guard_no_exception(descr=...) """, include_guard_not_invalidated=False) # py.test.raises(InvalidMatch, loop.match_by_id, 'ntohs', """ guard_not_invalidated(descr=...) - p12 = call(ConstClass(foobar), 1, descr=...) + i12 = call_i(ConstClass(foobar), 1, descr=...) guard_no_exception(descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -42,7 +42,7 @@ guard_not_invalidated? i13 = int_lt(i7, i9) guard_true(i13, descr=...) - i15 = getarrayitem_raw(i10, i7, descr=) + i15 = getarrayitem_raw_i(i10, i7, descr=) i16 = int_add_ovf(i8, i15) guard_no_overflow(descr=...) i18 = int_add(i7, 1) @@ -74,12 +74,12 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=) + i14 = getarrayitem_raw_i(i10, i8, descr=) i15 = int_add_ovf(i9, i14) guard_no_overflow(descr=...) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=) + i18 = getarrayitem_raw_i(i11, i17, descr=) i19 = int_add_ovf(i18, i15) guard_no_overflow(descr=...) setarrayitem_raw(i11, i8, _, descr=) @@ -93,7 +93,7 @@ guard_true(i13, descr=...) guard_not_invalidated(descr=...) 
# the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=) + i14 = getarrayitem_raw_i(i10, i8, descr=) # advanced: the following int_add cannot overflow, because: # - i14 fits inside 32 bits # - i9 fits inside 33 bits, because: @@ -107,7 +107,7 @@ i15 = int_add(i9, i14) i17 = int_sub(i8, 640) # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=) + i18 = getarrayitem_raw_i(i11, i17, descr=) i19 = int_add(i18, i15) # guard checking that i19 actually fits into 32bit i20 = int_signext(i19, 4) @@ -139,10 +139,10 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - f13 = getarrayitem_raw(i8, i6, descr=) + f13 = getarrayitem_raw_f(i8, i6, descr=) f15 = float_add(f13, 20.500000) setarrayitem_raw(i8, i6, f15, descr=) - f16 = getarrayitem_raw(i8, i6, descr=) + f16 = getarrayitem_raw_f(i8, i6, descr=) i18 = float_eq(f16, 42.000000) guard_true(i18, descr=...) i20 = int_add(i6, 1) @@ -175,12 +175,12 @@ guard_true(i10, descr=...) i11 = int_lt(i6, i7) guard_true(i11, descr=...) - i13 = getarrayitem_raw(i8, i6, descr=) + i13 = getarrayitem_raw_i(i8, i6, descr=) f14 = cast_singlefloat_to_float(i13) f16 = float_add(f14, 20.500000) i17 = cast_float_to_singlefloat(f16) setarrayitem_raw(i8, i6,i17, descr=) - i18 = getarrayitem_raw(i8, i6, descr=) + i18 = getarrayitem_raw_i(i8, i6, descr=) f19 = cast_singlefloat_to_float(i18) i21 = float_eq(f19, 42.000000) guard_true(i21, descr=...) @@ -225,23 +225,23 @@ ... i20 = int_ge(i18, i8) guard_false(i20, descr=...) - f21 = getarrayitem_raw(i13, i18, descr=...) + f21 = getarrayitem_raw_f(i13, i18, descr=...) i14 = int_sub(i6, 1) i15 = int_ge(i14, i8) guard_false(i15, descr=...) - f23 = getarrayitem_raw(i13, i14, descr=...) + f23 = getarrayitem_raw_f(i13, i14, descr=...) f24 = float_add(f21, f23) - f26 = getarrayitem_raw(i13, i6, descr=...) + f26 = getarrayitem_raw_f(i13, i6, descr=...) 
f27 = float_add(f24, f26) i29 = int_add(i6, 1) i31 = int_ge(i29, i8) guard_false(i31, descr=...) - f33 = getarrayitem_raw(i13, i29, descr=...) + f33 = getarrayitem_raw_f(i13, i29, descr=...) f34 = float_add(f27, f33) i36 = int_add(i6, 2) i38 = int_ge(i36, i8) guard_false(i38, descr=...) - f39 = getarrayitem_raw(i13, i36, descr=...) + f39 = getarrayitem_raw_f(i13, i36, descr=...) ... """) @@ -276,20 +276,20 @@ expected_src=""" ... i17 = int_and(i14, 255) - f18 = getarrayitem_raw(i8, i17, descr=...) + f18 = getarrayitem_raw_f(i8, i17, descr=...) i19s = int_sub_ovf(i6, 1) guard_no_overflow(descr=...) i22s = int_and(i19s, 255) - f20 = getarrayitem_raw(i8, i22s, descr=...) + f20 = getarrayitem_raw_f(i8, i22s, descr=...) f21 = float_add(f18, f20) - f23 = getarrayitem_raw(i8, i10, descr=...) + f23 = getarrayitem_raw_f(i8, i10, descr=...) f24 = float_add(f21, f23) i26 = int_add(i6, 1) i29 = int_and(i26, 255) - f30 = getarrayitem_raw(i8, i29, descr=...) + f30 = getarrayitem_raw_f(i8, i29, descr=...) f31 = float_add(f24, f30) i33 = int_add(i6, 2) i36 = int_and(i33, 255) - f37 = getarrayitem_raw(i8, i36, descr=...) + f37 = getarrayitem_raw_f(i8, i36, descr=...) ... """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -18,7 +18,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('match', """ guard_not_invalidated(descr=...) - i65 = getfield_gc(p18, descr=...) + i65 = getfield_gc_i(p18, descr=...) i67 = int_gt(0, i65) guard_false(i67, descr=...) i69 = int_gt(#, i65) @@ -42,7 +42,7 @@ assert loop.match_by_id('unpack', """ guard_not_invalidated(descr=...) p90 = newstr(4) - call(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) + call_n(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) guard_no_exception(descr=...) 
i91 = strgetitem(p90, 0) i92 = strgetitem(p90, 1) @@ -56,7 +56,7 @@ guard_false(i99, descr=...) i100 = int_lshift(i98, 24) i101 = int_or(i97, i100) - i102 = getfield_raw(#, descr=) + i102 = getfield_raw_i(#, descr=) i103 = int_lt(i102, 0) guard_false(i103, descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -31,7 +31,7 @@ functrace, loop = log.loops_by_filename(self.filepath) assert loop.match_by_id('call_rec', """ ... - p53 = call_assembler(..., descr=...) + p53 = call_assembler_r(..., descr=...) guard_not_forced(descr=...) keepalive(...) guard_no_exception(descr=...) @@ -73,7 +73,7 @@ ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", "guard_value", - "getfield_gc", "guard_value", + "getfield_gc_r", "guard_value", "guard_not_invalidated"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] @@ -82,12 +82,12 @@ assert log.opnames(ops) == [] # assert entry_bridge.match_by_id('call', """ - p38 = call(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p39 = getfield_gc(p38, descr=) + p38 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) + p39 = getfield_gc_r(p38, descr=) i40 = force_token() - p41 = getfield_gc_pure(p38, descr=) + p41 = getfield_gc_pure_r(p38, descr=) guard_value(p41, ConstPtr(ptr42), descr=...) - i42 = getfield_gc_pure(p38, descr=) + i42 = getfield_gc_pure_i(p38, descr=) i43 = int_is_zero(i42) guard_true(i43, descr=...) 
i50 = force_token() @@ -130,7 +130,8 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('meth1', opcode='LOOKUP_METHOD') - assert log.opnames(ops) == ['guard_value', 'getfield_gc', 'guard_value', + assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', + 'guard_value', 'guard_not_invalidated'] # the second LOOKUP_METHOD is folded away assert list(entry_bridge.ops_by_id('meth2', opcode='LOOKUP_METHOD')) == [] @@ -349,15 +350,13 @@ # the int strategy is used here assert loop.match_by_id('append', """ guard_not_invalidated? - i13 = getfield_gc(p8, descr=) i15 = int_add(i13, 1) - p15 = getfield_gc(p8, descr=) i17 = arraylen_gc(p15, descr=) i18 = int_lt(i17, i15) # a cond call to _ll_list_resize_hint_really_look_inside_iff cond_call(i18, _, p8, i15, 1, descr=) guard_no_exception(descr=...) - p17 = getfield_gc(p8, descr=) + p17 = getfield_gc_r(p8, descr=) setarrayitem_gc(p17, i13, i12, descr=) """) @@ -381,9 +380,9 @@ # make sure that the "block" is not allocated ... p20 = force_token() - p22 = new_with_vtable(...) + p22 = new_with_vtable(descr=) p24 = new_array_clear(1, descr=) - p26 = new_with_vtable(ConstClass(W_ListObject)) + p26 = new_with_vtable(descr=) {{{ setfield_gc(p0, p20, descr=) setfield_gc(p22, ConstPtr(null), descr=) @@ -395,7 +394,7 @@ setarrayitem_gc(p24, 0, p26, descr=) setfield_gc(p22, p24, descr=) }}} - p32 = call_may_force(_, p18, p22, descr=) + p32 = call_may_force_r(_, p18, p22, descr=) ... """) @@ -436,24 +435,24 @@ guard_isnull(p5, descr=...) guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) - i22 = getfield_gc_pure(p12, descr=) + i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_value(p7, ConstPtr(ptr25), descr=...) - p26 = getfield_gc(p7, descr=) + p26 = getfield_gc_r(p7, descr=) guard_value(p26, ConstPtr(ptr27), descr=...) 
guard_not_invalidated(descr=...) - p29 = call(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) - p30 = getfield_gc(p29, descr=) + p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) + p30 = getfield_gc_r(p29, descr=) p31 = force_token() - p32 = getfield_gc_pure(p29, descr=) + p32 = getfield_gc_pure_r(p29, descr=) guard_value(p32, ConstPtr(ptr33), descr=...) - i34 = getfield_gc_pure(p29, descr=) + i34 = getfield_gc_pure_i(p29, descr=) i35 = int_is_zero(i34) guard_true(i35, descr=...) - p37 = getfield_gc(ConstPtr(ptr36), descr=) + p37 = getfield_gc_r(ConstPtr(ptr36), descr=) guard_nonnull_class(p37, ConstClass(W_IntObject), descr=...) - i39 = getfield_gc_pure(p37, descr=) + i39 = getfield_gc_pure_i(p37, descr=) i40 = int_add_ovf(i22, i39) guard_no_overflow(descr=...) --TICK-- @@ -470,13 +469,13 @@ """, []) loop, = log.loops_by_id('call') assert loop.match(""" - i8 = getfield_gc_pure(p6, descr=) + i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) i11 = force_token() i13 = int_add(i8, 1) --TICK-- - p22 = new_with_vtable(ConstClass(W_IntObject)) + p22 = new_with_vtable(descr=) setfield_gc(p22, i13, descr=) setfield_gc(p4, p22, descr=) jump(..., descr=...) 
@@ -576,8 +575,8 @@ allops = loop.allops() calls = [op for op in allops if op.name.startswith('call')] assert OpMatcher(calls).match(''' - p93 = call(ConstClass(view_as_kwargs), p35, p12, descr=<.*>) - i103 = call(ConstClass(_match_keywords), ConstPtr(ptr52), 0, 0, p94, p98, 0, descr=<.*>) + p93 = call_r(ConstClass(view_as_kwargs), p35, p12, descr=<.*>) + i103 = call_i(ConstClass(_match_keywords), ConstPtr(ptr52), 0, 0, p94, p98, 0, descr=<.*>) ''') assert len([op for op in allops if op.name.startswith('new')]) == 1 # 1 alloc diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -43,9 +43,9 @@ # can't change ;) assert loop.match_by_id("getitem", """ ... - i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) + i26 = call_i(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, descr=...) ... - p33 = getinteriorfield_gc(p31, i26, descr=>) + p33 = getinteriorfield_gc_r(p31, i26, descr=>) ... """) @@ -64,9 +64,9 @@ i8 = int_lt(i5, i7) guard_true(i8, descr=...) guard_not_invalidated(descr=...) - p10 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) + p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) - i12 = call(ConstClass(ll_strhash), p10, descr=) + i12 = call_i(ConstClass(ll_strhash), p10, descr=) p13 = new(descr=...) p15 = new_array_clear(16, descr=) {{{ @@ -74,25 +74,25 @@ setfield_gc(p13, p15, descr=) setfield_gc(p13, ConstPtr(0), descr=) }}} - i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) + i17 = call_i(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 1, descr=) {{{ setfield_gc(p13, 0, descr=) setfield_gc(p13, 0, descr=) setfield_gc(p13, 32, descr=) }}} guard_no_exception(descr=...) 
- p20 = new_with_vtable(ConstClass(W_IntObject)) - call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) + p20 = new_with_vtable(descr=...) + call_n(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, p10, p20, i12, i17, descr=) setfield_gc(p20, i5, descr=) guard_no_exception(descr=...) - i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) + i23 = call_i(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, descr=) guard_no_exception(descr=...) i27 = int_lt(i23, 0) guard_false(i27, descr=...) - p28 = getfield_gc(p13, descr=) - p29 = getinteriorfield_gc(p28, i23, descr=>) + p28 = getfield_gc_r(p13, descr=) + p29 = getinteriorfield_gc_r(p28, i23, descr=>) guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...) - i31 = getfield_gc_pure(p29, descr=) + i31 = getfield_gc_pure_i(p29, descr=) i32 = int_sub_ovf(i31, i5) guard_no_overflow(descr=...) i34 = int_add_ovf(i32, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py --- a/pypy/module/pypyjit/test_pypy_c/test_cprofile.py +++ b/pypy/module/pypyjit/test_pypy_c/test_cprofile.py @@ -31,7 +31,7 @@ # but all calls can be special-cased by the backend if # supported. On 64-bit there is only the two calls to # read_timestamp. - r = re.compile(r" call[(]ConstClass[(](.+?)[)]") + r = re.compile(r" call_\w[(]ConstClass[(](.+?)[)]") calls = r.findall(repr(loop.ops_by_id(method))) if sys.maxint == 2147483647: assert len(calls) == 6 diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py --- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py +++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py @@ -163,7 +163,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('getfield', """ guard_not_invalidated(descr=...) 
- i57 = getfield_raw(i46, descr=) + i57 = getfield_raw_i(i46, descr=) """) assert loop.match_by_id('setfield', """ setfield_raw(i44, i57, descr=) @@ -202,7 +202,7 @@ assert loop.match_by_id('cfficall', """ p96 = force_token() setfield_gc(p0, p96, descr=) - f97 = call_release_gil(91, i59, 1.0, 3, descr=) + f97 = call_release_gil_f(91, i59, 1.0, 3, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) """, ignore_ops=['guard_not_invalidated']) @@ -244,7 +244,7 @@ assert loop.match_by_id('cfficall', """ p96 = force_token() setfield_gc(p0, p96, descr=) - i97 = call_release_gil(91, i59, i50, descr=) + i97 = call_release_gil_i(91, i59, i50, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) %s @@ -288,10 +288,10 @@ assert loop.match_by_id('cfficall', """ p96 = force_token() setfield_gc(p0, p96, descr=) - i97 = call_release_gil(91, i59, i10, i12, 1, descr=) + i97 = call_release_gil_i(91, i59, i10, i12, 1, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) - p98 = call(ConstClass(fromrarith_int__r_uint), i97, descr=) + p98 = call_r(ConstClass(fromrarith_int__r_uint), i97, descr=) guard_no_exception(descr=...) """, ignore_ops=['guard_not_invalidated']) @@ -354,7 +354,7 @@ loop, = log.loops_by_id('cfficall') assert loop.match_by_id('cfficall', """ ... - f1 = call_release_gil(..., descr=) + i1 = call_release_gil_i(..., descr=) ... """) @@ -414,11 +414,7 @@ guard_not_invalidated(descr=...) p163 = force_token() p164 = force_token() - p165 = getarrayitem_gc(p67, 0, descr=) - guard_value(p165, ConstPtr(ptr70), descr=...) - p166 = getfield_gc(p165, descr=) - guard_value(p166, ConstPtr(ptr72), descr=...) - p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=) + p167 = call_r(ConstClass(_ll_0_alloc_with_del___), descr=) guard_no_exception(descr=...) i112 = int_signext(i160, 2) setfield_gc(p167, ConstPtr(ptr85), descr=) @@ -426,11 +422,11 @@ i114 = int_ne(i160, i112) guard_false(i114, descr=...) 
--TICK-- - i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) + i123 = arraylen_gc(p67, descr=) + i119 = call_i(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=) raw_store(i119, 0, i160, descr=) raw_store(i119, 2, i160, descr=) raw_store(i119, 4, i160, descr=) setfield_gc(p167, i119, descr=) - i123 = arraylen_gc(p67, descr=) jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_generators.py b/pypy/module/pypyjit/test_pypy_c/test_generators.py --- a/pypy/module/pypyjit/test_pypy_c/test_generators.py +++ b/pypy/module/pypyjit/test_pypy_c/test_generators.py @@ -21,10 +21,10 @@ assert loop.match_by_id("generator", """ cond_call(..., descr=...) i16 = force_token() - p45 = new_with_vtable(ConstClass(W_IntObject)) + p45 = new_with_vtable(descr=<.*>) + ifoo = arraylen_gc(p8, descr=) setfield_gc(p45, i29, descr=) setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ @@ -50,10 +50,10 @@ assert loop.match_by_id("generator", """ cond_call(..., descr=...) i16 = force_token() - p45 = new_with_vtable(ConstClass(W_IntObject)) + p45 = new_with_vtable(descr=<.*>) + i47 = arraylen_gc(p8, descr=) # Should be removed by backend setfield_gc(p45, i29, descr=) setarrayitem_gc(p8, 0, p45, descr=) - i47 = arraylen_gc(p8, descr=) # Should be removed by backend jump(..., descr=...) """) assert loop.match_by_id("subtract", """ diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -16,9 +16,9 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("loadglobal", """ - p12 = getfield_gc(p10, descr=) + p12 = getfield_gc_r(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) guard_not_invalidated(descr=...) 
- p19 = getfield_gc(ConstPtr(p17), descr=) + p19 = getfield_gc_r(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -106,7 +106,7 @@ entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc'] + 'getfield_gc_i'] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] # @@ -120,11 +120,11 @@ i59 = int_add_ovf(i57, 1) guard_no_overflow(descr=...) p60 = force_token() - i61 = getfield_raw(..., descr=...) + i61 = getfield_raw_i(..., descr=...) setfield_gc(ConstPtr(ptr39), i59, descr=...) i62 = int_lt(i61, 0) guard_false(i62, descr=...) - jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, descr=...) + jump(p0, p1, p3, p6, p7, p12, i59, p18, i31, i59, p100, descr=...) """) def test_mutate_class(self): @@ -154,8 +154,8 @@ entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc', 'guard_nonnull_class', - 'getfield_gc', 'guard_value', # type check on the attribute + 'getfield_gc_r', 'guard_nonnull_class', + 'getfield_gc_r', 'guard_value', # type check on the attribute ] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] @@ -167,15 +167,15 @@ i70 = int_lt(i58, i33) guard_true(i70, descr=...) guard_not_invalidated(descr=...) - p71 = getfield_gc(p64, descr=...) + p71 = getfield_gc_r(p64, descr=...) guard_value(p71, ConstPtr(ptr42), descr=...) 
p72 = force_token() p73 = force_token() i74 = int_add(i58, 1) - i75 = getfield_raw(..., descr=...) + i75 = getfield_raw_i(..., descr=...) i76 = int_lt(i75, 0) guard_false(i76, descr=...) - p77 = new_with_vtable(...) + p77 = new_with_vtable(descr=...) setfield_gc(p77, p64, descr=...) setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(null), descr=...) @@ -183,7 +183,7 @@ setfield_gc(p77, ConstPtr(null), descr=...) setfield_gc(p77, ConstPtr(ptr42), descr=...) setfield_gc(ConstPtr(ptr69), p77, descr=...) - jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, descr=...) + jump(p0, p1, p3, p6, p7, p12, i74, p20, p26, i33, p77, p100, descr=...) """) @@ -209,11 +209,11 @@ assert loop.match_by_id('loadattr1', ''' guard_not_invalidated(descr=...) - i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) + i19 = call_i(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) i22 = int_lt(i19, 0) guard_true(i22, descr=...) - i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) + i26 = call_i(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) i29 = int_lt(i26, 0) guard_true(i29, descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -23,8 +23,8 @@ f1 = cast_int_to_float(i0) i3 = float_le(f1, 0.0) guard_false(i3, descr=...) - f2 = call(ConstClass(log), f1, descr=) - f3 = call(ConstClass(log10), f1, descr=) + f2 = call_f(ConstClass(log), f1, descr=) + f3 = call_f(ConstClass(log10), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i4 = int_add(i0, 1) @@ -52,8 +52,8 @@ f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) 
- f2 = call(ConstClass(sin), f1, descr=) - f3 = call(ConstClass(cos), f1, descr=) + f2 = call_f(ConstClass(sin), f1, descr=) + f3 = call_f(ConstClass(cos), f1, descr=) f4 = float_sub(f2, f3) f5 = float_add(f0, f4) i7 = int_add(i0, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -15,39 +15,39 @@ loop = log._filter(log.loops[0]) assert loop.match(""" guard_class(p1, #, descr=...) - p4 = getfield_gc_pure(p1, descr=) - i5 = getfield_gc(p0, descr=) - p6 = getfield_gc_pure(p4, descr=) - p7 = getfield_gc_pure(p6, descr=) + p4 = getfield_gc_pure_r(p1, descr=) + i5 = getfield_gc_i(p0, descr=) + p6 = getfield_gc_pure_r(p4, descr=) + p7 = getfield_gc_pure_r(p6, descr=) guard_class(p7, ConstClass(Float64), descr=...) - i9 = getfield_gc_pure(p4, descr=) - i10 = getfield_gc_pure(p6, descr=) + i9 = getfield_gc_pure_i(p4, descr=) + i10 = getfield_gc_pure_i(p6, descr=) i12 = int_eq(i10, 61) i14 = int_eq(i10, 60) i15 = int_or(i12, i14) - f16 = raw_load(i9, i5, descr=) + f16 = raw_load_f(i9, i5, descr=) guard_true(i15, descr=...) guard_not_invalidated(descr=...) i18 = float_ne(f16, 0.000000) guard_true(i18, descr=...) guard_nonnull_class(p2, ConstClass(W_BoolBox), descr=...) - i20 = getfield_gc_pure(p2, descr=) + i20 = getfield_gc_pure_i(p2, descr=) i21 = int_is_true(i20) guard_false(i21, descr=...) - i22 = getfield_gc(p0, descr=) - i23 = getfield_gc_pure(p1, descr=) + i22 = getfield_gc_i(p0, descr=) + i23 = getfield_gc_pure_i(p1, descr=) guard_true(i23, descr=...) i25 = int_add(i22, 1) - p26 = getfield_gc_pure(p0, descr=) - i27 = getfield_gc_pure(p1, descr=) + p26 = getfield_gc_pure_r(p0, descr=) + i27 = getfield_gc_pure_i(p1, descr=) i28 = int_is_true(i27) guard_true(i28, descr=...) 
- i29 = getfield_gc_pure(p6, descr=) + i29 = getfield_gc_pure_i(p6, descr=) i30 = int_add(i5, i29) - i31 = getfield_gc_pure(p1, descr=) + i31 = getfield_gc_pure_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) - p34 = new_with_vtable(#) + p34 = new_with_vtable(descr=...) {{{ setfield_gc(p34, p1, descr=) setfield_gc(p34, i25, descr=) @@ -68,13 +68,13 @@ assert len(log.loops) == 1 loop = log._filter(log.loops[0]) assert loop.match(""" - f31 = raw_load(i9, i29, descr=) + f31 = raw_load_f(i9, i29, descr=) guard_not_invalidated(descr=...) i32 = float_ne(f31, 0.000000) guard_true(i32, descr=...) - i34 = getarrayitem_raw(#, #, descr=) # XXX what are these? + i34 = getarrayitem_raw_i(#, #, descr=) # XXX what are these? guard_value(i34, #, descr=...) # XXX don't appear in - i35 = getarrayitem_raw(#, #, descr=) # XXX equiv test_zjit + i35 = getarrayitem_raw_i(#, #, descr=) # XXX equiv test_zjit i36 = int_add(i24, 1) i37 = int_add(i29, i28) i38 = int_ge(i36, i30) @@ -112,7 +112,7 @@ i78 = int_mul(i71, i61) i79 = int_add(i55, i78) """ + alignment_check + """ - f80 = raw_load(i67, i79, descr=) + f80 = raw_load_f(i67, i79, descr=) i81 = int_add(i71, 1) --TICK-- jump(..., descr=...) @@ -149,12 +149,12 @@ i83 = int_mul(i76, i64) i84 = int_add(i58, i83) """ + alignment_check + """ - f85 = raw_load(i70, i84, descr=) + f85 = raw_load_f(i70, i84, descr=) guard_not_invalidated(descr=...) f86 = float_add(f74, f85) i87 = int_add(i76, 1) --TICK-- - jump(p0, p1, p6, p7, p8, p11, p13, f86, p17, i87, i62, p42, i58, p48, i41, i64, i70, descr=...) + jump(..., descr=...) """) def test_array_flatiter_next(self): @@ -176,11 +176,11 @@ guard_not_invalidated(descr=...) i88 = int_ge(i87, i59) guard_false(i88, descr=...) 
- f90 = raw_load(i67, i89, descr=) + f90 = raw_load_f(i67, i89, descr=) i91 = int_add(i87, 1) i93 = int_add(i89, i76) i94 = int_add(i79, 1) - i95 = getfield_raw(#, descr=) + i95 = getfield_raw_i(#, descr=) setfield_gc(p97, i91, descr=) setfield_gc(p97, i93, descr=) i96 = int_lt(i95, 0) @@ -208,11 +208,11 @@ guard_true(i126, descr=...) i128 = int_mul(i117, i59) i129 = int_add(i55, i128) - f149 = raw_load(i100, i129, descr=) + f149 = raw_load_f(i100, i129, descr=) i151 = int_add(i117, 1) + setfield_gc(p156, i55, descr=) setarrayitem_gc(p150, 1, 0, descr=) setarrayitem_gc(p150, 0, 0, descr=) - setfield_gc(p156, i55, descr=) --TICK-- jump(..., descr=...) """) @@ -240,10 +240,10 @@ guard_not_invalidated(descr=...) raw_store(i103, i132, 42.000000, descr=) i153 = int_add(i120, 1) - i154 = getfield_raw(#, descr=) + i154 = getfield_raw_i(#, descr=) + setfield_gc(p158, i53, descr=) setarrayitem_gc(p152, 1, 0, descr=) setarrayitem_gc(p152, 0, 0, descr=) - setfield_gc(p158, i53, descr=) i157 = int_lt(i154, 0) guard_false(i157, descr=...) jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -38,7 +38,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" ... - p76 = call_assembler(_, _, _, _, descr=...) + p76 = call_assembler_r(_, _, _, _, descr=...) ... """) loop2 = log.loops[0] @@ -50,11 +50,11 @@ guard_not_invalidated? i17 = int_ge(i11, i7) guard_false(i17, descr=...) - p18 = getarrayitem_gc(p5, i11, descr=...) + p18 = getarrayitem_gc_r(p5, i11, descr=...) i19 = int_add(i11, 1) setfield_gc(p2, i19, descr=...) guard_nonnull_class(p18, ConstClass(W_IntObject), descr=...) - i20 = getfield_gc_pure(p18, descr=...) + i20 = getfield_gc_pure_i(p18, descr=...) i21 = int_gt(i20, i14) guard_true(i21, descr=...) jump(..., descr=...) 
@@ -79,6 +79,6 @@ assert len(guards) < 20 assert loop.match(""" ... - p76 = call_assembler(_, _, _, _, descr=...) + p76 = call_assembler_r(_, _, _, _, descr=...) ... """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -65,7 +65,7 @@ assert loop.match(""" i7 = int_gt(i4, 1) guard_true(i7, descr=...) - p11 = call(ConstClass(rbigint.int_mul), p5, i4, descr=...) + p11 = call_r(ConstClass(rbigint.int_mul), p5, i4, descr=...) guard_no_exception(descr=...) i13 = int_sub(i4, 1) --TICK-- @@ -113,14 +113,14 @@ i12 = int_is_true(i4) guard_true(i12, descr=...) guard_not_invalidated(descr=...) - i13 = int_add_ovf(i8, i9) - guard_no_overflow(descr=...) - i10p = getfield_gc_pure(p10, descr=...) + i10p = getfield_gc_pure_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) i14 = int_add_ovf(i13, i10) guard_no_overflow(descr=...) - setfield_gc(p7, p11, descr=...) + i13 = int_add_ovf(i14, i9) + guard_no_overflow(descr=...) + setfield_gc(p17, p10, descr=...) i17 = int_sub_ovf(i4, 1) guard_no_overflow(descr=...) --TICK-- @@ -148,6 +148,7 @@ i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) guard_not_invalidated(descr=...) + i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) i22 = int_lt(i10, i14) @@ -180,6 +181,7 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) i27 = int_add_ovf(i7, i11) @@ -212,6 +214,7 @@ i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) 
i25 = int_ge(i18, i9) @@ -260,25 +263,24 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match(""" guard_not_invalidated? - i14 = getfield_gc(p12, descr=) i16 = uint_ge(i12, i14) guard_false(i16, descr=...) - p16 = getfield_gc(p12, descr=) - p17 = getarrayitem_gc(p16, i12, descr=) + p17 = getarrayitem_gc_r(p16, i12, descr=) i19 = int_add(i12, 1) setfield_gc(p9, i19, descr=) guard_nonnull_class(p17, ..., descr=...) guard_not_invalidated? - i21 = getfield_gc(p17, descr=) + i21 = getfield_gc_i(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) - i24 = getfield_gc(p17, descr=) - i25 = getarrayitem_raw(i24, 0, descr=<.*>) + i24 = getfield_gc_i(p17, descr=) + i25 = getarrayitem_raw_i(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- + if00 = arraylen_gc(p16, descr=...) jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -43,9 +43,9 @@ i25 = unicodegetitem(p13, i19) p27 = newstr(1) strsetitem(p27, 0, i23) - p30 = call(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=...) + p30 = call_r(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=...) guard_no_exception(descr=...) - i32 = call(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=...) + i32 = call_i(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=...) guard_true(i32, descr=...) i34 = int_add(i6, 1) --TICK-- @@ -80,12 +80,12 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, descr=) + p93 = call_r(ConstClass(fromstr), p25, 16, descr=) guard_no_exception(descr=...) - i95 = getfield_gc_pure(p93, descr=) + i95 = getfield_gc_pure_i(p93, descr=) i96 = int_gt(i95, #) guard_false(i96, descr=...) 
- i94 = call(ConstClass(rbigint._toint_helper), p93, descr=) + i94 = call_i(ConstClass(rbigint._toint_helper), p93, descr=) guard_no_exception(descr=...) i95 = int_add_ovf(i6, i94) guard_no_overflow(descr=...) @@ -108,7 +108,7 @@ i79 = int_gt(i74, 0) guard_true(i79, descr=...) guard_not_invalidated(descr=...) - p80 = call(ConstClass(ll_int2dec__Signed), i74, descr=) + p80 = call_r(ConstClass(ll_int2dec__Signed), i74, descr=) guard_no_exception(descr=...) i85 = strlen(p80) p86 = new(descr=) @@ -119,21 +119,21 @@ setfield_gc(p86, 23, descr=) setfield_gc(p86, 23, descr=) }}} - call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) + call_n(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) guard_no_exception(descr=...) - i89 = getfield_gc(p86, descr=) - i90 = getfield_gc(p86, descr=) + i89 = getfield_gc_i(p86, descr=) + i90 = getfield_gc_i(p86, descr=) i91 = int_eq(i89, i90) cond_call(i91, ConstClass(ll_grow_by__stringbuilderPtr_Signed), p86, 1, descr=) guard_no_exception(descr=...) - i92 = getfield_gc(p86, descr=) + i92 = getfield_gc_i(p86, descr=) i93 = int_add(i92, 1) - p94 = getfield_gc(p86, descr=) + p94 = getfield_gc_r(p86, descr=) strsetitem(p94, i92, 32) setfield_gc(p86, i93, descr=) - call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) + call_n(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=) guard_no_exception(descr=...) - p95 = call(..., descr=) # ll_build + p95 = call_r(..., descr=) # ll_build guard_no_exception(descr=...) i96 = strlen(p95) i97 = int_add_ovf(i71, i96) @@ -176,7 +176,7 @@ strsetitem(p35, 3, 104) strsetitem(p35, 4, 95) copystrcontent(p31, p35, 0, 5, i32) - i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) + i49 = call_i(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=) guard_value(i49, 1, descr=...) 
''') @@ -195,7 +195,7 @@ loops = log.loops_by_filename(self.filepath) loop, = loops assert loop.match_by_id('callone', ''' - p114 = call(ConstClass(ll_lower__rpy_stringPtr), p113, descr=) + p114 = call_r(ConstClass(ll_lower__rpy_stringPtr), p113, descr=) guard_no_exception(descr=...) ''') assert loop.match_by_id('calltwo', '') # nothing @@ -248,9 +248,9 @@ i50 = int_add(i47, 1) setfield_gc(p15, i50, descr=) guard_not_invalidated(descr=...) - p80 = call(ConstClass(ll_str__IntegerR_SignedConst_Signed), i47, descr=) + p80 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i47, descr=) guard_no_exception(descr=...) - p53 = call(ConstClass(fast_str_decode_ascii), p80, descr=) + p53 = call_r(ConstClass(fast_str_decode_ascii), p80, descr=) guard_no_exception(descr=...) guard_nonnull(p53, descr=...) --TICK-- diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -64,11 +64,11 @@ guard_true(i56, descr=...) p57 = force_token() setfield_gc(p0, p57, descr=) - i58 = call_release_gil(0, _, i37, 1, descr=) + i58 = call_release_gil_i(0, _, i37, 1, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) i58 = int_sub(i44, 1) - i59 = call(ConstClass(RPyThreadReleaseLock), i37, descr=) + i59 = call_i(ConstClass(RPyThreadReleaseLock), i37, descr=) i60 = int_is_true(i59) guard_false(i60, descr=...) guard_not_invalidated(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -19,23 +19,23 @@ """, [500]) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - i58 = getfield_gc(p18, descr=) + i58 = getfield_gc_i(p18, descr=) i60 = int_lt(i58, i31) guard_true(i60, descr=...) 
i61 = int_add(i58, 1) - p62 = getfield_gc(ConstPtr(ptr37), descr=) + p62 = getfield_gc_r(ConstPtr(ptr37), descr=) setfield_gc(p18, i61, descr=) guard_value(p62, ConstPtr(ptr39), descr=...) guard_not_invalidated(descr=...) - p64 = getfield_gc(ConstPtr(ptr40), descr=) + p64 = getfield_gc_r(ConstPtr(ptr40), descr=) guard_value(p64, ConstPtr(ptr42), descr=...) - p65 = getfield_gc(p14, descr=) + p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc(p14, descr=) + p66 = getfield_gc_r(p14, descr=) guard_nonnull_class(p66, ..., descr=...) p67 = force_token() setfield_gc(p0, p67, descr=) - p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) 
diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -14,6 +14,9 @@ print >> sys.stderr, __doc__ sys.exit(2) +import sys +sys.setrecursionlimit(100000000) + from pypy.objspace.std import Space from rpython.config.translationoption import set_opt_level from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level @@ -22,6 +25,7 @@ from rpython.rtyper.lltypesystem import lltype from pypy.interpreter.pycode import PyCode from rpython.translator.goal import unixcheckpoint +import pypy.module.pypyjit.interp_jit config = get_pypy_config(translating=True) config.translation.backendopt.inline_threshold = 0.1 @@ -33,6 +37,8 @@ config.objspace.usemodules.pypyjit = True config.objspace.usemodules.array = False config.objspace.usemodules._weakref = False +config.objspace.usemodules.struct = True +config.objspace.usemodules.time = True config.objspace.usemodules._sre = False config.objspace.usemodules._lsprof = False # @@ -73,6 +79,7 @@ read_code_ptr = llhelper(FPTR, read_code) def entry_point(): + space.startup() from pypy.module.marshal.interp_marshal import loads code = loads(space, space.wrap(hlstr(read_code_ptr()))) assert isinstance(code, PyCode) diff --git a/pypy/tool/pypyjit_demo.py b/pypy/tool/pypyjit_demo.py --- a/pypy/tool/pypyjit_demo.py +++ b/pypy/tool/pypyjit_demo.py @@ -1,8 +1,31 @@ -def f(): - i = 0 - while i < 1303: - i += 1 - return i +import time +l = [] -f() +for i in range(100): From noreply at buildbot.pypy.org Fri Oct 2 10:52:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 2 Oct 2015 10:52:03 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: PPC Backend #6: most tests pass Message-ID: <20151002085203.3D34D1C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r79928:0766a869fc86 Date: 2015-10-02 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/0766a869fc86/ Log: PPC Backend #6: most tests pass Various remaining fixes, 
until most tests pass. Took the relevant tests, copied and adapted from the x86 backend. This also includes test_zll_stress_*.py from rpython/jit/backend/test. Update to default for the "optresult" changes. diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -41,11 +41,7 @@ from rpython.jit.backend.llsupport.descr import CallDescr -# xxx hack: set a default value for TargetToken._ll_loop_code. If 0, we know -# that it is a LABEL that was not compiled yet. -TargetToken._ll_loop_code = 0 - -class TempInt(TempVar): +class TempInt(TempBox): type = INT def __repr__(self): diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -3,7 +3,7 @@ """ import py -import re +import re, sys, struct from rpython.jit.metainterp.history import TargetToken, BasicFinalDescr,\ JitCellToken, BasicFailDescr, AbstractDescr from rpython.jit.backend.llsupport.gc import GcLLDescription, GcLLDescr_boehm,\ @@ -613,7 +613,10 @@ cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrShadowstackDirect() wbd = cpu.gc_ll_descr.write_barrier_descr - wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + if sys.byteorder == 'little': + wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + else: + wbd.jit_wb_if_flag_byteofs = struct.calcsize("l") - 1 S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', ('hdr', lltype.Signed), diff --git a/rpython/jit/backend/ppc/helper/regalloc.py b/rpython/jit/backend/ppc/helper/regalloc.py --- a/rpython/jit/backend/ppc/helper/regalloc.py +++ b/rpython/jit/backend/ppc/helper/regalloc.py @@ -1,4 +1,4 @@ -from rpython.jit.metainterp.history import ConstInt, Box, FLOAT +from rpython.jit.metainterp.history import ConstInt, FLOAT from 
rpython.jit.backend.ppc.locations import imm def check_imm_box(arg, lower_bound=-2**15, upper_bound=2**15-1): @@ -21,7 +21,7 @@ else: l1 = self.ensure_reg(a1) self.free_op_vars() - res = self.force_allocate_reg_or_cc(op.result) + res = self.force_allocate_reg_or_cc(op) return [l0, l1, res] return f prepare_cmp_op = _prepare_cmp_op(signed=True) @@ -31,27 +31,27 @@ l0 = self.ensure_reg(op.getarg(0)) l1 = imm(0) self.free_op_vars() - res = self.force_allocate_reg_or_cc(op.result) + res = self.force_allocate_reg_or_cc(op) return [l0, l1, res] def prepare_float_cmp(self, op): l0 = self.ensure_reg(op.getarg(0)) l1 = self.ensure_reg(op.getarg(1)) self.free_op_vars() - res = self.force_allocate_reg_or_cc(op.result) + res = self.force_allocate_reg_or_cc(op) return [l0, l1, res] def prepare_unary_op(self, op): l0 = self.ensure_reg(op.getarg(0)) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [l0, res] def prepare_binary_op(self, op): reg1 = self.ensure_reg(op.getarg(0)) reg2 = self.ensure_reg(op.getarg(1)) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [reg1, reg2, res] def prepare_int_add_or_mul(self, op): @@ -65,7 +65,7 @@ else: l1 = self.ensure_reg(a1) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [l0, l1, res] def prepare_int_sub(self, op): @@ -76,5 +76,5 @@ else: l1 = self.ensure_reg(a1) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [l0, l1, res] diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -10,9 +10,9 @@ THREADLOCAL_ADDR_OFFSET, IS_BIG_ENDIAN) -from rpython.jit.metainterp.history import (JitCellToken, TargetToken, Box, +from rpython.jit.metainterp.history import (JitCellToken, TargetToken, 
AbstractFailDescr, FLOAT, INT, REF, - ConstInt) + ConstInt, VOID) from rpython.rlib.objectmodel import we_are_translated from rpython.jit.backend.ppc.helper.assembler import (Saved_Volatiles) from rpython.jit.backend.ppc.jump import remap_frame_layout @@ -24,6 +24,7 @@ from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rtyper.lltypesystem import rstr, rffi, lltype from rpython.rtyper.annlowlevel import cast_instance_to_gcref +from rpython.rtyper import rclass from rpython.jit.metainterp.resoperation import rop from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.backend.ppc import callbuilder @@ -41,6 +42,8 @@ else: self.mc.add(res.value, l0.value, l1.value) + emit_nursery_ptr_increment = emit_int_add + def emit_int_sub(self, op, arglocs, regalloc): l0, l1, res = arglocs assert not l0.is_imm() @@ -317,7 +320,7 @@ def emit_guard_class(self, op, arglocs, regalloc): self._cmp_guard_class(op, arglocs, regalloc) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs[3:]) + self._emit_guard(op, arglocs[2:]) def emit_guard_nonnull_class(self, op, arglocs, regalloc): self.mc.cmp_op(0, arglocs[0].value, 1, imm=True, signed=False) @@ -328,26 +331,102 @@ pmc.blt(self.mc.currpos() - patch_pos) pmc.overwrite() self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs[3:]) + self._emit_guard(op, arglocs[2:]) def _cmp_guard_class(self, op, locs, regalloc): - offset = locs[2] + offset = self.cpu.vtable_offset if offset is not None: - with scratch_reg(self.mc): - self.mc.load(r.SCRATCH.value, locs[0].value, offset.value) - self.mc.cmp_op(0, r.SCRATCH.value, locs[1].value) + # could be one instruction shorter, but don't care because + # it's not this case that is commonly translated + self.mc.load(r.SCRATCH.value, locs[0].value, offset) + self.mc.load_imm(r.SCRATCH2, locs[1].value) + self.mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value) else: - typeid = locs[1] - # here, we have to go back from 'classptr' to the value expected - # 
from reading the half-word in the object header. Note that - # this half-word is at offset 0 on a little-endian machine; - # but it is at offset 2 (32 bit) or 4 (64 bit) on a - # big-endian machine. - if IS_PPC_32: - self.mc.lhz(r.SCRATCH.value, locs[0].value, 2 * IS_BIG_ENDIAN) - else: - self.mc.lwz(r.SCRATCH.value, locs[0].value, 4 * IS_BIG_ENDIAN) - self.mc.cmp_op(0, r.SCRATCH.value, typeid.value, imm=typeid.is_imm()) + expected_typeid = (self.cpu.gc_ll_descr + .get_typeid_from_classptr_if_gcremovetypeptr(locs[1].value)) + self._cmp_guard_gc_type(locs[0], expected_typeid) + + def _read_typeid(self, targetreg, loc_ptr): + # Note that the typeid half-word is at offset 0 on a little-endian + # machine; it is at offset 2 or 4 on a big-endian machine. + assert self.cpu.supports_guard_gc_type + if IS_PPC_32: + self.mc.lhz(targetreg.value, loc_ptr.value, 2 * IS_BIG_ENDIAN) + else: + self.mc.lwz(targetreg.value, loc_ptr.value, 4 * IS_BIG_ENDIAN) + + def _cmp_guard_gc_type(self, loc_ptr, expected_typeid): + self._read_typeid(r.SCRATCH2, loc_ptr) + assert 0 <= expected_typeid <= 0x7fffffff # 4 bytes are always enough + if expected_typeid > 0xffff: # if 2 bytes are not enough + self.mc.subis(r.SCRATCH2.value, r.SCRATCH2.value, + expected_typeid >> 16) + expected_typeid = expected_typeid & 0xffff + self.mc.cmp_op(0, r.SCRATCH2.value, expected_typeid, + imm=True, signed=False) + + def emit_guard_gc_type(self, op, arglocs, regalloc): + self._cmp_guard_gc_type(arglocs[0], arglocs[1].value) + self.guard_success_cc = c.EQ + self._emit_guard(op, arglocs[2:]) + + def emit_guard_is_object(self, op, arglocs, regalloc): + assert self.cpu.supports_guard_gc_type + loc_object = arglocs[0] + # idea: read the typeid, fetch one byte of the field 'infobits' from + # the big typeinfo table, and check the flag 'T_IS_RPYTHON_INSTANCE'. 
+ base_type_info, shift_by, sizeof_ti = ( + self.cpu.gc_ll_descr.get_translated_info_for_typeinfo()) + infobits_offset, IS_OBJECT_FLAG = ( + self.cpu.gc_ll_descr.get_translated_info_for_guard_is_object()) + + self._read_typeid(r.SCRATCH2, loc_object) + self.mc.load_imm(r.SCRATCH, base_type_info + infobits_offset) + assert shift_by == 0 # on PPC64; fixme for PPC32 + self.mc.lbzx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value) + self.mc.andix(r.SCRATCH2.value, r.SCRATCH2.value, IS_OBJECT_FLAG & 0xff) + self.guard_success_cc = c.NE + self._emit_guard(op, arglocs[1:]) + + def emit_guard_subclass(self, op, arglocs, regalloc): + assert self.cpu.supports_guard_gc_type + loc_object = arglocs[0] + loc_check_against_class = arglocs[1] + offset = self.cpu.vtable_offset + offset2 = self.cpu.subclassrange_min_offset + if offset is not None: + # read this field to get the vtable pointer + self.mc.load(r.SCRATCH2.value, loc_object.value, offset) + # read the vtable's subclassrange_min field + assert _check_imm_arg(offset2) + self.mc.ld(r.SCRATCH2.value, r.SCRATCH2.value, offset2) + else: + # read the typeid + self._read_typeid(r.SCRATCH, loc_object) + # read the vtable's subclassrange_min field, as a single + # step with the correct offset + base_type_info, shift_by, sizeof_ti = ( + self.cpu.gc_ll_descr.get_translated_info_for_typeinfo()) + self.mc.load_imm(r.SCRATCH2, base_type_info + sizeof_ti + offset2) + assert shift_by == 0 # on PPC64; fixme for PPC32 + self.mc.ldx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value) + # get the two bounds to check against + vtable_ptr = loc_check_against_class.getint() + vtable_ptr = rffi.cast(rclass.CLASSTYPE, vtable_ptr) + check_min = vtable_ptr.subclassrange_min + check_max = vtable_ptr.subclassrange_max + assert check_max > check_min + check_diff = check_max - check_min - 1 + # right now, a full PyPy uses less than 6000 numbers, + # so we'll assert here that it always fit inside 15 bits + assert 0 <= check_min <= 0x7fff + assert 0 
<= check_diff <= 0xffff + # check by doing the unsigned comparison (tmp - min) < (max - min) + self.mc.subi(r.SCRATCH2.value, r.SCRATCH2.value, check_min) + self.mc.cmp_op(0, r.SCRATCH2.value, check_diff, imm=True, signed=False) + # the guard passes if we get a result of "below or equal" + self.guard_success_cc = c.LE + self._emit_guard(op, arglocs[2:]) def emit_guard_not_invalidated(self, op, arglocs, regalloc): self._emit_guard(op, arglocs, is_guard_not_invalidated=True) @@ -433,17 +512,20 @@ assert my_nbargs == target_nbargs if descr in self.target_tokens_currently_compiling: - self.mc.b_offset(descr._ppc_loop_code) + self.mc.b_offset(descr._ll_loop_code) else: - self.mc.b_abs(descr._ppc_loop_code) + self.mc.b_abs(descr._ll_loop_code) - def emit_same_as(self, op, arglocs, regalloc): + def _genop_same_as(self, op, arglocs, regalloc): argloc, resloc = arglocs if argloc is not resloc: self.regalloc_mov(argloc, resloc) - emit_cast_ptr_to_int = emit_same_as - emit_cast_int_to_ptr = emit_same_as + emit_same_as_i = _genop_same_as + emit_same_as_r = _genop_same_as + emit_same_as_f = _genop_same_as + emit_cast_ptr_to_int = _genop_same_as + emit_cast_int_to_ptr = _genop_same_as def emit_guard_no_exception(self, op, arglocs, regalloc): self.mc.load_from_addr(r.SCRATCH2, self.cpu.pos_exception()) @@ -504,20 +586,35 @@ else: cb.emit() - def emit_call(self, op, arglocs, regalloc): + def _genop_call(self, op, arglocs, regalloc): oopspecindex = regalloc.get_oopspecindex(op) if oopspecindex == EffectInfo.OS_MATH_SQRT: return self._emit_math_sqrt(op, arglocs, regalloc) self._emit_call(op, arglocs) - def emit_call_may_force(self, op, arglocs, regalloc): + emit_call_i = _genop_call + emit_call_r = _genop_call + emit_call_f = _genop_call + emit_call_n = _genop_call + + def _genop_call_may_force(self, op, arglocs, regalloc): self._store_force_index(self._find_nearby_operation(regalloc, +1)) self._emit_call(op, arglocs) - def emit_call_release_gil(self, op, arglocs, regalloc): + 
emit_call_may_force_i = _genop_call_may_force + emit_call_may_force_r = _genop_call_may_force + emit_call_may_force_f = _genop_call_may_force + emit_call_may_force_n = _genop_call_may_force + + def _genop_call_release_gil(self, op, arglocs, regalloc): self._store_force_index(self._find_nearby_operation(regalloc, +1)) self._emit_call(op, arglocs, is_call_release_gil=True) + emit_call_release_gil_i = _genop_call_release_gil + emit_call_release_gil_r = _genop_call_release_gil + emit_call_release_gil_f = _genop_call_release_gil + emit_call_release_gil_n = _genop_call_release_gil + def _store_force_index(self, guard_op): assert (guard_op.getopnum() == rop.GUARD_NOT_FORCED or guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) @@ -667,13 +764,20 @@ else: assert 0, "size not supported" - def emit_getfield_gc(self, op, arglocs, regalloc): + def _genop_getfield(self, op, arglocs, regalloc): base_loc, ofs, res, size, sign = arglocs self._load_from_mem(res, base_loc, ofs, size, sign) - emit_getfield_raw = emit_getfield_gc - emit_getfield_raw_pure = emit_getfield_gc - emit_getfield_gc_pure = emit_getfield_gc + emit_getfield_gc_i = _genop_getfield + emit_getfield_gc_r = _genop_getfield + emit_getfield_gc_f = _genop_getfield + emit_getfield_gc_pure_i = _genop_getfield + emit_getfield_gc_pure_r = _genop_getfield + emit_getfield_gc_pure_f = _genop_getfield + emit_getfield_raw_i = _genop_getfield + emit_getfield_raw_f = _genop_getfield + emit_getfield_raw_pure_i = _genop_getfield + emit_getfield_raw_pure_f = _genop_getfield SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) @@ -729,13 +833,15 @@ index_loc = r.SCRATCH2 return index_loc - def emit_getinteriorfield_gc(self, op, arglocs, regalloc): + def _genop_getarray_or_interiorfield(self, op, arglocs, regalloc): (base_loc, index_loc, res_loc, ofs_loc, itemsize, fieldsize, fieldsign) = arglocs ofs_loc = self._apply_scale(ofs_loc, index_loc, itemsize) self._load_from_mem(res_loc, base_loc, ofs_loc, fieldsize, fieldsign) - 
emit_getinteriorfield_raw = emit_getinteriorfield_gc + emit_getinteriorfield_gc_i = _genop_getarray_or_interiorfield + emit_getinteriorfield_gc_r = _genop_getarray_or_interiorfield + emit_getinteriorfield_gc_f = _genop_getarray_or_interiorfield def emit_setinteriorfield_gc(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, ofs_loc, @@ -752,12 +858,20 @@ emit_setarrayitem_gc = emit_setinteriorfield_gc emit_setarrayitem_raw = emit_setarrayitem_gc - emit_getarrayitem_gc = emit_getinteriorfield_gc - emit_getarrayitem_raw = emit_getarrayitem_gc - emit_getarrayitem_gc_pure = emit_getarrayitem_gc + emit_getarrayitem_gc_i = _genop_getarray_or_interiorfield + emit_getarrayitem_gc_r = _genop_getarray_or_interiorfield + emit_getarrayitem_gc_f = _genop_getarray_or_interiorfield + emit_getarrayitem_gc_pure_i = _genop_getarray_or_interiorfield + emit_getarrayitem_gc_pure_r = _genop_getarray_or_interiorfield + emit_getarrayitem_gc_pure_f = _genop_getarray_or_interiorfield + emit_getarrayitem_raw_i = _genop_getarray_or_interiorfield + emit_getarrayitem_raw_f = _genop_getarray_or_interiorfield + emit_getarrayitem_raw_pure_i = _genop_getarray_or_interiorfield + emit_getarrayitem_raw_pure_f = _genop_getarray_or_interiorfield emit_raw_store = emit_setarrayitem_gc - emit_raw_load = emit_getarrayitem_gc + emit_raw_load_i = _genop_getarray_or_interiorfield + emit_raw_load_f = _genop_getarray_or_interiorfield def _copy_in_scratch2(self, loc): if loc.is_imm(): @@ -862,8 +976,8 @@ _mixin_ = True - emit_strlen = FieldOpAssembler.emit_getfield_gc - emit_strgetitem = FieldOpAssembler.emit_getarrayitem_gc + emit_strlen = FieldOpAssembler._genop_getfield + emit_strgetitem = FieldOpAssembler._genop_getarray_or_interiorfield emit_strsetitem = FieldOpAssembler.emit_setarrayitem_gc def emit_copystrcontent(self, op, arglocs, regalloc): @@ -926,8 +1040,8 @@ _mixin_ = True - emit_unicodelen = FieldOpAssembler.emit_getfield_gc - emit_unicodegetitem = FieldOpAssembler.emit_getarrayitem_gc + 
emit_unicodelen = FieldOpAssembler._genop_getfield + emit_unicodegetitem = FieldOpAssembler._genop_getarray_or_interiorfield emit_unicodesetitem = FieldOpAssembler.emit_setarrayitem_gc @@ -936,7 +1050,7 @@ _mixin_ = True def emit_call_malloc_gc(self, op, arglocs, regalloc): - self.emit_call(op, arglocs, regalloc) + self._emit_call(op, arglocs) self.propagate_memoryerror_if_r3_is_null() def emit_call_malloc_nursery(self, op, arglocs, regalloc): @@ -1130,16 +1244,21 @@ res_loc = arglocs[0] self.mc.mr(res_loc.value, r.SPP.value) - def emit_call_assembler(self, op, arglocs, regalloc): + def _genop_call_assembler(self, op, arglocs, regalloc): if len(arglocs) == 3: [result_loc, argloc, vloc] = arglocs else: [result_loc, argloc] = arglocs vloc = imm(0) self._store_force_index(self._find_nearby_operation(regalloc, +1)) - # 'result_loc' is either r3 or f1 + # 'result_loc' is either r3 or f1, or None self.call_assembler(op, argloc, vloc, result_loc, r.r3) + emit_call_assembler_i = _genop_call_assembler + emit_call_assembler_r = _genop_call_assembler + emit_call_assembler_f = _genop_call_assembler + emit_call_assembler_n = _genop_call_assembler + imm = staticmethod(imm) # for call_assembler() def _call_assembler_emit_call(self, addr, argloc, _): @@ -1177,9 +1296,9 @@ return jump_to_done def _call_assembler_load_result(self, op, result_loc): - if op.result is not None: + if op.type != VOID: # load the return value from the dead frame's value index 0 - kind = op.result.type + kind = op.type descr = self.cpu.getarraydescr_for_frame(kind) ofs = self.cpu.unpack_arraydescr(descr) if kind == FLOAT: @@ -1202,6 +1321,10 @@ assert old_nbargs == new_nbargs oldadr = oldlooptoken._ll_function_addr target = newlooptoken._ll_function_addr + # copy frame-info data + baseofs = self.cpu.get_baseofs_of_frame_field() + newlooptoken.compiled_loop_token.update_frame_info( + oldlooptoken.compiled_loop_token, baseofs) if IS_PPC_32 or not IS_BIG_ENDIAN: # we overwrite the instructions at the old 
_ll_function_addr # to start with a JMP to the new _ll_function_addr. diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -15,7 +15,6 @@ import rpython.jit.backend.ppc.condition as c from rpython.jit.backend.ppc.register import JITFRAME_FIXED_SIZE from rpython.jit.metainterp.history import AbstractFailDescr -from rpython.jit.metainterp.history import ConstInt, BoxInt from rpython.jit.backend.llsupport import jitframe, rewrite from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.backend.llsupport.assembler import (DEBUG_COUNTER, debug_bridge, @@ -770,7 +769,7 @@ if IS_PPC_64 and IS_BIG_ENDIAN: # fix the function descriptor (3 words) rffi.cast(rffi.LONGP, rawstart)[0] = rawstart + 3 * WORD # - looptoken._ppc_loop_code = looppos + rawstart + looptoken._ll_loop_code = looppos + rawstart debug_start("jit-backend-addr") debug_print("Loop %d (%s) has address 0x%x to 0x%x (bootstrap 0x%x)" % ( looptoken.number, loopname, @@ -870,7 +869,7 @@ def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: - targettoken._ppc_loop_code += rawstart + targettoken._ll_loop_code += rawstart self.target_tokens_currently_compiling = None def target_arglocs(self, looptoken): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -1,5 +1,5 @@ from rpython.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager, - TempBox, compute_vars_longevity, + TempVar, compute_vars_longevity, BaseRegalloc) from rpython.jit.backend.ppc.arch import (WORD, MY_COPY_OF_REGS, IS_PPC_32) from rpython.jit.codewriter import longlong @@ -9,8 +9,7 @@ from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg, check_imm_box from 
rpython.jit.backend.ppc.helper import regalloc as helper from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, - Box, BoxPtr, - INT, REF, FLOAT) + INT, REF, FLOAT, VOID) from rpython.jit.metainterp.history import JitCellToken, TargetToken from rpython.jit.metainterp.resoperation import rop from rpython.jit.backend.ppc import locations @@ -32,23 +31,20 @@ LIMIT_LOOP_BREAK = 15000 # should be much smaller than 32 KB -# xxx hack: set a default value for TargetToken._arm_loop_code. If 0, we know -# that it is a LABEL that was not compiled yet. -TargetToken._ppc_loop_code = 0 -class TempInt(TempBox): +class TempInt(TempVar): type = INT def __repr__(self): return "" % (id(self),) -class TempPtr(TempBox): +class TempPtr(TempVar): type = REF def __repr__(self): return "" % (id(self),) -class TempFloat(TempBox): +class TempFloat(TempVar): type = FLOAT def __repr__(self): @@ -163,7 +159,7 @@ return loc def get_scratch_reg(self): - box = TempBox() + box = TempVar() reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) self.temp_boxes.append(box) return reg @@ -320,7 +316,7 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op.result not in self.longevity: + if op.has_no_side_effect() and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -333,13 +329,13 @@ self.fprm.temp_boxes.append(box) # opnum = op.getopnum() - if not we_are_translated() and opnum == -124: + if not we_are_translated() and opnum == -127: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) asm_operations[opnum](self.assembler, op, arglocs, self) self.free_op_vars() - self.possibly_free_var(op.result) + self.possibly_free_var(op) self.rm._check_invariants() self.fprm._check_invariants() if self.assembler.mc.get_relative_pos() > self.limit_loop_break: @@ -462,6 +458,7 @@ prepare_int_add = helper.prepare_int_add_or_mul prepare_int_sub = helper.prepare_int_sub 
prepare_int_mul = helper.prepare_int_add_or_mul + prepare_nursery_ptr_increment = prepare_int_add prepare_int_floordiv = helper.prepare_binary_op prepare_int_mod = helper.prepare_binary_op @@ -521,29 +518,29 @@ def _prepare_math_sqrt(self, op): loc = self.ensure_reg(op.getarg(1)) self.free_op_vars() - res = self.fprm.force_allocate_reg(op.result) + res = self.fprm.force_allocate_reg(op) return [loc, res] def prepare_cast_float_to_int(self, op): loc1 = self.ensure_reg(op.getarg(0)) self.free_op_vars() temp_loc = self.get_scratch_reg(FLOAT) - res = self.rm.force_allocate_reg(op.result) + res = self.rm.force_allocate_reg(op) return [loc1, temp_loc, res] def prepare_cast_int_to_float(self, op): loc1 = self.ensure_reg(op.getarg(0)) - res = self.fprm.force_allocate_reg(op.result) + res = self.fprm.force_allocate_reg(op) return [loc1, res] def prepare_convert_float_bytes_to_longlong(self, op): loc1 = self.ensure_reg(op.getarg(0)) - res = self.rm.force_allocate_reg(op.result) + res = self.rm.force_allocate_reg(op) return [loc1, res] def prepare_convert_longlong_bytes_to_float(self, op): loc1 = self.ensure_reg(op.getarg(0)) - res = self.fprm.force_allocate_reg(op.result) + res = self.fprm.force_allocate_reg(op) return [loc1, res] def prepare_finish(self, op): @@ -602,8 +599,8 @@ def prepare_guard_exception(self, op): loc = self.ensure_reg(op.getarg(0)) - if op.result in self.longevity: - resloc = self.force_allocate_reg(op.result) + if op in self.longevity: + resloc = self.force_allocate_reg(op) else: resloc = None arglocs = self._prepare_guard(op, [loc, resloc]) @@ -626,41 +623,17 @@ def prepare_guard_class(self, op): x = self.ensure_reg(op.getarg(0)) y_val = force_int(op.getarg(1).getint()) - - arglocs = [x, None, None] - - offset = self.cpu.vtable_offset - if offset is not None: - y = r.SCRATCH2 - self.assembler.mc.load_imm(y, y_val) - - assert _check_imm_arg(offset) - offset_loc = imm(offset) - - arglocs[1] = y - arglocs[2] = offset_loc - - else: - # XXX hard-coded 
assumption: to go from an object to its class - # we use the following algorithm: - # - read the typeid from mem(locs[0]), i.e. at offset 0 - # - keep the lower half-word read there - # - multiply by 4 (on 32-bits only) and use it as an - # offset in type_info_group - # - add 16/32 bytes, to go past the TYPE_INFO structure - classptr = y_val - from rpython.memory.gctypelayout import GCData - sizeof_ti = rffi.sizeof(GCData.TYPE_INFO) - type_info_group = llop.gc_get_type_info_group(llmemory.Address) - type_info_group = rffi.cast(lltype.Signed, type_info_group) - expected_typeid = classptr - sizeof_ti - type_info_group - if IS_PPC_32: - expected_typeid >>= 2 - arglocs[1] = self.ensure_reg_or_16bit_imm(ConstInt(expected_typeid)) - - return self._prepare_guard(op, arglocs) + arglocs = self._prepare_guard(op, [x, imm(y_val)]) + return arglocs prepare_guard_nonnull_class = prepare_guard_class + prepare_guard_gc_type = prepare_guard_class + prepare_guard_subclass = prepare_guard_class + + def prepare_guard_is_object(self, op): + loc_object = self.ensure_reg(op.getarg(0)) + arglocs = self._prepare_guard(op, [loc_object]) + return arglocs def compute_hint_frame_locations(self, operations): # optimization only: fill in the 'hint_frame_locations' dictionary @@ -672,7 +645,7 @@ self.final_jump_op = op descr = op.getdescr() assert isinstance(descr, TargetToken) - if descr._ppc_loop_code != 0: + if descr._ll_loop_code != 0: # if the target LABEL was already compiled, i.e. 
if it belongs # to some already-compiled piece of code self._compute_hint_frame_locations_from_descr(descr) @@ -688,7 +661,7 @@ assert len(arglocs) == jump_op.numargs() for i in range(jump_op.numargs()): box = jump_op.getarg(i) - if isinstance(box, Box): + if not isinstance(box, Const): loc = arglocs[i] if loc is not None and loc.is_stack(): self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc) @@ -735,35 +708,44 @@ prepare_setfield_raw = prepare_setfield_gc - def prepare_getfield_gc(self, op): + def _prepare_getfield(self, op): ofs, size, sign = unpack_fielddescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [base_loc, ofs_loc, res, imm(size), imm(sign)] - prepare_getfield_raw = prepare_getfield_gc - prepare_getfield_raw_pure = prepare_getfield_gc - prepare_getfield_gc_pure = prepare_getfield_gc + prepare_getfield_gc_i = _prepare_getfield + prepare_getfield_gc_r = _prepare_getfield + prepare_getfield_gc_f = _prepare_getfield + prepare_getfield_raw_i = _prepare_getfield + prepare_getfield_raw_f = _prepare_getfield + prepare_getfield_raw_pure_i = _prepare_getfield + prepare_getfield_raw_pure_f = _prepare_getfield + prepare_getfield_gc_pure_i = _prepare_getfield + prepare_getfield_gc_pure_r = _prepare_getfield + prepare_getfield_gc_pure_f = _prepare_getfield def prepare_increment_debug_counter(self, op): base_loc = self.ensure_reg(op.getarg(0)) temp_loc = r.SCRATCH2 return [base_loc, temp_loc] - def prepare_getinteriorfield_gc(self, op): + def _prepare_getinteriorfield(self, op): t = unpack_interiorfielddescr(op.getdescr()) ofs, itemsize, fieldsize, sign = t base_loc = self.ensure_reg(op.getarg(0)) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = 
self.force_allocate_reg(op) return [base_loc, index_loc, result_loc, ofs_loc, imm(itemsize), imm(fieldsize), imm(sign)] - prepare_getinteriorfield_raw = prepare_getinteriorfield_gc + prepare_getinteriorfield_gc_i = _prepare_getinteriorfield + prepare_getinteriorfield_gc_r = _prepare_getinteriorfield + prepare_getinteriorfield_gc_f = _prepare_getinteriorfield def prepare_setinteriorfield_gc(self, op): t = unpack_interiorfielddescr(op.getdescr()) @@ -784,7 +766,7 @@ assert _check_imm_arg(ofs) base_loc = self.ensure_reg(op.getarg(0)) self.free_op_vars() - res = self.force_allocate_reg(op.result) + res = self.force_allocate_reg(op) return [res, base_loc, imm(ofs)] def prepare_setarrayitem_gc(self, op): @@ -808,36 +790,47 @@ return [base_loc, index_loc, value_loc, ofs_loc, imm(1), imm(size)] - def prepare_getarrayitem_gc(self, op): + def _prepare_getarrayitem(self, op): size, ofs, sign = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) imm_size = imm(size) return [base_loc, index_loc, result_loc, ofs_loc, imm_size, imm_size, imm(sign)] - prepare_getarrayitem_raw = prepare_getarrayitem_gc - prepare_getarrayitem_gc_pure = prepare_getarrayitem_gc + prepare_getarrayitem_gc_i = _prepare_getarrayitem + prepare_getarrayitem_gc_r = _prepare_getarrayitem + prepare_getarrayitem_gc_f = _prepare_getarrayitem + prepare_getarrayitem_raw_i = _prepare_getarrayitem + prepare_getarrayitem_raw_f = _prepare_getarrayitem + prepare_getarrayitem_raw_pure_i = _prepare_getarrayitem + prepare_getarrayitem_raw_pure_f = _prepare_getarrayitem + prepare_getarrayitem_gc_pure_i = _prepare_getarrayitem + prepare_getarrayitem_gc_pure_r = _prepare_getarrayitem + prepare_getarrayitem_gc_pure_f = _prepare_getarrayitem - def prepare_raw_load(self, op): + def 
_prepare_raw_load(self, op): size, ofs, sign = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) return [base_loc, index_loc, result_loc, ofs_loc, imm(1), imm(size), imm(sign)] + prepare_raw_load_i = _prepare_raw_load + prepare_raw_load_f = _prepare_raw_load + def prepare_strlen(self, op): basesize, itemsize, ofs_length = symbolic.get_array_token(rstr.STR, self.cpu.translate_support_code) base_loc = self.ensure_reg(op.getarg(0)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] def prepare_strgetitem(self, op): @@ -847,7 +840,7 @@ index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) imm_size = imm(itemsize) return [base_loc, index_loc, result_loc, ofs_loc, imm_size, imm_size, imm(0)] @@ -880,7 +873,7 @@ self.cpu.translate_support_code) base_loc = self.ensure_reg(op.getarg(0)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) return [base_loc, imm(ofs_length), result_loc, imm(WORD), imm(0)] def prepare_unicodegetitem(self, op): @@ -890,7 +883,7 @@ index_loc = self.ensure_reg_or_any_imm(op.getarg(1)) ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(basesize)) self.free_op_vars() - result_loc = self.force_allocate_reg(op.result) + result_loc = self.force_allocate_reg(op) imm_size = imm(itemsize) return [base_loc, index_loc, result_loc, ofs_loc, imm_size, imm_size, imm(0)] @@ -906,9 +899,11 @@ return [base_loc, index_loc, value_loc, ofs_loc, imm_size, imm_size] - 
prepare_same_as = helper.prepare_unary_op - prepare_cast_ptr_to_int = prepare_same_as - prepare_cast_int_to_ptr = prepare_same_as + prepare_same_as_i = helper.prepare_unary_op + prepare_same_as_r = helper.prepare_unary_op + prepare_same_as_f = helper.prepare_unary_op + prepare_cast_ptr_to_int = helper.prepare_unary_op + prepare_cast_int_to_ptr = helper.prepare_unary_op def get_oopspecindex(self, op): descr = op.getdescr() @@ -918,12 +913,17 @@ return effectinfo.oopspecindex return EffectInfo.OS_NONE - def prepare_call(self, op): + def _prepare_call(self, op): oopspecindex = self.get_oopspecindex(op) if oopspecindex == EffectInfo.OS_MATH_SQRT: return self._prepare_math_sqrt(op) return self._prepare_call(op) + prepare_call_i = _prepare_call + prepare_call_r = _prepare_call + prepare_call_f = _prepare_call + prepare_call_n = _prepare_call + def _spill_before_call(self, save_all_regs=False): # spill variables that need to be saved around calls self.fprm.before_call(save_all_regs=save_all_regs) @@ -939,14 +939,14 @@ for i in range(op.numargs()): args.append(self.loc(op.getarg(i))) self._spill_before_call(save_all_regs) - if op.result: - resloc = self.after_call(op.result) + if op.type != VOID: + resloc = self.after_call(op) args[0] = resloc return args def prepare_call_malloc_nursery(self, op): - self.rm.force_allocate_reg(op.result, selected_reg=r.RES) - self.rm.temp_boxes.append(op.result) + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) tmp_box = TempInt() self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) self.rm.temp_boxes.append(tmp_box) @@ -958,8 +958,8 @@ # (we take care explicitly of conflicts with r.RES or r.RSZ) self.free_op_vars() # the result will be in r.RES - self.rm.force_allocate_reg(op.result, selected_reg=r.RES) - self.rm.temp_boxes.append(op.result) + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) # we need r.RSZ as a temporary tmp_box = TempInt() 
self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) @@ -968,8 +968,8 @@ def prepare_call_malloc_nursery_varsize(self, op): # the result will be in r.RES - self.rm.force_allocate_reg(op.result, selected_reg=r.RES) - self.rm.temp_boxes.append(op.result) + self.rm.force_allocate_reg(op, selected_reg=r.RES) + self.rm.temp_boxes.append(op) # we need r.RSZ as a temporary tmp_box = TempInt() self.rm.force_allocate_reg(tmp_box, selected_reg=r.RSZ) @@ -1001,7 +1001,7 @@ return arglocs def prepare_force_token(self, op): - res_loc = self.force_allocate_reg(op.result) + res_loc = self.force_allocate_reg(op) return [res_loc] def prepare_label(self, op): @@ -1016,7 +1016,7 @@ # of some guard position = self.rm.position for arg in inputargs: - assert isinstance(arg, Box) + assert not isinstance(arg, Const) if self.last_real_usage.get(arg, -1) <= position: self.force_spill_var(arg) # @@ -1028,7 +1028,7 @@ # for i in range(len(inputargs)): arg = inputargs[i] - assert isinstance(arg, Box) + assert not isinstance(arg, Const) loc = self.loc(arg) assert loc is not r.SPP arglocs[i] = loc @@ -1040,7 +1040,7 @@ self.flush_loop() # descr._ppc_arglocs = arglocs - descr._ppc_loop_code = self.assembler.mc.currpos() + descr._ll_loop_code = self.assembler.mc.currpos() descr._ppc_clt = self.assembler.current_clt self.assembler.target_tokens_currently_compiling[descr] = None self.possibly_free_vars_for_op(op) @@ -1053,17 +1053,33 @@ if jump_op is not None and jump_op.getdescr() is descr: self._compute_hint_frame_locations_from_descr(descr) - def prepare_call_may_force(self, op): + def _prepare_call_may_force(self, op): return self._prepare_call(op, save_all_regs=True) - prepare_call_release_gil = prepare_call_may_force + prepare_call_may_force_i = _prepare_call_may_force + prepare_call_may_force_r = _prepare_call_may_force + prepare_call_may_force_f = _prepare_call_may_force + prepare_call_may_force_n = _prepare_call_may_force - def prepare_call_assembler(self, op): + 
prepare_call_release_gil_i = _prepare_call_may_force + prepare_call_release_gil_r = _prepare_call_may_force + prepare_call_release_gil_f = _prepare_call_may_force + prepare_call_release_gil_n = _prepare_call_may_force + + def _prepare_call_assembler(self, op): locs = self.locs_for_call_assembler(op) self._spill_before_call(save_all_regs=True) - resloc = self.after_call(op.result) + if op.type != VOID: + resloc = self.after_call(op) + else: + resloc = None return [resloc] + locs + prepare_call_assembler_i = _prepare_call_assembler + prepare_call_assembler_r = _prepare_call_assembler + prepare_call_assembler_f = _prepare_call_assembler + prepare_call_assembler_n = _prepare_call_assembler + def prepare_force_spill(self, op): self.force_spill_var(op.getarg(0)) return [] diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -84,3 +84,9 @@ def get_all_loop_runs(self): # not implemented return lltype.malloc(LOOP_RUN_CONTAINER, 0) + + def build_regalloc(self): + ''' for tests''' + from rpython.jit.backend.ppc.regalloc import Regalloc + assert self.assembler is not None + return Regalloc(self.assembler) diff --git a/rpython/jit/backend/ppc/test/support.py b/rpython/jit/backend/ppc/test/support.py --- a/rpython/jit/backend/ppc/test/support.py +++ b/rpython/jit/backend/ppc/test/support.py @@ -4,6 +4,9 @@ class JitPPCMixin(support.LLJitMixin): type_system = 'lltype' CPUClass = getcpuclass() + # we have to disable unroll + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" + basic = False def check_jumps(self, maxcount): pass diff --git a/rpython/jit/backend/x86/test/test_recursive.py b/rpython/jit/backend/ppc/test/test_recursive.py copy from rpython/jit/backend/x86/test/test_recursive.py copy to rpython/jit/backend/ppc/test/test_recursive.py --- a/rpython/jit/backend/x86/test/test_recursive.py +++ b/rpython/jit/backend/ppc/test/test_recursive.py 
@@ -1,30 +1,8 @@ from rpython.jit.metainterp.test.test_recursive import RecursiveTests -from rpython.jit.backend.x86.test.test_basic import Jit386Mixin -from rpython.jit.backend.llsupport.codemap import unpack_traceback -from rpython.jit.backend.x86.arch import WORD +from rpython.jit.backend.ppc.test.support import JitPPCMixin -class TestRecursive(Jit386Mixin, RecursiveTests): +class TestRecursive(JitPPCMixin, RecursiveTests): # for the individual tests see # ====> ../../../metainterp/test/test_recursive.py - def check_get_unique_id(self, codemaps): - if WORD == 4: - return # this is 64 bit only check - - assert len(codemaps) == 3 - # we want to create a map of differences, so unpacking the tracebacks - # byte by byte - codemaps.sort(lambda a, b: cmp(a[1], b[1])) - # biggest is the big loop, smallest is the bridge - def get_ranges(c): - ranges = [] - prev_traceback = None - for b in range(c[0], c[0] + c[1]): - tb = unpack_traceback(b) - if tb != prev_traceback: - ranges.append(tb) - prev_traceback = tb - return ranges - assert get_ranges(codemaps[2]) == [[4], [4, 2], [4]] - assert get_ranges(codemaps[1]) == [[2]] - assert get_ranges(codemaps[0]) == [[2], []] + pass diff --git a/rpython/jit/backend/ppc/test/test_regalloc_3.py b/rpython/jit/backend/ppc/test/test_regalloc_3.py --- a/rpython/jit/backend/ppc/test/test_regalloc_3.py +++ b/rpython/jit/backend/ppc/test/test_regalloc_3.py @@ -1,7 +1,5 @@ import py -from rpython.jit.metainterp.history import ResOperation, BoxInt, ConstInt,\ - BoxPtr, ConstPtr, BasicFailDescr, JitCellToken -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.history import JitCellToken from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.backend.ppc.arch import WORD from rpython.jit.tool.oparser import parse @@ -79,7 +77,7 @@ i38 = uint_gt(i33, -11) i39 = int_neg(i7) i40 = int_gt(i24, i32) - i99 = same_as(0) + i99 = same_as_i(0) guard_true(i99) [i40, i36, i37, i31, i16, i34, i35, i23, i22, i29, 
i14, i39, i30, i38] finish(42) ''') @@ -136,7 +134,7 @@ i38 = int_gt(i4, i11) i39 = int_lt(i27, i22) i40 = int_neg(i27) - i99 = same_as(0) + i99 = same_as_i(0) guard_true(i99) [i40, i10, i36, i26, i13, i30, i21, i33, i18, i25, i31, i32, i28, i29, i35, i38, i20, i39, i34, i23, i37] finish(-42) ''') diff --git a/rpython/jit/backend/ppc/test/test_runner.py b/rpython/jit/backend/ppc/test/test_runner.py --- a/rpython/jit/backend/ppc/test/test_runner.py +++ b/rpython/jit/backend/ppc/test/test_runner.py @@ -4,11 +4,10 @@ from rpython.jit.metainterp.history import (AbstractFailDescr, AbstractDescr, BasicFailDescr, BasicFinalDescr, - BoxInt, Box, BoxPtr, JitCellToken, TargetToken, ConstInt, ConstPtr, - Const, - BoxFloat, ConstFloat) + Const, ConstFloat) +from rpython.jit.metainterp.resoperation import InputArgInt, InputArgFloat from rpython.rtyper.lltypesystem import lltype from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.backend.ppc.arch import IS_PPC_32 @@ -105,51 +104,28 @@ def test_unicodesetitem_really_needs_temploc(self): u_box = self.alloc_unicode(u"abcdsdasdsaddefg") - i0 = BoxInt() - i1 = BoxInt() - i2 = BoxInt() - i3 = BoxInt() - i4 = BoxInt() - i5 = BoxInt() - i6 = BoxInt() - i7 = BoxInt() - i8 = BoxInt() - i9 = BoxInt() - p10 = BoxPtr() - - inputargs = [i0,i1,i2,i3,i4,i5,i6,i7,i8,i9,p10] - looptoken = JitCellToken() targettoken = TargetToken() finaldescr = BasicFinalDescr(1) + loop = parse(''' + [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, p10] + label(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, p10, descr=targettoken) + unicodesetitem(p10, i6, 123) + i11 = int_add(i0, i1) + i12 = int_add(i11, i2) + i13 = int_add(i12, i3) + i14 = int_add(i13, i4) + i15 = int_add(i14, i5) + i16 = int_add(i15, i6) + i17 = int_add(i16, i7) + i18 = int_add(i17, i8) + i19 = int_add(i18, i9) + finish(i19, descr=finaldescr) + ''', namespace={'targettoken': targettoken, + 'finaldescr': finaldescr}) - i11 = BoxInt() - i12 = BoxInt() - i13 = BoxInt() - i14 = 
BoxInt() - i15 = BoxInt() - i16 = BoxInt() - i17 = BoxInt() - i18 = BoxInt() - i19 = BoxInt() - - operations = [ - ResOperation(rop.LABEL, inputargs, None, descr=targettoken), - ResOperation(rop.UNICODESETITEM, - [p10, i6, ConstInt(123)], None), - ResOperation(rop.INT_ADD, [i0, i1], i11), - ResOperation(rop.INT_ADD, [i11, i2], i12), - ResOperation(rop.INT_ADD, [i12, i3], i13), - ResOperation(rop.INT_ADD, [i13, i4], i14), - ResOperation(rop.INT_ADD, [i14, i5], i15), - ResOperation(rop.INT_ADD, [i15, i6], i16), - ResOperation(rop.INT_ADD, [i16, i7], i17), - ResOperation(rop.INT_ADD, [i17, i8], i18), - ResOperation(rop.INT_ADD, [i18, i9], i19), - ResOperation(rop.FINISH, [i19], None, descr=finaldescr) - ] - + looptoken = JitCellToken() args = [(i + 1) for i in range(10)] + [u_box.getref_base()] - self.cpu.compile_loop(inputargs, operations, looptoken) + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, *args) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 @@ -200,27 +176,26 @@ def test_compile_more_than_32k(self): # the guard_true needs a "b.cond" jumping forward more than 32 kb - i0 = BoxInt() - i1 = BoxInt() looptoken = JitCellToken() targettoken = TargetToken() - operations = [ - ResOperation(rop.LABEL, [i0], None, descr=targettoken), - ResOperation(rop.INT_LE, [i0, ConstInt(9)], i1), - ResOperation(rop.GUARD_TRUE, [i1], None, descr=BasicFailDescr(5)), + ops = [ + '[i0]', + 'label(i0, descr=targettoken)', + 'i1 = int_le(i0, 9)', + 'guard_true(i1, descr=faildescr) [i0]', ] - operations[2].setfailargs([i0]) - inputargs = [i0] NUM = 8193 + iprevious = 'i0' for i in range(NUM): - i2 = BoxInt() - operations.append( - ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i2)) - i0 = i2 - operations.append( - ResOperation(rop.JUMP, [i0], None, descr=targettoken)) + inext = 'i%d' % (i + 2,) + ops.append('%s = int_add(%s, 1)' % (inext, iprevious)) + iprevious = inext + ops.append('jump(%s, 
descr=targettoken)' % (iprevious,)) - self.cpu.compile_loop(inputargs, operations, looptoken) + loop = parse('\n'.join(ops), namespace={'targettoken': targettoken, + 'faildescr': BasicFailDescr(5)}) + + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) deadframe = self.cpu.execute_token(looptoken, -42) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 5 @@ -252,13 +227,13 @@ argboxes = [] for x in argvals: if isinstance(x, float): - argboxes.append(BoxFloat(x)) + argboxes.append(InputArgFloat(x)) else: - argboxes.append(BoxInt(x)) - res = self.execute_operation(rop.CALL, + argboxes.append(InputArgInt(x)) + res = self.execute_operation(rop.CALL_I, [funcbox] + argboxes, 'int', descr=calldescr) - assert res.value == -42 + assert res == -42 assert seen == [argvals] def test_subi_range(self): diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1530,7 +1530,3 @@ if we_are_translated(): llop.debug_print(lltype.Void, msg) raise NotImplementedError(msg) - -# xxx hack: set a default value for TargetToken._ll_loop_code. -# If 0, we know that it is a LABEL that was not compiled yet. -TargetToken._ll_loop_code = 0 diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -39,7 +39,7 @@ 'nop; ' # for the label 'add; test; je; jmp;') # plus some padding bridge_loop_instructions = ( - 'cmp; jge; mov; mov; mov;( mov ;)? call; mov; jmp;') + 'cmp; jge; mov;( movabs;)? 
mov; mov(abs)?; call; mov(abs)?; jmp;') def get_cpu(self): cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -438,6 +438,9 @@ self.compiled_loop_token.cpu.dump_loop_token(self) class TargetToken(AbstractDescr): + _ll_loop_code = 0 # for the backend. If 0, we know that it is + # a LABEL that was not compiled yet. + def __init__(self, targeting_jitcell_token=None, original_jitcell_token=None): # Warning, two different jitcell_tokens here! From noreply at buildbot.pypy.org Fri Oct 2 13:14:40 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 2 Oct 2015 13:14:40 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: same operation for iter states failed if index and _indices where not the same (fixed) Message-ID: <20151002111440.E97A91C0F47@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79929:232d93b7d261 Date: 2015-10-02 13:10 +0200 http://bitbucket.org/pypy/pypy/changeset/232d93b7d261/ Log: same operation for iter states failed if index and _indices where not the same (fixed) concrete type of a loaded singlefloat was f, but singlefloat_to_float demands i as parameter diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -84,7 +84,9 @@ self.offset = offset def same(self, other): - if self.offset == other.offset: + if self.offset == other.offset and \ + self.index == other.index and \ + self._indices == other._indices: return self.iterator.same_shape(other.iterator) return False @@ -119,9 +121,9 @@ self.factors = factors def same_shape(self, other): - """ if two iterators share the same shape, - next() only needs to be called on one! 
- """ + """ Iterating over the same element """ + if not self.contiguous or not other.contiguous: + return False return (self.contiguous == other.contiguous and self.array.dtype is self.array.dtype and self.shape_m1 == other.shape_m1 and diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -895,7 +895,8 @@ a = [[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8], [7, 8, 9, 10], [9, 10, 11, 12], [11, 12, 13, 14], [13, 14, 15, 16], [16, 17, 18, 19]] b = a -> ::2 c = b + b - c -> 1 -> 1 + d = c -> 1 + d -> 1 """ def test_multidim_slice(self): @@ -904,7 +905,7 @@ self.check_trace_count(3) # ::2 creates a view object -> needs an inner loop # that iterates continous chunks of the matrix - self.check_vectorized(1,1) + self.check_vectorized(1,0) def define_dot_matrix(): return """ @@ -930,7 +931,6 @@ """ def test_pow(self): - py.test.skip("Not implemented CDefinedIntSymbolic('RPY_TLOFS_rpy_errno')") result = self.run("pow") assert result == 29 ** 2 self.check_trace_count(1) @@ -944,20 +944,6 @@ """ def test_pow_int(self): - py.test.skip("Not implemented CDefinedIntSymbolic('RPY_TLOFS_rpy_errno')") result = self.run("pow_int") assert result == 15 ** 2 self.check_trace_count(4) # extra one for the astype - - - def define_take(): - return """ - a = |10| - b = take(a, [1, 1, 3, 2]) - b -> 2 - """ - - def test_take(self): - py.test.skip("key error get item?") - result = self.run("take") - assert result == 3 diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.jitexc import NotAProfitableLoop from rpython.rlib.objectmodel import specialize, always_inline from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop +from 
rpython.rtyper.lltypesystem.lloperation import llop class SchedulerState(object): @@ -162,6 +163,14 @@ for node in state.graph.nodes: assert node.emitted +def failnbail_transformation(msg): + msg = '%s\n' % msg + if we_are_translated(): + llop.debug_print(lltype.Void, msg) + else: + import pdb; pdb.set_trace() + raise NotImplementedError(msg) + class TypeRestrict(object): ANY_TYPE = '\x00' ANY_SIZE = -1 @@ -191,15 +200,27 @@ def check(self, value): assert value.datatype != '\x00' if self.type != TypeRestrict.ANY_TYPE: - assert self.type == value.datatype + if self.type != value.datatype: + msg = "type mismatch %s != %s" % \ + (self.type, value.datatype) + failnbail_transformation(msg) assert value.bytesize > 0 if not self.any_size(): - assert self.bytesize == value.bytesize + if self.bytesize != value.bytesize: + msg = "bytesize mismatch %s != %s" % \ + (self.bytesize, value.bytesize) + failnbail_transformation(msg) assert value.count > 0 if self.count != TypeRestrict.ANY_COUNT: - assert value.count >= self.count + if value.count < self.count: + msg = "count mismatch %s < %s" % \ + (self.count, value.count) + failnbail_transformation(msg) if self.sign != TypeRestrict.ANY_SIGN: - assert bool(self.sign) == value.sign + if bool(self.sign) == value.sign: + msg = "sign mismatch %s < %s" % \ + (self.sign, value.sign) + failnbail_transformation(msg) def max_input_count(self, count): """ How many """ diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -125,8 +125,8 @@ type = self.type if descr.is_array_of_floats(): type = 'f' - if isinstance(descr, ArrayDescr) and descr.getconcrete_type() == 'f': - type = 'f' + #if isinstance(descr, ArrayDescr) and descr.getconcrete_type() == 'f': + # type = 'f' self.bytesize = descr.get_item_size_in_bytes() self.signed = descr.is_item_signed() self.datatype = type diff --git 
a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -5,6 +5,7 @@ from rpython.jit.metainterp.history import ConstInt from rpython.jit.backend.llsupport.symbolic import (WORD as INT_WORD, SIZEOF_FLOAT as FLOAT_WORD) +from rpython.jit.backend.llsupport.descr import ArrayDescr def test_arity_mixins(): cases = [ @@ -108,10 +109,22 @@ assert op.cast_to() == kwargs['cast_to'] def test_unpack_1(): - op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(1)]) - assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 8, False) - op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(2)]) - assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 8, True) + op = rop.ResOperation(rop.rop.VEC_UNPACK_I, + [rop.InputArgVector(), ConstInt(0), ConstInt(1)]) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ + ('i', 'i', 8, False) + op = rop.ResOperation(rop.rop.VEC_UNPACK_I, + [rop.InputArgVector(), ConstInt(0), ConstInt(2)]) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ + ('i', 'i', 8, True) + +def test_load_singlefloat(): + descr = ArrayDescr(8,4, None, 'S', concrete_type='f') + op = rop.ResOperation(rop.rop.VEC_RAW_LOAD_I, + [rop.InputArgInt(), ConstInt(0)], + descr=descr) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 4, True) + def test_types(): op = rop.ResOperation(rop.rop.INT_ADD, [ConstInt(0),ConstInt(1)]) diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -12,7 +12,9 @@ from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, 
free_raw_storage, raw_storage_getitem) +from rpython.rlib.objectmodel import specialize, is_annotation_constant + at specialize.argtype(0,1) def malloc(T,n): return lltype.malloc(T, n, flavor='raw', zero=True) def free(mem): @@ -409,5 +411,106 @@ res = self.meta_interp(f, [128], vec_all=True) assert res == f(128) + + def combinations(types, operators): + import itertools + size = 22 + + class Typ(object): + def __init__(self, type, storecast, loadcast): + self.type = type + self.storecast = storecast + self.loadcast = loadcast + def __repr__(self): + return self.type.replace(".","_") + + sizes = [22] + for t1, t2, t3, op, size in itertools.product(types, types, types, operators, sizes): + yield (size, Typ(*t1), Typ(*t2), Typ(*t3), op[0], op[1]) + types = [('rffi.DOUBLE', 'float', 'float'), + ('rffi.SIGNED', 'int', 'int'), + ('rffi.FLOAT', 'rffi.r_singlefloat', 'float'), + ] + operators = [('add', '+'), + ] + for size, typ1, typ2, typ3, opname, op in combinations(types, operators): + _source = """ + def test_binary_operations_{name}(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + T1 = lltype.Array({type_a}, hints={{'nolength': True}}) + T2 = lltype.Array({type_b}, hints={{'nolength': True}}) + T3 = lltype.Array({type_c}, hints={{'nolength': True}}) + def f(size): + vector_a = lltype.malloc(T1, size, flavor='raw') + vector_b = lltype.malloc(T2, size, flavor='raw') + vector_c = lltype.malloc(T3, size, flavor='raw') + for i in range(size): + vector_a[i] = {type_a_storecast}(i+1) + for i in range(size): + vector_b[i] = {type_b_storecast}(i+1) + for i in range(size): + vector_c[i] = {type_c_storecast}(i+1) + i = 0 + while i < size: + myjitdriver.jit_merge_point() + a = {type_a_loadcast}(vector_a[i]) + b = {type_b_loadcast}(vector_b[i]) + c = (a {op} b) + vector_c[i] = {type_c_storecast}(c) + i += 1 + lltype.free(vector_a, flavor='raw') + lltype.free(vector_b, flavor='raw') + c = {type_c_loadcast}(0.0) + for i in range(size): + c += 
{type_c_loadcast}(vector_c[i]) + lltype.free(vector_c, flavor='raw') + return c + res = self.meta_interp(f, [{size}], vec_all=True) + assert res == f({size}) + """ + env = { + 'type_a': typ1.type, + 'type_b': typ2.type, + 'type_c': typ3.type, + 'type_a_loadcast': typ1.loadcast, + 'type_b_loadcast': typ2.loadcast, + 'type_c_loadcast': typ3.loadcast, + 'type_a_storecast': typ1.storecast, + 'type_b_storecast': typ2.storecast, + 'type_c_storecast': typ3.storecast, + 'size': size, + 'name': str(typ1) + '__' + str(typ2) + '__' + str(typ3) + \ + '__' + str(size) + '__' + opname, + 'op': op, + } + formatted = _source.format(**env) + exec py.code.Source(formatted).compile() + + def test_binary_operations_aa(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + T1 = lltype.Array(rffi.DOUBLE, hints={'nolength': True}) + T3 = lltype.Array(rffi.SIGNED, hints={'nolength': True}) + def f(size): + vector_a = lltype.malloc(T1, size, flavor='raw', zero=True) + vector_b = lltype.malloc(T1, size, flavor='raw', zero=True) + vector_c = lltype.malloc(T3, size, flavor='raw', zero=True) + i = 0 + while i < size: + myjitdriver.jit_merge_point() + a = (vector_a[i]) + b = (vector_b[i]) + c = (a + b) + vector_c[i] = int(c) + i += 1 + free(vector_a) + free(vector_b) + #c = 0.0 + #for i in range(size): + # c += vector_c[i] + lltype.free(vector_c, flavor='raw') + return 0 + res = self.meta_interp(f, [22], vec_all=True) + assert res == f(22) + class TestLLtype(LLJitMixin, VectorizeTests): pass From noreply at buildbot.pypy.org Fri Oct 2 14:53:02 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 2 Oct 2015 14:53:02 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: store operations did not correctly split packs (wrong size used) and thus did not sign extend correctly some times Message-ID: <20151002125302.D7D7B1C0F47@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79930:569c929fd2a1 Date: 2015-10-02 14:53 +0200 
http://bitbucket.org/pypy/pypy/changeset/569c929fd2a1/ Log: store operations did not correctly split packs (wrong size used) and thus did not sign extend correctly some times diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -844,7 +844,7 @@ def test_where(self): result = self.run("where") assert result == -40 - self.check_vectorized(1, 0) # TODO might be possible to vectorize + self.check_vectorized(1, 1) def define_searchsorted(): return """ diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -235,6 +235,34 @@ def check_operation(self, state, pack, op): pass + def crop_vector(self, op, newsize, size): + return newsize, size + + def must_crop_vector(self, op, index): + restrict = self.argument_restrictions[index] + size = op.getarg(index).bytesize + newsize = self.crop_to_size(op, index) + return not restrict.any_size() and newsize != size + + @always_inline + def crop_to_size(self, op, index): + restrict = self.argument_restrictions[index] + return restrict.bytesize + +class StoreRestrict(OpRestrict): + def __init__(self, argument_restris): + self.argument_restrictions = argument_restris + + def must_crop_vector(self, op, index): + size = op.getarg(index).bytesize + return self.crop_to_size(op, index) != size + + @always_inline + def crop_to_size(self, op, index): + # there is only one parameter that needs to be transformed! 
+ descr = op.getdescr() + return descr.get_item_size_in_bytes() + class OpMatchSizeTypeFirst(OpRestrict): def check_operation(self, state, pack, op): i = 0 @@ -283,9 +311,9 @@ rop.VEC_FLOAT_ABS: OpRestrict([TR_ANY_FLOAT]), rop.VEC_FLOAT_NEG: OpRestrict([TR_ANY_FLOAT]), - rop.VEC_RAW_STORE: OpRestrict([None, None, TR_ANY]), - rop.VEC_SETARRAYITEM_RAW: OpRestrict([None, None, TR_ANY]), - rop.VEC_SETARRAYITEM_GC: OpRestrict([None, None, TR_ANY]), + rop.VEC_RAW_STORE: StoreRestrict([None, None, TR_ANY]), + rop.VEC_SETARRAYITEM_RAW: StoreRestrict([None, None, TR_ANY]), + rop.VEC_SETARRAYITEM_GC: StoreRestrict([None, None, TR_ANY]), rop.GUARD_TRUE: OpRestrict([TR_ANY_INTEGER]), rop.GUARD_FALSE: OpRestrict([TR_ANY_INTEGER]), @@ -361,16 +389,18 @@ # 1) args[i] = vecop # a) assemble_scattered_values(state, pack, args, i) # c) - crop_vector(state, restrict, pack, args, i) # b) + crop_vector(state, oprestrict, restrict, pack, args, i) # b) position_values(state, restrict, pack, args, i, pos) # d) restrict.check(args[i]) @always_inline -def crop_vector(state, restrict, pack, args, i): +def crop_vector(state, oprestrict, restrict, pack, args, i): # convert size i64 -> i32, i32 -> i64, ... 
arg = args[i] - newsize, size = restrict.bytesize, arg.bytesize - if not restrict.any_size() and newsize != size: + size = arg.bytesize + left = pack.leftmost() + if oprestrict.must_crop_vector(left, i): + newsize = oprestrict.crop_to_size(left, i) assert arg.type == 'i' state._prevent_signext(newsize, size) count = arg.count @@ -713,8 +743,8 @@ op = pack.leftmost() if op.returns_void(): assert op.is_primitive_store() - arg = op.getarg(2) - return vec_reg_size // arg.bytesize + descr = op.getdescr() + return vec_reg_size // descr.get_item_size_in_bytes() if op.is_typecast(): if op.casts_down(): @@ -788,8 +818,9 @@ if left.is_primitive_store(): # make this case more general if it turns out this is # not the only case where packs need to be trashed - indexarg = left.getarg(2) - return indexarg.bytesize * self.numops() - vec_reg_size + descr = left.getdescr() + bytesize = descr.get_item_size_in_bytes() + return bytesize * self.numops() - vec_reg_size return 0 if self.numops() == 0: return -1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py --- a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py @@ -11,16 +11,11 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp.optimizeopt.test.test_dependency import (DependencyBaseTest) from rpython.jit.metainterp.optimizeopt.test.test_vecopt import (FakeMetaInterpStaticData, - FakeJitDriverStaticData) + FakeJitDriverStaticData, FakePackSet) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.tool.oparser import parse as opparse from rpython.jit.tool.oparser_model import get_model -class FakePackSet(PackSet): - def __init__(self, packs): - self.packs = packs - self.vec_reg_size = 16 - class FakeVecScheduleState(VecScheduleState): def __init__(self): self.expanded_map = {} diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -13,17 +13,26 @@ from rpython.jit.metainterp.optimizeopt.dependency import DependencyGraph from rpython.jit.metainterp.optimizeopt.vector import (VectorizingOptimizer, MemoryRef, isomorphic, Pair, NotAVectorizeableLoop, VectorLoop, - NotAProfitableLoop, GuardStrengthenOpt, CostModel, X86_CostModel) + NotAProfitableLoop, GuardStrengthenOpt, CostModel, X86_CostModel, + PackSet) from rpython.jit.metainterp.optimizeopt.schedule import (Scheduler, - SchedulerState, VecScheduleState) + SchedulerState, VecScheduleState, Pack) from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp import compile from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.optimizeopt.version import LoopVersionInfo +from rpython.jit.backend.llsupport.descr import ArrayDescr +from rpython.jit.metainterp.optimizeopt.schedule import opcount_filling_vector_register +from rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph class FakeJitDriverStaticData(object): vec=True +class FakePackSet(PackSet): + def __init__(self, packs): + self.packs = packs + self.vec_reg_size = 16 + class FakeLoopInfo(LoopVersionInfo): def __init__(self, loop): self.target_token = loop.label.getdescr() @@ -225,8 +234,24 @@ "operation %s at pos %d has no memory ref!" 
% \ (node.getoperation(), node.getindex()) +class FakeInput(object): + def __init__(self, type='f', datatype='f', size=8, signed=False): + self.type = type + self.datatype = datatype + self.bytesize = size + self.signed = signed + +def arg(type='f', size=8, signed=False, datatype='f'): + return FakeInput(type, datatype, size, signed) class BaseTestVectorize(VecTestHelper): + def test_opcount_filling(self): + descr = ArrayDescr(0,8, None, 'F', concrete_type='f') + pack = Pack([Node(ResOperation(rop.VEC_RAW_STORE, [0,0,arg('f',4)], descr), 0), + Node(ResOperation(rop.VEC_RAW_STORE, [0,0,arg('f',4)], descr), 0), + ]) + assert opcount_filling_vector_register(pack, 16) == 2 + def test_move_guard_first(self): trace = self.parse_trace(""" i10 = int_add(i0, i1) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -123,10 +123,6 @@ from rpython.jit.backend.llgraph.runner import _getdescr descr = _getdescr(self) type = self.type - if descr.is_array_of_floats(): - type = 'f' - #if isinstance(descr, ArrayDescr) and descr.getconcrete_type() == 'f': - # type = 'f' self.bytesize = descr.get_item_size_in_bytes() self.signed = descr.is_item_signed() self.datatype = type diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -124,7 +124,14 @@ [rop.InputArgInt(), ConstInt(0)], descr=descr) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 4, True) - + +def test_store(): + descr = ArrayDescr(0,8, None, 'F', concrete_type='f') + vec = rop.InputArgVector() + op = rop.ResOperation(rop.rop.VEC_RAW_STORE, + [rop.InputArgRef(), ConstInt(0), vec], + descr=descr) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'v', 8, True) def test_types(): op = 
rop.ResOperation(rop.rop.INT_ADD, [ConstInt(0),ConstInt(1)]) From noreply at buildbot.pypy.org Fri Oct 2 15:27:37 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 2 Oct 2015 15:27:37 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed commented (old code) Message-ID: <20151002132737.EC45B1C13CF@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79931:4b9eb5c3d265 Date: 2015-10-02 15:03 +0200 http://bitbucket.org/pypy/pypy/changeset/4b9eb5c3d265/ Log: removed commented (old code) diff --git a/rpython/jit/metainterp/optimizeopt/guard.py b/rpython/jit/metainterp/optimizeopt/guard.py --- a/rpython/jit/metainterp/optimizeopt/guard.py +++ b/rpython/jit/metainterp/optimizeopt/guard.py @@ -269,31 +269,8 @@ def emit_operation(self, op): self.renamer.rename(op) - #if op.is_always_pure(): - # self.delay(op) - # return - #self.emit_delayed_for(op) - #if not op.is_always_pure(): self._newoperations.append(op) - # delay the pure ops - #def delay(self, op): - # self.delayed[op] = None - # print "delayed", op - - #def emit_delayed_for(self, op): - # if op.is_inputarg(): - # return - # additional = [] - # if op.is_guard(): - # additional = op.getfailargs() - # for arg in op.getarglist() + additional: - # if arg in self.delayed: - # del self.delayed[arg] - # self.emit_delayed_for(arg) - # self._newoperations.append(op) - - def operation_position(self): return len(self._newoperations) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -621,10 +621,6 @@ op = node.getoperation() if op.is_guard(): # add accumulation info to the descriptor - # TODO for version in self.loop.versions: - # # this needs to be done for renamed (accum arguments) - # version.renamed_inputargs = [ renamer.rename_map.get(arg,arg) for arg in version.inputargs ] - 
#self.appendedvar_pos_arg_count = len(sched_data.invariant_vector_vars) failargs = op.getfailargs() descr = op.getdescr() # note: stitching a guard must resemble the order of the label From noreply at buildbot.pypy.org Fri Oct 2 15:27:40 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 2 Oct 2015 15:27:40 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: translation issue (missing import) Message-ID: <20151002132740.0CF8E1C13CF@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79932:31753dc0d45d Date: 2015-10-02 15:27 +0200 http://bitbucket.org/pypy/pypy/changeset/31753dc0d45d/ Log: translation issue (missing import) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -11,6 +11,7 @@ from rpython.rlib.objectmodel import specialize, always_inline from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.lltypesystem import lltype class SchedulerState(object): From noreply at buildbot.pypy.org Fri Oct 2 15:39:54 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Oct 2015 15:39:54 +0200 (CEST) Subject: [pypy-commit] pypy default: fix a corner case in a test Message-ID: <20151002133954.D43951C0F47@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79933:b1465cb3c7e1 Date: 2015-10-02 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/b1465cb3c7e1/ Log: fix a corner case in a test diff --git a/rpython/rtyper/module/test/test_ll_time.py b/rpython/rtyper/module/test/test_ll_time.py --- a/rpython/rtyper/module/test/test_ll_time.py +++ b/rpython/rtyper/module/test/test_ll_time.py @@ -40,11 +40,11 @@ # we can only subtract two numbers returned by the same function. 
# Moreover they might have different precisions, but it should # be at least 0.01 seconds, hence the "sleeps". - assert 0.0199 <= t2-t0 <= 9.0 - assert 0.0199 <= t3-t1 <= t4-t0 <= 9.0 - assert 0.0199 <= t4-t2 <= t5-t1 <= t6-t0 <= 9.0 - assert 0.0199 <= t5-t3 <= t6-t2 <= 9.0 - assert 0.0199 <= t6-t4 <= 9.0 + assert 0.0099 <= t2-t0 <= 9.0 + assert 0.0099 <= t3-t1 <= t4-t0 <= 9.0 + assert 0.0099 <= t4-t2 <= t5-t1 <= t6-t0 <= 9.0 + assert 0.0099 <= t5-t3 <= t6-t2 <= 9.0 + assert 0.0099 <= t6-t4 <= 9.0 def test_time_sleep(self): def does_nothing(): From noreply at buildbot.pypy.org Fri Oct 2 15:39:56 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Oct 2015 15:39:56 +0200 (CEST) Subject: [pypy-commit] pypy default: kill useless test Message-ID: <20151002133956.D15C51C0F47@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79934:bde66c2cf46f Date: 2015-10-02 15:39 +0200 http://bitbucket.org/pypy/pypy/changeset/bde66c2cf46f/ Log: kill useless test diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation From noreply at buildbot.pypy.org Fri Oct 2 15:58:47 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Oct 2015 15:58:47 +0200 (CEST) Subject: [pypy-commit] pypy default: make this test not dependant on importing stuff Message-ID: <20151002135847.E20001C0F47@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79935:40a16dffa99f Date: 2015-10-02 15:51 +0200 
http://bitbucket.org/pypy/pypy/changeset/40a16dffa99f/ Log: make this test not dependant on importing stuff diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 From noreply at buildbot.pypy.org Fri Oct 2 15:58:50 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Oct 2015 15:58:50 +0200 (CEST) Subject: [pypy-commit] pypy default: fix the test Message-ID: <20151002135850.01DD81C0F47@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79936:d01d9a0ef18b Date: 2015-10-02 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/d01d9a0ef18b/ Log: fix the test diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 From noreply at buildbot.pypy.org Fri Oct 2 17:40:30 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 2 Oct 2015 17:40:30 +0200 (CEST) Subject: [pypy-commit] pypy default: fix the segfault when no stats are present Message-ID: <20151002154030.E2EC11C0F47@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79937:cfc4705ad692 Date: 2015-10-02 17:40 +0200 http://bitbucket.org/pypy/pypy/changeset/cfc4705ad692/ Log: fix the segfault when no stats are present diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- 
a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) From noreply at buildbot.pypy.org Sat Oct 3 08:32:23 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Sat, 3 Oct 2015 08:32:23 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: test_micronumpy used old --jit "vectorize" parameter Message-ID: <20151003063223.1AC341C148D@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r79938:2751e3b80936 Date: 2015-10-03 08:32 +0200 http://bitbucket.org/pypy/pypy/changeset/2751e3b80936/ Log: test_micronumpy used old --jit "vectorize" parameter diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -84,8 +84,8 @@ return a.{method}() """.format(method=op, dtype=dtype, count=count, a=a) exec py.code.Source(source).compile() - vlog = self.run(main, [], vectorize=1) - log = self.run(main, [], vectorize=0) + vlog = self.run(main, [], vec=1) + log = self.run(main, [], vec=0) assert log.result == vlog.result assert log.result == result diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- 
a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1366,21 +1366,24 @@ opt = self.vectorize(loop) self.debug_print_operations(loop) - def test_max(self): - # TODO + def test_111(self): trace = """ - [p3, i4, p2, i5, f6, i7, i8] - f9 = raw_load_f(i7, i5, descr=floatarraydescr) - guard_not_invalidated() [p2, f9, f6, i4, i5, p3] - i10 = float_ge(f6, f9) - guard_false(i10) [p2, f9, f6, None, i4, i5, p3] - i12 = float_ne(f6, f6) - guard_false(i12) [p2, f9, f6, None, i4, i5, p3] - i14 = int_add(i4, 1) - i16 = int_add(i5, 8) - i17 = int_ge(i14, i8) - guard_false(i17) [p2, i16, f9, i14, None, None, None, p3] - jump(p3, i14, p2, i16, f9, i7, i8) + [p0, p1, p2, p3, i4, p5, p6, p7, i8, p9, i10, p11] + guard_not_invalidated(descr=) [p1, p0, p2, p3, p5, p6, i4] + i12 = int_lt(i4, i8) + guard_true(i12, descr=) [p1, p0, p2, p3, p5, p6, i8, i4] + i13 = uint_ge(i4, i10) + guard_false(i13, descr=) [p1, p0, i10, i4, p9, p2, p3, p5, p6, None, None] + i15 = getarrayitem_gc_i(p11, i4, descr=arraydescr) + i17 = int_add_ovf(i15, 1) + guard_no_overflow(descr=) [p1, p0, i17, p2, p3, p5, p6, i15, None, i4] + setarrayitem_gc(p11, i4, i17, descr=arraydescr) + i19 = int_add(i4, 1) + i21 = getfield_raw_i(139972894828928, descr=) + i23 = int_lt(i21, 0) + guard_false(i23, descr=) [p1, p0, p2, p3, p5, p6, i19, None, None, None] + i24 = arraylen_gc(p11, descr=arraydescr) + jump(p0, p1, p2, p3, i19, p5, p6, p7, i8, p9, i10, p11) """ loop = self.parse_loop(trace) opt = self.schedule(loop, with_guard_opt=True) From noreply at buildbot.pypy.org Sat Oct 3 09:43:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 3 Oct 2015 09:43:22 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for fed018f3c786: there were reasons for why even __radd__ would Message-ID: <20151003074322.D3BE81C083B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79939:d2674661565e Date: 2015-10-03 09:43 +0200 
http://bitbucket.org/pypy/pypy/changeset/d2674661565e/ Log: Fix for fed018f3c786: there were reasons for why even __radd__ would fall back to space.add(). diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -272,11 +272,9 @@ return space.w_NotImplemented return space.call_function(w_meth, w_b) else: - # here, if coerce returns a non-W_Instance object as first - # argument, then give up. The idea is that this strange - # case should already have been handled by the binaryop() - # called from descroperation first. - return space.w_NotImplemented + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument + return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -658,7 +656,7 @@ return space.w_NotImplemented return space.call_function(w_func, w_other) else: - return space.w_NotImplemented + return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -425,6 +425,14 @@ return 42 assert B() + B() == 42 + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): From noreply at buildbot.pypy.org Sat Oct 3 10:49:19 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 10:49:19 +0200 (CEST) Subject: [pypy-commit] pypy share-guard-info: fix test_zll_stress Message-ID: <20151003084920.0859A1C083B@cobra.cs.uni-duesseldorf.de> 
Author: fijal Branch: share-guard-info Changeset: r79940:ce2945f49097 Date: 2015-10-03 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ce2945f49097/ Log: fix test_zll_stress diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -367,6 +367,7 @@ def produce_into(self, builder, r): fail_subset = builder.subset_of_intvars(r) original_intvars = builder.intvars[:] + builder.fakemetainterp.ovf_flag = False super(AbstractOvfOperation, self).produce_into(builder, r) if builder.fakemetainterp.ovf_flag: # overflow detected op = ResOperation(rop.GUARD_OVERFLOW, []) From noreply at buildbot.pypy.org Sat Oct 3 10:53:38 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 10:53:38 +0200 (CEST) Subject: [pypy-commit] pypy share-guard-info: fix test_flatten Message-ID: <20151003085338.BCB801C083B@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: share-guard-info Changeset: r79941:d841a152a652 Date: 2015-10-03 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/d841a152a652/ Log: fix test_flatten diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -590,7 +590,7 @@ void_return --- L1: - raise $<* struct object { typeptr=... 
}> + raise $<* struct object> """, transform=True, liveness=True) def test_residual_call_raising(self): From noreply at buildbot.pypy.org Sat Oct 3 10:53:40 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 10:53:40 +0200 (CEST) Subject: [pypy-commit] pypy share-guard-info: fix this test Message-ID: <20151003085340.DA3581C083B@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: share-guard-info Changeset: r79942:5953bf12aaa4 Date: 2015-10-03 10:53 +0200 http://bitbucket.org/pypy/pypy/changeset/5953bf12aaa4/ Log: fix this test diff --git a/rpython/jit/tool/jitoutput.py b/rpython/jit/tool/jitoutput.py --- a/rpython/jit/tool/jitoutput.py +++ b/rpython/jit/tool/jitoutput.py @@ -17,6 +17,7 @@ (('guards',), '^guards:\s+(\d+)$'), (('opt_ops',), '^opt ops:\s+(\d+)$'), (('opt_guards',), '^opt guards:\s+(\d+)$'), + (('opt_guards_shared',), '^opt guards shared:\s+(\d+)$'), (('forcings',), '^forcings:\s+(\d+)$'), (('abort.trace_too_long',), '^abort: trace too long:\s+(\d+)$'), (('abort.compiling',), '^abort: compiling:\s+(\d+)$'), diff --git a/rpython/jit/tool/test/test_jitoutput.py b/rpython/jit/tool/test/test_jitoutput.py --- a/rpython/jit/tool/test/test_jitoutput.py +++ b/rpython/jit/tool/test/test_jitoutput.py @@ -52,6 +52,7 @@ guards: 1 opt ops: 6 opt guards: 1 +opt guards shared: 1 forcings: 1 abort: trace too long: 10 abort: compiling: 11 From noreply at buildbot.pypy.org Sat Oct 3 11:03:23 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 11:03:23 +0200 (CEST) Subject: [pypy-commit] pypy share-guard-info: merge default Message-ID: <20151003090323.551761C0FFE@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: share-guard-info Changeset: r79943:c95aabc0cb96 Date: 2015-10-03 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/c95aabc0cb96/ Log: merge default diff too long, truncating to 2000 out of 2171 lines diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 
+609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise 
api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise 
api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = 
self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if 
isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py 
@@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): 
_attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and 
( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 +773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. 
- for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) - prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of 
the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists + seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. - seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise 
- return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + 
w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ 
@unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -130,7 +131,8 @@ # though it may be signed when 'wchar_t' is written to C). 
WCHAR_INT = {(2, False): rffi.USHORT, (4, False): rffi.UINT, - (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), rffi.r_wchar_t.SIGN] + (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), + rfficache.signof_c_type('wchar_t')] WCHAR_INTP = rffi.CArrayPtr(WCHAR_INT) class W_CTypePrimitiveUniChar(W_CTypePrimitiveCharOrUniChar): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, 
ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = 
rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. 
+ unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py 
b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -844,6 +844,18 @@ b.byteswap() assert a != b + def test_unicode_ord_positive(self): + import sys + if sys.maxunicode == 0xffff: + skip("test for 32-bit unicodes") + a = self.array('u', '\xff\xff\xff\xff') + assert len(a) == 1 + assert repr(a[0]) == "u'\Uffffffff'" + if sys.maxint == 2147483647: + assert ord(a[0]) == -1 + else: + assert ord(a[0]) == 4294967295 + def test_weakref(self): import weakref a = self.array('c', 'Hi!') diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -117,12 +117,14 @@ return W_NDimArray(impl) @staticmethod - def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, w_arr, dtype=None): from pypy.module.micronumpy import concrete - + w_base = w_arr + if w_arr.implementation.base() is not None: + w_base = w_arr.implementation.base() impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, - 
orig_arr, dtype) - return wrap_impl(space, space.type(orig_arr), orig_arr, impl) + w_base, dtype) + return wrap_impl(space, space.type(w_arr), w_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -568,11 +568,6 @@ self.size = ovfcheck(support.product_check(shape) * self.dtype.elsize) except OverflowError: raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.") - while orig_arr is not None: - assert isinstance(orig_arr, W_NDimArray) - if orig_arr.implementation.base() is None: - break - orig_arr = orig_arr.implementation.base() self.start = start self.orig_arr = orig_arr flags = parent.flags & NPY.ARRAY_ALIGNED diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -133,7 +133,9 @@ return w_arr else: imp = w_object.implementation - w_base = imp.base() or w_object + w_base = w_object + if imp.base() is not None: + w_base = imp.base() with imp as storage: sz = support.product(w_object.get_shape()) * dtype.elsize return W_NDimArray.from_shape_and_storage(space, diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1308,6 +1308,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2519,10 +2519,10 @@ 
assert b.shape == b[...].shape assert (b == b[...]).all() - a = np.arange(6).reshape(2, 3) + a = np.arange(6) if '__pypy__' in sys.builtin_module_names: raises(ValueError, "a[..., ...]") - b = a [..., 0] + b = a.reshape(2, 3)[..., 0] assert (b == [0, 3]).all() assert b.base is a diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in 
enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi 
import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = _get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert 
p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int *a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1636,11 +1636,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1650,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2248,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... 
t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, ffiplatform, VerificationMissing from 
pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... };") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_restrict_fields(): + if sys.platform == 'win32': + py.test.skip("'__restrict__' probably not recognized") + ffi = FFI() + ffi.cdef("""struct foo_s { void * restrict b; };""") + lib = verify(ffi, 'test_restrict_fields', """ + struct foo_s { void * __restrict__ b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'b' + assert foo_s.fields[0][1].type is ffi.typeof("void *") + +def test_const_array_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[4]; };""") + lib = verify(ffi, 'test_const_array_fields', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 
'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_array_fields_varlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_varlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[]") + +def test_const_array_fields_unknownlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[...]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_unknownlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_function_args(): + ffi = FFI() + ffi.cdef("""int foobar(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_args', """ + int foobar(const int a, const int *b, const int c[]) { + return a + *b + *c; + } + """) + assert lib.foobar(100, ffi.new("int *", 40), ffi.new("int *", 2)) == 142 + +def test_const_function_type_args(): + ffi = FFI() + ffi.cdef("""int (*foobar)(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_type_args', """ + int (*foobar)(const int a, const int *b, const int c[]); + """) + t = ffi.typeof(lib.foobar) + assert t.args[0] is ffi.typeof("int") + assert t.args[1] is ffi.typeof("int *") + assert t.args[2] is ffi.typeof("int *") + +def test_const_constant(): + ffi = FFI() + ffi.cdef("""struct foo_s { int x,y; }; const struct foo_s myfoo;""") + lib = verify(ffi, 'test_const_constant', """ + struct foo_s { int x,y; }; const struct foo_s myfoo = { 40, 2 }; + """) + assert lib.myfoo.x == 40 + assert lib.myfoo.y == 2 + +def test_const_via_typedef(): + ffi = FFI() + ffi.cdef("""typedef const int const_t; const_t aaa;""") + lib = verify(ffi, 'test_const_via_typedef', """ + typedef const int const_t; + #define aaa 42 + """) 
+ assert lib.aaa == 42 + py.test.raises(AttributeError, "lib.aaa = 43") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1623,11 +1623,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1637,7 +1637,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -1923,7 +1923,7 @@ assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): - ffi = FFI_warnings_not_error() # ignore warnings + ffi = FFI() ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "" diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -123,7 +123,7 @@ f1 = self.floatval i2 = space.int_w(w_other) # (double-)floats have always at least 48 bits of precision - if LONG_BIT > 32 and not int_between((-1)<<48, i2, 1<<48): + if LONG_BIT > 32 and not int_between(-1, i2 >> 48, 1): res = do_compare_bigint(f1, rbigint.fromint(i2)) else: f2 = float(i2) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1396,16 +1396,19 @@ else: subitems_w = [self._none_value] * length l = self.unerase(w_list.lstorage) - for i in range(length): - try: - subitems_w[i] 
= l[start] - start += step - except IndexError: - raise + self._fill_in_with_sliced_items(subitems_w, l, start, step, length) storage = self.erase(subitems_w) return W_ListObject.from_storage_and_strategy( self.space, storage, self) + def _fill_in_with_sliced_items(self, subitems_w, l, start, step, length): + for i in range(length): + try: + subitems_w[i] = l[start] + start += step + except IndexError: + raise + def switch_to_next_strategy(self, w_list, w_sample_item): w_list.switch_to_object_strategy() diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -1,10 +1,12 @@ """Slice object""" +import sys from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import GetSetProperty, TypeDef from rpython.rlib.objectmodel import specialize +from rpython.rlib import jit class W_SliceObject(W_Root): @@ -234,10 +236,19 @@ assert length >= 0 if start < 0: start = 0 - if stop < start: - stop = start - if stop > length: - stop = length - if start > length: - start = length + # hack for the JIT, for slices with no end specified: + # this avoids the two comparisons that follow + if jit.isconstant(stop) and stop == sys.maxint: + pass + else: + if stop < start: + stop = start + if stop <= length: + return start, stop + # here is the case where 'stop' is larger than the list + stop = length + if jit.isconstant(start) and start == 0: + pass # no need to do the following check here + elif start > stop: + start = stop return start, stop diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError from pypy.objspace.std.tupleobject import W_AbstractTupleObject from 
pypy.objspace.std.util import negate -from rpython.rlib.objectmodel import compute_hash +from rpython.rlib.objectmodel import compute_hash, specialize from rpython.rlib.rarithmetic import intmask from rpython.rlib.unroll import unrolling_iterable from rpython.tool.sourcetools import func_with_new_name @@ -146,3 +146,64 @@ return Cls_oo(space, w_arg1, w_arg2) else: raise NotSpecialised + +# -------------------------------------------------- +# Special code based on list strategies to implement zip(), +# here with two list arguments only. This builds a zipped +# list that differs from what the app-level code would build: +# if the source lists contain sometimes ints/floats and +# sometimes not, here we will use uniformly 'Cls_oo' instead +# of using 'Cls_ii' or 'Cls_ff' for the elements that match. +# This is a trade-off, but it looks like a good idea to keep +# the list uniform for the JIT---not to mention, it is much +# faster to move the decision out of the loop. + + at specialize.arg(1) +def _build_zipped_spec(space, Cls, lst1, lst2): + length = min(len(lst1), len(lst2)) + return [Cls(space, space.wrap(lst1[i]), + space.wrap(lst2[i])) for i in range(length)] + +def _build_zipped_spec_oo(space, w_list1, w_list2): + strat1 = w_list1.strategy + strat2 = w_list2.strategy + length = min(strat1.length(w_list1), strat2.length(w_list2)) + return [Cls_oo(space, strat1.getitem(w_list1, i), + strat2.getitem(w_list2, i)) for i in range(length)] + +def _build_zipped_unspec(space, w_list1, w_list2): + strat1 = w_list1.strategy + strat2 = w_list2.strategy + length = min(strat1.length(w_list1), strat2.length(w_list2)) + return [space.newtuple([strat1.getitem(w_list1, i), + strat2.getitem(w_list2, i)]) for i in range(length)] + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.listobject import W_ListObject + if (not isinstance(w_list1, W_ListObject) or + not isinstance(w_list2, W_ListObject)): + raise OperationError(space.w_TypeError, + 
space.wrap("expected two lists")) + + if space.config.objspace.std.withspecialisedtuple: + intlist1 = w_list1.getitems_int() + if intlist1 is not None: + intlist2 = w_list2.getitems_int() + if intlist2 is not None: + lst_w = _build_zipped_spec(space, Cls_ii, intlist1, intlist2) + return space.newlist(lst_w) + else: + floatlist1 = w_list1.getitems_float() + if floatlist1 is not None: + floatlist2 = w_list2.getitems_float() + if floatlist2 is not None: + lst_w = _build_zipped_spec(space, Cls_ff, floatlist1, + floatlist2) + return space.newlist(lst_w) + + lst_w = _build_zipped_spec_oo(space, w_list1, w_list2) + return space.newlist(lst_w) + + else: + lst_w = _build_zipped_unspec(space, w_list1, w_list2) + return space.newlist(lst_w) diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py --- a/pypy/objspace/std/test/test_tupleobject.py +++ b/pypy/objspace/std/test/test_tupleobject.py @@ -407,3 +407,21 @@ assert (() != object()) is True assert ((1,) != object()) is True assert ((1, 2) != object()) is True + + def test_zip_two_lists(self): + try: + from __pypy__ import specialized_zip_2_lists + except ImportError: + specialized_zip_2_lists = zip + raises(TypeError, specialized_zip_2_lists, [], ()) + raises(TypeError, specialized_zip_2_lists, (), []) + assert specialized_zip_2_lists([], []) == [ + ] + assert specialized_zip_2_lists([2, 3], []) == [ + ] + assert specialized_zip_2_lists([2, 3], [4, 5, 6]) == [ + (2, 4), (3, 5)] + assert specialized_zip_2_lists([4.1, 3.6, 7.2], [2.3, 4.8]) == [ + (4.1, 2.3), (3.6, 4.8)] + assert specialized_zip_2_lists(["foo", "bar"], [6, 2]) == [ + ("foo", 6), ("bar", 2)] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -652,11 +652,11 @@ def len(self): return immutablevalue(1) +class __extend__(SomeChar): + def ord(self): return SomeInteger(nonneg=True) -class __extend__(SomeChar): - def 
method_isspace(self): return s_Bool @@ -675,6 +675,13 @@ def method_upper(self): return self +class __extend__(SomeUnicodeCodePoint): + + def ord(self): + # warning, on 32-bit with 32-bit unichars, this might return + # negative numbers + return SomeInteger() + class __extend__(SomeIterator): def iter(self): diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1073,7 +1073,6 @@ genop_nursery_ptr_increment = _binaryop_or_lea('ADD', is_add=True) genop_int_sub = _binaryop_or_lea("SUB", is_add=False) genop_int_mul = _binaryop("IMUL") - genop_int_and = _binaryop("AND") genop_int_or = _binaryop("OR") genop_int_xor = _binaryop("XOR") genop_int_lshift = _binaryop("SHL") @@ -1084,6 +1083,15 @@ genop_float_mul = _binaryop('MULSD') genop_float_truediv = _binaryop('DIVSD') + def genop_int_and(self, op, arglocs, result_loc): + arg1 = arglocs[1] + if IS_X86_64 and (isinstance(arg1, ImmedLoc) and + arg1.value == (1 << 32) - 1): + # special case + self.mc.MOV32(arglocs[0], arglocs[0]) + else: + self.mc.AND(arglocs[0], arg1) + genop_int_lt = _cmpop("L", "G") genop_int_le = _cmpop("LE", "GE") genop_int_eq = _cmpop("E", "E") diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -272,6 +272,17 @@ 'void', ofsi) assert p.i == 3**33 + def test_and_mask_common_patterns(self): + cases = [8, 16, 24] + if WORD == 8: + cases.append(32) + for i in cases: + box = InputArgInt(0xAAAAAAAAAAAA) + res = self.execute_operation(rop.INT_AND, + [box, ConstInt(2 ** i - 1)], + 'int') + assert res == 0xAAAAAAAAAAAA & (2 ** i - 1) + def test_nullity_with_guard(self): allops = [rop.INT_IS_TRUE] guards = [rop.GUARD_TRUE, rop.GUARD_FALSE] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 
b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8912,6 +8912,8 @@ guard_value(i2, 12345) [] jump() """ + # getting InvalidLoop would be a good idea, too. + # (this test was written to show it would previously crash) self.optimize_loop(ops, ops) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4343,14 +4343,14 @@ self.meta_interp(allfuncs, [9, 2000]) - def test_unichar_might_be_signed(self): - py.test.skip("wchar_t is sometimes a signed 32-bit integer type, " - "but RPython inteprets it as unsigned (but still " - "translates to wchar_t, so can create confusion)") + def test_unichar_ord_is_never_signed_on_64bit(self): + import sys + if sys.maxunicode == 0xffff: + py.test.skip("test for 32-bit unicodes") def f(x): - return rffi.cast(lltype.Signed, rffi.cast(lltype.UniChar, x)) + return ord(rffi.cast(lltype.UniChar, x)) res = self.interp_operations(f, [-1]) - if rffi.r_wchar_t.SIGN: + if sys.maxint == 2147483647: assert res == -1 else: - assert res == 2 ** 16 - 1 or res == 2 ** 32 - 1 + assert res == 4294967295 diff --git a/rpython/rlib/_rweakvaldict.py b/rpython/rlib/_rweakvaldict.py --- a/rpython/rlib/_rweakvaldict.py +++ b/rpython/rlib/_rweakvaldict.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rdict from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref from rpython.rtyper import rclass +from rpython.rtyper.error import TyperError from rpython.rtyper.rclass import getinstancerepr from rpython.rtyper.rmodel import Repr from rpython.rlib.rweakref import RWeakValueDictionary @@ -60,6 +61,8 @@ self.dict_cache = {} From noreply at buildbot.pypy.org Sat Oct 3 11:03:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 
3 Oct 2015 11:03:25 +0200 (CEST) Subject: [pypy-commit] pypy share-guard-info: close to be merged branch Message-ID: <20151003090325.896591C0FFE@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: share-guard-info Changeset: r79944:aac949b314cf Date: 2015-10-03 11:00 +0200 http://bitbucket.org/pypy/pypy/changeset/aac949b314cf/ Log: close to be merged branch From noreply at buildbot.pypy.org Sat Oct 3 11:03:28 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 11:03:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Merge share-guard-info Message-ID: <20151003090328.AB25E1C0FFE@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79945:054840779e3e Date: 2015-10-03 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/054840779e3e/ Log: Merge share-guard-info This branch shares guard resume descr wherever possible during the backend. Saves quite a bit of memory and also time when tracing diff too long, truncating to 2000 out of 2705 lines diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -39,7 +39,10 @@ def product_check(s): i = 1 for x in s: - i = ovfcheck(i * x) + try: + i = ovfcheck(i * x) + except OverflowError: + raise return i def check_and_adjust_index(space, index, size, axis): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -249,11 +249,17 @@ ix = 1 while iw > 0: if iw & 1: - ix = ovfcheck(ix * temp) + try: + ix = ovfcheck(ix * temp) + except OverflowError: + raise iw >>= 1 # Shift exponent down by 1 bit if iw == 0: break - temp = ovfcheck(temp * temp) # Square the value of temp + try: + temp = ovfcheck(temp * temp) # Square the value of temp + except OverflowError: + raise if iz: # If we did a multiplication, perform a modulo ix %= iz diff --git a/rpython/jit/backend/arm/regalloc.py 
b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -671,6 +671,8 @@ a0, a1 = boxes imm_a1 = check_imm_box(a1) l0 = self.make_sure_var_in_reg(a0, boxes) + op.getdescr().make_a_counter_per_value(op, + self.cpu.all_reg_indexes[l0.value]) if not imm_a1: l1 = self.make_sure_var_in_reg(a1, boxes) else: diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -40,6 +40,10 @@ self.inputargs = map(mapping, inputargs) self.operations = [] for op in operations: + if op.getopnum() == rop.GUARD_VALUE: + # we don't care about the value 13 here, because we gonna + # fish it from the extra slot on frame anyway + op.getdescr().make_a_counter_per_value(op, 13) if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -372,6 +376,18 @@ except ExecutionFinished, e: return e.deadframe + def get_value_direct(self, deadframe, tp, index): + v = deadframe._extra_value + if tp == 'i': + assert lltype.typeOf(v) == lltype.Signed + elif tp == 'r': + assert lltype.typeOf(v) == llmemory.GCREF + elif tp == 'f': + assert lltype.typeOf(v) == longlong.FLOATSTORAGE + else: + assert False + return v + def get_int_value(self, deadframe, index): v = deadframe._values[index] assert lltype.typeOf(v) == lltype.Signed @@ -775,11 +791,13 @@ _TYPE = llmemory.GCREF def __init__(self, latest_descr, values, - last_exception=None, saved_data=None): + last_exception=None, saved_data=None, + extra_value=None): self._latest_descr = latest_descr self._values = values self._last_exception = last_exception self._saved_data = saved_data + self._extra_value = extra_value class LLFrame(object): @@ -872,7 +890,7 @@ # ----------------------------------------------------- - def fail_guard(self, descr, saved_data=None): + def fail_guard(self, descr, saved_data=None, 
extra_value=None): values = [] for box in self.current_op.getfailargs(): if box is not None: @@ -887,7 +905,7 @@ else: raise ExecutionFinished(LLDeadFrame(descr, values, self.last_exception, - saved_data)) + saved_data, extra_value)) def execute_force_spill(self, _, arg): pass @@ -909,7 +927,7 @@ def execute_guard_value(self, descr, arg1, arg2): if arg1 != arg2: - self.fail_guard(descr) + self.fail_guard(descr, extra_value=arg1) def execute_guard_nonnull(self, descr, arg): if not arg: @@ -1028,7 +1046,6 @@ def execute_guard_overflow(self, descr): if not self.overflow_flag: self.fail_guard(descr) - return lltype.nullptr(llmemory.GCREF.TO) # I think it's fine.... def execute_jump(self, descr, *args): raise Jump(descr._llgraph_target, args) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -389,20 +389,40 @@ descr = self.get_latest_descr(deadframe) return rffi.cast(lltype.Signed, descr.rd_locs[index]) * WORD + @specialize.arg(2) + def get_value_direct(self, deadframe, tp, index): + if tp == 'i': + return self.get_int_value_direct(deadframe, index * WORD) + elif tp == 'r': + return self.get_ref_value_direct(deadframe, index * WORD) + elif tp == 'f': + return self.get_float_value_direct(deadframe, index * WORD) + else: + assert False + def get_int_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_int_value_direct(deadframe, pos) + + def get_int_value_direct(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_int_at_mem(deadframe, pos + ofs, WORD, 1) def get_ref_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_ref_value_direct(deadframe, pos) + + def get_ref_value_direct(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = 
self.unpack_arraydescr(descr) return self.read_ref_at_mem(deadframe, pos + ofs) def get_float_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_float_value_direct(deadframe, pos) + + def get_float_value_direct(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_float_at_mem(deadframe, pos + ofs) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -22,6 +22,8 @@ self.operations = subops class FakeMetaInterp(object): + ovf_flag = False + def execute_raised(self, exc, constant=False): self._got_exc = exc @@ -365,9 +367,9 @@ def produce_into(self, builder, r): fail_subset = builder.subset_of_intvars(r) original_intvars = builder.intvars[:] + builder.fakemetainterp.ovf_flag = False super(AbstractOvfOperation, self).produce_into(builder, r) - if builder.fakemetainterp._got_exc: # overflow detected - assert isinstance(builder.fakemetainterp._got_exc, OverflowError) + if builder.fakemetainterp.ovf_flag: # overflow detected op = ResOperation(rop.GUARD_OVERFLOW, []) # the overflowed result should not be used any more, but can # be used on the failure path: recompute fail_subset including diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -424,6 +424,8 @@ def consider_guard_value(self, op): x = self.make_sure_var_in_reg(op.getarg(0)) + loc = self.assembler.cpu.all_reg_indexes[x.value] + op.getdescr().make_a_counter_per_value(op, loc) y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -48,7 +48,7 @@ # which means 
mostly producing a linear list of operations and # inserting jumps or conditional jumps. This is a list of tuples # of the shape ("opname", arg1, ..., argN) or (Label(...),). - ssarepr = flatten_graph(graph, regallocs) + ssarepr = flatten_graph(graph, regallocs, cpu=self.callcontrol.cpu) # # step 3b: compute the liveness around certain operations compute_liveness(ssarepr) diff --git a/rpython/jit/codewriter/flatten.py b/rpython/jit/codewriter/flatten.py --- a/rpython/jit/codewriter/flatten.py +++ b/rpython/jit/codewriter/flatten.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.model import Variable, Constant, c_last_exception from rpython.jit.metainterp.history import AbstractDescr, getkind from rpython.rtyper.lltypesystem import lltype @@ -60,10 +60,11 @@ # ____________________________________________________________ -def flatten_graph(graph, regallocs, _include_all_exc_links=False): +def flatten_graph(graph, regallocs, _include_all_exc_links=False, + cpu=None): """Flatten the graph into an SSARepr, with already-computed register allocations. 
'regallocs' in a dict {kind: RegAlloc}.""" - flattener = GraphFlattener(graph, regallocs, _include_all_exc_links) + flattener = GraphFlattener(graph, regallocs, _include_all_exc_links, cpu) flattener.enforce_input_args() flattener.generate_ssa_form() return flattener.ssarepr @@ -71,9 +72,11 @@ class GraphFlattener(object): - def __init__(self, graph, regallocs, _include_all_exc_links=False): + def __init__(self, graph, regallocs, _include_all_exc_links=False, + cpu=None): self.graph = graph self.regallocs = regallocs + self.cpu = cpu self._include_all_exc_links = _include_all_exc_links self.registers = {} if graph: @@ -100,7 +103,7 @@ self.seen_blocks = {} self.make_bytecode_block(self.graph.startblock) - def make_bytecode_block(self, block): + def make_bytecode_block(self, block, handling_ovf=False): if block.exits == (): self.make_return(block.inputargs) return @@ -114,9 +117,15 @@ # operations = block.operations for i, op in enumerate(operations): + if '_ovf' in op.opname: + if (len(block.exits) not in (2, 3) or + block.exitswitch is not c_last_exception): + raise Exception("detected a block containing ovfcheck()" + " but no OverflowError is caught, this" + " is not legal in jitted blocks") self.serialize_op(op) # - self.insert_exits(block) + self.insert_exits(block, handling_ovf) def make_return(self, args): if len(args) == 1: @@ -136,16 +145,16 @@ raise Exception("?") self.emitline("---") - def make_link(self, link): + def make_link(self, link, handling_ovf): if (link.target.exits == () and link.last_exception not in link.args and link.last_exc_value not in link.args): self.make_return(link.args) # optimization only return self.insert_renamings(link) - self.make_bytecode_block(link.target) + self.make_bytecode_block(link.target, handling_ovf) - def make_exception_link(self, link): + def make_exception_link(self, link, handling_ovf): # Like make_link(), but also introduces the 'last_exception' and # 'last_exc_value' as variables if needed. 
Also check if the link # is jumping directly to the re-raising exception block. @@ -153,54 +162,74 @@ assert link.last_exc_value is not None if link.target.operations == () and link.args == [link.last_exception, link.last_exc_value]: - self.emitline("reraise") + if handling_ovf: + exc_data = self.cpu.rtyper.exceptiondata + ll_ovf = exc_data.get_standard_ll_exc_instance_by_class( + OverflowError) + c = Constant(ll_ovf, concretetype=lltype.typeOf(ll_ovf)) + self.emitline("raise", c) + else: + self.emitline("reraise") self.emitline("---") return # done - self.make_link(link) + self.make_link(link, handling_ovf) - def insert_exits(self, block): + def insert_exits(self, block, handling_ovf=False): if len(block.exits) == 1: # A single link, fall-through link = block.exits[0] assert link.exitcase in (None, False, True) # the cases False or True should not really occur, but can show # up in the manually hacked graphs for generators... - self.make_link(link) + self.make_link(link, handling_ovf) # elif block.canraise: # An exception block. See test_exc_exitswitch in test_flatten.py # for an example of what kind of code this makes. index = -1 - while True: - lastopname = block.operations[index].opname - if lastopname != '-live-': - break - index -= 1 + opname = block.operations[index].opname + if '_ovf' in opname: + # ovf checking operation as a lat thing, -live- should be + # one before it + line = self.popline() + self.emitline(opname[:7] + '_jump_if_ovf', + TLabel(block.exits[1]), *line[1:]) + assert len(block.exits) in (2, 3) + self.make_link(block.exits[0], False) + self.emitline(Label(block.exits[1])) + self.make_exception_link(block.exits[1], True) + if len(block.exits) == 3: + assert block.exits[2].exitcase is Exception + self.make_exception_link(block.exits[2], False) + return + else: + while True: + lastopname = block.operations[index].opname + if lastopname != '-live-': + break + index -= 1 assert block.exits[0].exitcase is None # is this always True? 
# if not self._include_all_exc_links: if index == -1: # cannot raise: the last instruction is not # actually a '-live-' - self.make_link(block.exits[0]) + self.make_link(block.exits[0], False) return # self.emitline('catch_exception', TLabel(block.exits[0])) - self.make_link(block.exits[0]) + self.make_link(block.exits[0], False) self.emitline(Label(block.exits[0])) for link in block.exits[1:]: - if (link.exitcase is Exception or - (link.exitcase is OverflowError and - lastopname.startswith('int_') and - lastopname.endswith('_ovf'))): + if link.exitcase is Exception: # this link captures all exceptions - self.make_exception_link(link) + self.make_exception_link(link, False) break self.emitline('goto_if_exception_mismatch', Constant(link.llexitcase, lltype.typeOf(link.llexitcase)), TLabel(link)) - self.make_exception_link(link) + self.make_exception_link(link, False) self.emitline(Label(link)) else: # no link captures all exceptions, so we have to put a reraise @@ -216,29 +245,26 @@ if linkfalse.llexitcase == True: linkfalse, linktrue = linktrue, linkfalse opname = 'goto_if_not' - livebefore = False if isinstance(block.exitswitch, tuple): # special case produced by jtransform.optimize_goto_if_not() opname = 'goto_if_not_' + block.exitswitch[0] opargs = block.exitswitch[1:] if opargs[-1] == '-live-before': - livebefore = True opargs = opargs[:-1] else: assert block.exitswitch.concretetype == lltype.Bool opargs = [block.exitswitch] # lst = self.flatten_list(opargs) + [TLabel(linkfalse)] - if livebefore: - self.emitline('-live-') + self.emitline('-live-') self.emitline(opname, *lst) - if not livebefore: - self.emitline('-live-', TLabel(linkfalse)) + #if not livebefore: + # self.emitline('-live-', TLabel(linkfalse)) # true path: - self.make_link(linktrue) + self.make_link(linktrue, handling_ovf) # false path: self.emitline(Label(linkfalse)) - self.make_link(linkfalse) + self.make_link(linkfalse, handling_ovf) # else: # A switch. 
@@ -261,7 +287,7 @@ switchdict) # emit the default path if block.exits[-1].exitcase == 'default': - self.make_link(block.exits[-1]) + self.make_link(block.exits[-1], handling_ovf) else: self.emitline("unreachable") self.emitline("---") @@ -275,7 +301,7 @@ # if the switched value doesn't match any case. self.emitline(Label(switch)) self.emitline('-live-') - self.make_link(switch) + self.make_link(switch, handling_ovf) def insert_renamings(self, link): renamings = {} @@ -323,6 +349,9 @@ def emitline(self, *line): self.ssarepr.insns.append(line) + def popline(self): + return self.ssarepr.insns.pop() + def flatten_list(self, arglist): args = [] for v in arglist: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -8,7 +8,8 @@ from rpython.jit.metainterp.history import getkind from rpython.jit.metainterp.typesystem import deref, arrayItem from rpython.jit.metainterp.blackhole import BlackholeInterpreter -from rpython.flowspace.model import SpaceOperation, Variable, Constant +from rpython.flowspace.model import SpaceOperation, Variable, Constant,\ + c_last_exception from rpython.rlib import objectmodel from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc @@ -211,8 +212,8 @@ # ok! 
optimize this case block.operations.remove(op) block.exitswitch = (op.opname,) + tuple(op.args) - if op.opname in ('ptr_iszero', 'ptr_nonzero'): - block.exitswitch += ('-live-before',) + #if op.opname in ('ptr_iszero', 'ptr_nonzero'): + block.exitswitch += ('-live-before',) # if the variable escape to the next block along a link, # replace it with a constant, because we know its value for link in block.exits: @@ -333,13 +334,13 @@ def rewrite_op_int_add_ovf(self, op): op0 = self._rewrite_symmetric(op) op1 = SpaceOperation('-live-', [], None) - return [op0, op1] + return [op1, op0] rewrite_op_int_mul_ovf = rewrite_op_int_add_ovf def rewrite_op_int_sub_ovf(self, op): op1 = SpaceOperation('-live-', [], None) - return [op, op1] + return [op1, op] def _noop_rewrite(self, op): return op diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -76,11 +76,11 @@ assert jitcode.num_regs_i() == 2 assert jitcode.num_regs_r() == 0 assert jitcode.num_regs_f() == 0 - assert jitcode._live_vars(5) == '%i0 %i1' + assert jitcode._live_vars(0) == '%i0 %i1' # from rpython.jit.codewriter.jitcode import MissingLiveness for i in range(len(jitcode.code)+1): - if i != 5: + if i != 0: py.test.raises(MissingLiveness, jitcode._live_vars, i) def test_call(): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -140,6 +140,7 @@ def encoding_test(self, func, args, expected, transform=False, liveness=False, cc=None, jd=None): + graphs = self.make_graphs(func, args) #graphs[0].show() if transform: @@ -147,7 +148,8 @@ cc = cc or FakeCallControl() transform_graph(graphs[0], FakeCPU(self.rtyper), cc, jd) ssarepr = flatten_graph(graphs[0], fake_regallocs(), - _include_all_exc_links=not 
transform) + _include_all_exc_links=not transform, + cpu=FakeCPU(self.rtyper)) if liveness: from rpython.jit.codewriter.liveness import compute_liveness compute_liveness(ssarepr) @@ -169,8 +171,8 @@ return n + 1 self.encoding_test(f, [10], """ int_gt %i0, $0 -> %i1 + -live- goto_if_not %i1, L1 - -live- L1 int_copy %i0 -> %i2 int_sub %i2, $3 -> %i3 int_copy %i3 -> %i4 @@ -194,8 +196,8 @@ int_copy %i1 -> %i3 L1: int_gt %i2, $0 -> %i4 + -live- goto_if_not %i4, L2 - -live- L2 int_copy %i2 -> %i5 int_copy %i3 -> %i6 int_add %i6, %i5 -> %i7 @@ -218,8 +220,8 @@ int_copy %i0 -> %i2 int_copy %i1 -> %i3 L1: + -live- goto_if_not_int_gt %i2, $0, L2 - -live- L2 int_copy %i2 -> %i4 int_copy %i3 -> %i5 int_add %i5, %i4 -> %i6 @@ -457,8 +459,8 @@ # note that 'goto_if_not_int_is_true' is not the same thing # as just 'goto_if_not', because the last one expects a boolean self.encoding_test(f, [7], """ + -live- goto_if_not_int_is_true %i0, L1 - -live- L1 int_return $False --- L1: @@ -523,8 +525,8 @@ else: return m2 self.encoding_test(f, [4, 5, 6], """ + -live- %i0, %i1, %i2 goto_if_not_int_is_true %i0, L1 - -live- %i1, %i2, L1 int_return %i1 --- L1: @@ -538,15 +540,59 @@ except OverflowError: return 42 self.encoding_test(f, [7, 2], """ - int_add_ovf %i0, %i1 -> %i2 - -live- %i2 - catch_exception L1 + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i0, %i1 -> %i2 int_return %i2 --- L1: int_return $42 """, transform=True, liveness=True) + def test_multiple_int_add_ovf(self): + def f(i, j): + try: + ovfcheck(j + i) + return ovfcheck(i + j) + except OverflowError: + return 42 + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i1, %i0 -> %i2 + int_copy %i1 -> %i3 + int_copy %i0 -> %i4 + -live- %i3, %i4 + int_add_jump_if_ovf L2, %i4, %i3 -> %i5 + int_return %i5 + --- + L2: + int_return $42 + --- + L1: + int_return $42 + """, transform=True, liveness=True) + + def test_ovfcheck_no_catch(self): + def f(i, j): + return ovfcheck(i + j) + err = py.test.raises(Exception, 
"self.encoding_test(f, [7, 2], ''," + "transform=True, liveness=True)") + assert "ovfcheck()" in str(err) + + def test_ovfcheck_reraise(self): + def f(i, j): + try: + ovfcheck(j + i) + except OverflowError: + raise + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i1, %i0 -> %i2 + void_return + --- + L1: + raise $<* struct object> + """, transform=True, liveness=True) + def test_residual_call_raising(self): @dont_look_inside def g(i, j): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -15,7 +15,7 @@ for prod in result: yield tuple(prod) -from rpython.flowspace.model import FunctionGraph, Block, Link +from rpython.flowspace.model import FunctionGraph, Block, Link, c_last_exception from rpython.flowspace.model import SpaceOperation, Variable, Constant from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi from rpython.rtyper import rclass @@ -187,7 +187,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [sp1, sp2] - assert block.exitswitch == ('int_gt', v1, v2) + assert block.exitswitch == ('int_gt', v1, v2, '-live-before') assert block.exits == exits def test_optimize_goto_if_not__incoming(): @@ -211,7 +211,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [] - assert block.exitswitch == ('int_gt', v1, v2) + assert block.exitswitch == ('int_gt', v1, v2, '-live-before') assert block.exits == exits assert exits[1].args == [const(True)] @@ -235,7 +235,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [] - assert block.exitswitch == (opname, v1, v2) + assert block.exitswitch == (opname, v1, v2, '-live-before') assert block.exits == exits def test_optimize_goto_if_not__ptr_iszero(): @@ -287,7 +287,7 @@ for v2 in 
[varoftype(lltype.Signed), const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) - op0, op1 = oplist + op1, op0 = oplist assert op0.opname == 'int_add_ovf' if isinstance(v1, Constant) and isinstance(v2, Variable): assert op0.args == [v2, v1] diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -63,8 +63,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i2 + -live- goto_if_not %i2, L2 - -live- L2 int_add %i1, %i0 -> %i1 int_sub %i0, $1 -> %i0 goto L1 @@ -82,8 +82,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i2 + -live- goto_if_not %i2, L2 - -live- L2 int_push %i1 int_copy %i0 -> %i1 int_pop -> %i0 @@ -102,8 +102,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i0 + -live- goto_if_not %i0, L2 - -live- L2 int_copy %i1 -> %i0 int_copy $2 -> %i1 goto L1 @@ -121,8 +121,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i3 + -live- goto_if_not %i3, L2 - -live- L2 int_push %i1 int_copy %i2 -> %i1 int_copy %i0 -> %i2 @@ -142,8 +142,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i3 + -live- goto_if_not %i3, L2 - -live- L2 int_copy %i2 -> %i1 goto L1 --- @@ -236,8 +236,8 @@ self.check_assembler(graph, """ int_lshift %i0, %i1 -> %i2 int_rshift %i2, %i1 -> %i1 + -live- goto_if_not_int_ne %i1, %i0, L1 - -live- L1 raise $<* struct object> --- L1: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -212,6 +212,20 @@ assert lltype.typeOf(result) is longlong.FLOATSTORAGE self.registers_f[ord(code[position])] = result position += 1 + elif resulttype == "iL": + result, new_position = result + if new_position != -1: + position = new_position + next_argcode = next_argcode + 2 + else: + 
assert argcodes[next_argcode] == '>' + assert argcodes[next_argcode + 1] == 'i' + next_argcode = next_argcode + 2 + if lltype.typeOf(result) is lltype.Bool: + result = int(result) + assert lltype.typeOf(result) is lltype.Signed + self.registers_i[ord(code[position])] = result + position += 1 elif resulttype == 'L': assert result >= 0 position = result @@ -394,17 +408,26 @@ def bhimpl_int_mul(a, b): return intmask(a * b) - @arguments("i", "i", returns="i") - def bhimpl_int_add_ovf(a, b): - return ovfcheck(a + b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_add_jump_if_ovf(label, a, b): + try: + return ovfcheck(a + b), -1 + except OverflowError: + return 0, label - @arguments("i", "i", returns="i") - def bhimpl_int_sub_ovf(a, b): - return ovfcheck(a - b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_sub_jump_if_ovf(label, a, b): + try: + return ovfcheck(a - b), -1 + except OverflowError: + return 0, label - @arguments("i", "i", returns="i") - def bhimpl_int_mul_ovf(a, b): - return ovfcheck(a * b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_mul_jump_if_ovf(label, a, b): + try: + return ovfcheck(a * b), -1 + except OverflowError: + return 0, label @arguments("i", "i", returns="i") def bhimpl_int_floordiv(a, b): @@ -1465,57 +1488,9 @@ assert kind == 'v' return lltype.nullptr(rclass.OBJECTPTR.TO) - def _prepare_resume_from_failure(self, opnum, deadframe): - from rpython.jit.metainterp.resoperation import rop - # - if opnum == rop.GUARD_FUTURE_CONDITION: - pass - elif opnum == rop.GUARD_TRUE: - # Produced directly by some goto_if_not_xxx() opcode that did not - # jump, but which must now jump. The pc is just after the opcode. - self.position = self.jitcode.follow_jump(self.position) - # - elif opnum == rop.GUARD_FALSE: - # Produced directly by some goto_if_not_xxx() opcode that jumped, - # but which must no longer jump. The pc is just after the opcode. 
- pass - # - elif opnum == rop.GUARD_VALUE or opnum == rop.GUARD_CLASS: - # Produced by guard_class(), xxx_guard_value(), or a few other - # opcodes like switch(). The pc is at the start of the opcode - # (so it will be redone). - pass - # - elif (opnum == rop.GUARD_NONNULL or - opnum == rop.GUARD_ISNULL or - opnum == rop.GUARD_NONNULL_CLASS): - # Produced by goto_if_not_ptr_{non,is}zero(). The pc is at the - # start of the opcode (so it will be redone); this is needed - # because of GUARD_NONNULL_CLASS. - pass - # - elif (opnum == rop.GUARD_NO_EXCEPTION or - opnum == rop.GUARD_EXCEPTION or - opnum == rop.GUARD_NOT_FORCED): - return lltype.cast_opaque_ptr(rclass.OBJECTPTR, - self.cpu.grab_exc_value(deadframe)) - # - elif opnum == rop.GUARD_NO_OVERFLOW: - # Produced by int_xxx_ovf(). The pc is just after the opcode. - # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) - # - elif opnum == rop.GUARD_OVERFLOW: - # Produced by int_xxx_ovf(). The pc is just after the opcode. - # We get here because it used to overflow, but now it no longer - # does. 
- pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass - else: - from rpython.jit.metainterp.resoperation import opname - raise NotImplementedError(opname[opnum]) - return lltype.nullptr(rclass.OBJECTPTR.TO) + def _prepare_resume_from_failure(self, deadframe): + return lltype.cast_opaque_ptr(rclass.OBJECTPTR, + self.cpu.grab_exc_value(deadframe)) # connect the return of values from the called frame to the # 'xxx_call_yyy' instructions from the caller frame @@ -1641,8 +1616,7 @@ deadframe, all_virtuals) - current_exc = blackholeinterp._prepare_resume_from_failure( - resumedescr.guard_opnum, deadframe) + current_exc = blackholeinterp._prepare_resume_from_failure(deadframe) _run_forever(blackholeinterp, current_exc) resume_in_blackhole._dont_inline_ = True diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -767,12 +767,15 @@ # fetch the actual value of the guard_value, possibly turning # it to an integer if typetag == self.TY_INT: - intval = metainterp_sd.cpu.get_int_value(deadframe, index) + intval = metainterp_sd.cpu.get_value_direct(deadframe, 'i', + index) elif typetag == self.TY_REF: - refval = metainterp_sd.cpu.get_ref_value(deadframe, index) + refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', + index) intval = lltype.cast_ptr_to_int(refval) elif typetag == self.TY_FLOAT: - floatval = metainterp_sd.cpu.get_float_value(deadframe, index) + floatval = metainterp_sd.cpu.get_value_direct(deadframe, 'f', + index) intval = longlong.gethash_fast(floatval) else: assert 0, typetag @@ -788,11 +791,6 @@ increment = jitdriver_sd.warmstate.increment_trace_eagerness return jitcounter.tick(hash, increment) - def get_index_of_guard_value(self): - if (self.status & self.ST_TYPE_MASK) == 0: - return -1 - return intmask(self.status >> self.ST_SHIFT) - def start_compiling(self): # start tracing and compiling from this guard. 
self.status |= self.ST_BUSY_FLAG @@ -819,62 +817,24 @@ new_loop.original_jitcell_token, metainterp.box_names_memo) - def make_a_counter_per_value(self, guard_value_op): + def make_a_counter_per_value(self, guard_value_op, index): assert guard_value_op.getopnum() == rop.GUARD_VALUE box = guard_value_op.getarg(0) - try: - i = guard_value_op.getfailargs().index(box) - except ValueError: - return # xxx probably very rare + if box.type == history.INT: + ty = self.TY_INT + elif box.type == history.REF: + ty = self.TY_REF + elif box.type == history.FLOAT: + ty = self.TY_FLOAT else: - if box.type == history.INT: - ty = self.TY_INT - elif box.type == history.REF: - ty = self.TY_REF - elif box.type == history.FLOAT: - ty = self.TY_FLOAT - else: - assert 0, box.type - self.status = ty | (r_uint(i) << self.ST_SHIFT) + assert 0, box.type + self.status = ty | (r_uint(index) << self.ST_SHIFT) -class ResumeGuardNonnullDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NONNULL - -class ResumeGuardIsnullDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_ISNULL - -class ResumeGuardClassDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_CLASS - -class ResumeGuardTrueDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_TRUE - -class ResumeGuardFalseDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_FALSE - -class ResumeGuardNonnullClassDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NONNULL_CLASS - -class ResumeGuardExceptionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_EXCEPTION - -class ResumeGuardNoExceptionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NO_EXCEPTION - -class ResumeGuardOverflowDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_OVERFLOW - -class ResumeGuardNoOverflowDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NO_OVERFLOW - -class ResumeGuardValueDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_VALUE - -class ResumeGuardNotInvalidated(ResumeGuardDescr): - guard_opnum = rop.GUARD_NOT_INVALIDATED +class ResumeGuardExcDescr(ResumeGuardDescr): + pass 
class ResumeAtPositionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_FUTURE_CONDITION + pass class AllVirtuals: llopaque = True @@ -895,8 +855,6 @@ class ResumeGuardForcedDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NOT_FORCED - def _init(self, metainterp_sd, jitdriver_sd): # to please the annotator self.metainterp_sd = metainterp_sd @@ -959,37 +917,13 @@ if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: resumedescr = ResumeGuardForcedDescr() resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = ResumeGuardNotInvalidated() - elif opnum == rop.GUARD_FUTURE_CONDITION: - resumedescr = ResumeAtPositionDescr() - elif opnum == rop.GUARD_VALUE: - resumedescr = ResumeGuardValueDescr() - elif opnum == rop.GUARD_NONNULL: - resumedescr = ResumeGuardNonnullDescr() - elif opnum == rop.GUARD_ISNULL: - resumedescr = ResumeGuardIsnullDescr() - elif opnum == rop.GUARD_NONNULL_CLASS: - resumedescr = ResumeGuardNonnullClassDescr() - elif opnum == rop.GUARD_CLASS: - resumedescr = ResumeGuardClassDescr() - elif opnum == rop.GUARD_TRUE: - resumedescr = ResumeGuardTrueDescr() - elif opnum == rop.GUARD_FALSE: - resumedescr = ResumeGuardFalseDescr() - elif opnum == rop.GUARD_EXCEPTION: - resumedescr = ResumeGuardExceptionDescr() - elif opnum == rop.GUARD_NO_EXCEPTION: - resumedescr = ResumeGuardNoExceptionDescr() - elif opnum == rop.GUARD_OVERFLOW: - resumedescr = ResumeGuardOverflowDescr() - elif opnum == rop.GUARD_NO_OVERFLOW: - resumedescr = ResumeGuardNoOverflowDescr() elif opnum in (rop.GUARD_IS_OBJECT, rop.GUARD_SUBCLASS, rop.GUARD_GC_TYPE): # note - this only happens in tests resumedescr = ResumeAtPositionDescr() + elif opnum in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): + resumedescr = ResumeGuardExcDescr() else: - assert False + resumedescr = ResumeGuardDescr() return resumedescr class ResumeFromInterpDescr(ResumeDescr): diff --git a/rpython/jit/metainterp/executor.py 
b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -253,7 +253,7 @@ z = ovfcheck(a + b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z @@ -264,7 +264,7 @@ z = ovfcheck(a - b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z @@ -275,7 +275,7 @@ z = ovfcheck(a * b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -157,6 +157,9 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + def make_a_counter_per_value(self, op, index): + pass # for testing + @specialize.argtype(0) def newconst(value): @@ -540,6 +543,9 @@ def check_consistency_of_branch(operations, seen, check_descr=True): "NOT_RPYTHON" for num, op in enumerate(operations): + if op.is_ovf(): + assert operations[num + 1].getopnum() in (rop.GUARD_NO_OVERFLOW, + rop.GUARD_OVERFLOW) for i in range(op.numargs()): box = op.getarg(i) if not isinstance(box, Const): @@ -750,7 +756,6 @@ return tokens def check_history(self, expected=None, **check): - return insns = {} for op in self.operations: opname = op.getopname() diff --git a/rpython/jit/metainterp/jitprof.py b/rpython/jit/metainterp/jitprof.py --- a/rpython/jit/metainterp/jitprof.py +++ b/rpython/jit/metainterp/jitprof.py @@ -143,6 +143,7 @@ self._print_intline("guards", cnt[Counters.GUARDS]) self._print_intline("opt ops", cnt[Counters.OPT_OPS]) self._print_intline("opt guards", cnt[Counters.OPT_GUARDS]) + self._print_intline("opt guards shared", cnt[Counters.OPT_GUARDS_SHARED]) 
self._print_intline("forcings", cnt[Counters.OPT_FORCINGS]) self._print_intline("abort: trace too long", cnt[Counters.ABORT_TOO_LONG]) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -11,7 +11,7 @@ from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation, OpHelpers,\ - AbstractResOp + AbstractResOp, GuardResOp from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.optimizeopt import info @@ -288,7 +288,7 @@ cf = submap[index] = ArrayCachedField(index) return cf - def emit_operation(self, op): + def emit_operation(self, op): self.emitting_operation(op) self.emit_postponed_op() if (op.is_comparison() or op.is_call_may_force() diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -104,6 +104,11 @@ self.last_guard_pos = -1 def mark_last_guard(self, optimizer): + if (optimizer.getlastop() is None or + not optimizer.getlastop().is_guard()): + # there can be a really emitted operation that's not a guard + # e.g. 
a setfield, ignore those + return self.last_guard_pos = len(optimizer._newoperations) - 1 assert self.get_last_guard(optimizer).is_guard() diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp import jitprof, resume, compile from rpython.jit.metainterp.executor import execute_nonspec_const -from rpython.jit.metainterp.logger import LogOperations -from rpython.jit.metainterp.history import Const, ConstInt, REF, ConstPtr +from rpython.jit.metainterp.history import Const, ConstInt, ConstPtr from rpython.jit.metainterp.optimizeopt.intutils import IntBound,\ ConstIntBound, MININT, MAXINT, IntUnbounded from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method @@ -10,6 +9,7 @@ from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.debug import debug_print @@ -260,6 +260,8 @@ self.optearlyforce = None self.optunroll = None + self._last_guard_op = None + self.set_optimizations(optimizations) self.setup() @@ -526,6 +528,7 @@ if extra_jump: self.first_optimization.propagate_forward(ops[-1]) self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + return (BasicLoopInfo(newargs, self.quasi_immutable_deps), self._newoperations) @@ -566,6 +569,7 @@ op.setarg(i, arg) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) if op.is_guard(): + assert isinstance(op, GuardResOp) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_GUARDS) pendingfields = self.pendingfields self.pendingfields = None @@ -574,20 +578,85 @@ del self.replaces_guard[orig_op] return else: - guard_op = self.replace_op_with(op, op.getopnum()) - op = self.store_final_boxes_in_guard(guard_op, pendingfields) - # for unrolling - for farg in 
op.getfailargs(): - if farg: - self.force_box(farg) + op = self.emit_guard_operation(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True + if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or + op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + pass + else: + self._last_guard_op = None self._really_emitted_operation = op self._newoperations.append(op) + def emit_guard_operation(self, op, pendingfields): + guard_op = self.replace_op_with(op, op.getopnum()) + opnum = guard_op.getopnum() + if (self._last_guard_op and guard_op.getdescr() is None): + self.metainterp_sd.profiler.count_ops(opnum, + jitprof.Counters.OPT_GUARDS_SHARED) + op = self._copy_resume_data_from(guard_op, + self._last_guard_op) + else: + op = self.store_final_boxes_in_guard(guard_op, pendingfields) + self._last_guard_op = op + # for unrolling + for farg in op.getfailargs(): + if farg: + self.force_box(farg) + if op.getopnum() == rop.GUARD_EXCEPTION: + self._last_guard_op = None + return op + + def potentially_change_ovf_op_to_no_ovf(self, op): + # if last emitted operations was int_xxx_ovf and we are not emitting + # a guard_no_overflow change to int_add + if op.getopnum() != rop.GUARD_NO_OVERFLOW: + return + if not self._newoperations: + # got optimized otherwise + return + op = self._newoperations[-1] + if not op.is_ovf(): + return + newop = self.replace_op_with_no_ovf(op) + self._newoperations[-1] = newop + + def replace_op_with_no_ovf(self, op): + if op.getopnum() == rop.INT_MUL_OVF: + return self.replace_op_with(op, rop.INT_MUL) + elif op.getopnum() == rop.INT_ADD_OVF: + return self.replace_op_with(op, rop.INT_ADD) + elif op.getopnum() == rop.INT_SUB_OVF: + return self.replace_op_with(op, rop.INT_SUB) + else: + assert False + + + def _copy_resume_data_from(self, guard_op, last_guard_op): + if guard_op.getopnum() in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION): + assert last_guard_op.getopnum() == rop.GUARD_NOT_FORCED + descr = 
compile.invent_fail_descr_for_op(guard_op.getopnum(), self) + descr.copy_all_attributes_from(last_guard_op.getdescr()) + guard_op.setdescr(descr) + descr.store_final_boxes(guard_op, last_guard_op.getfailargs(), + self.metainterp_sd) + assert isinstance(guard_op, GuardResOp) + if guard_op.getopnum() == rop.GUARD_VALUE: + guard_op = self._maybe_replace_guard_value(guard_op, descr) + return guard_op + def getlastop(self): return self._really_emitted_operation + def is_call_pure_pure_canraise(self, op): + if not op.is_call_pure(): + return False + effectinfo = op.getdescr().get_extra_info() + if effectinfo.check_can_raise(ignore_memoryerror=True): + return True + return False + def replace_guard_op(self, old_op_pos, new_op): old_op = self._newoperations[old_op_pos] assert old_op.is_guard() @@ -625,24 +694,26 @@ descr.store_final_boxes(op, newboxes, self.metainterp_sd) # if op.getopnum() == rop.GUARD_VALUE: - if op.getarg(0).type == 'i': - b = self.getintbound(op.getarg(0)) - if b.is_bool(): - # Hack: turn guard_value(bool) into guard_true/guard_false. - # This is done after the operation is emitted to let - # store_final_boxes_in_guard set the guard_opnum field of - # the descr to the original rop.GUARD_VALUE. - constvalue = op.getarg(1).getint() - if constvalue == 0: - opnum = rop.GUARD_FALSE - elif constvalue == 1: - opnum = rop.GUARD_TRUE - else: - raise AssertionError("uh?") - newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) - return newop - # a real GUARD_VALUE. Make it use one counter per value. - descr.make_a_counter_per_value(op) + op = self._maybe_replace_guard_value(op, descr) + return op + + def _maybe_replace_guard_value(self, op, descr): + if op.getarg(0).type == 'i': + b = self.getintbound(op.getarg(0)) + if b.is_bool(): + # Hack: turn guard_value(bool) into guard_true/guard_false. + # This is done after the operation is emitted to let + # store_final_boxes_in_guard set the guard_opnum field of + # the descr to the original rop.GUARD_VALUE. 
+ constvalue = op.getarg(1).getint() + if constvalue == 0: + opnum = rop.GUARD_FALSE + elif constvalue == 1: + opnum = rop.GUARD_TRUE + else: + raise AssertionError("uh?") + newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) + return newop return op def optimize_default(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -401,7 +401,7 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_VALUE (%s) was proven to ' 'always fail' % r) - descr = compile.ResumeGuardValueDescr() + descr = compile.ResumeGuardDescr() op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)], descr = descr) @@ -411,7 +411,6 @@ # not put in short preambles guard_xxx and guard_value # on the same box. self.optimizer.replace_guard(op, info) - descr.make_a_counter_per_value(op) # to be safe info.reset_last_guard_pos() return op @@ -453,7 +452,7 @@ if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. 
- descr = compile.ResumeGuardNonnullClassDescr() + descr = compile.ResumeGuardDescr() op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, args = [old_guard_op.getarg(0), op.getarg(1)], descr=descr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -2022,6 +2022,7 @@ None) def test_merge_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2055,6 +2056,7 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2072,6 +2074,7 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2502,7 +2505,6 @@ if values is not None: fail_args = values fdescr = guard_op.getdescr() - assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) boxes = reader.consume_boxes() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2967,6 +2967,7 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3012,6 +3013,7 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] 
guard_nonnull(p1) [i0] @@ -3035,6 +3037,7 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -21,6 +21,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.rtyper import rclass +from rpython.rlib.objectmodel import compute_unique_id @@ -228,17 +229,23 @@ ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() - for _opimpl in ['int_add_ovf', 'int_sub_ovf', 'int_mul_ovf']: + for (_opimpl, resop) in [ + ('int_add_jump_if_ovf', 'INT_ADD_OVF'), + ('int_sub_jump_if_ovf', 'INT_SUB_OVF'), + ('int_mul_jump_if_ovf', 'INT_MUL_OVF')]: exec py.code.Source(''' - @arguments("box", "box") - def opimpl_%s(self, b1, b2): - self.metainterp.clear_exception() + @arguments("label", "box", "box", "orgpc") + def opimpl_%s(self, lbl, b1, b2, orgpc): + self.metainterp.ovf_flag = False resbox = self.execute(rop.%s, b1, b2) - self.make_result_of_lastop(resbox) # same as execute_varargs() if not isinstance(resbox, Const): - self.metainterp.handle_possible_overflow_error() + return self.handle_possible_overflow_error(lbl, orgpc, + resbox) + elif self.metainterp.ovf_flag: + self.pc = lbl + return None # but don't emit GUARD_OVERFLOW return resbox - ''' % (_opimpl, _opimpl.upper())).compile() + ''' % (_opimpl, resop)).compile() for _opimpl in ['int_is_true', 'int_is_zero', 'int_neg', 'int_invert', 'cast_float_to_int', 'cast_int_to_float', @@ -329,37 +336,37 @@ def opimpl_goto(self, target): self.pc = target - @arguments("box", "label") - def opimpl_goto_if_not(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not(self, box, target, orgpc): 
switchcase = box.getint() if switchcase: opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.metainterp.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box, resumepc=orgpc) if not switchcase: self.pc = target - @arguments("box", "label") - def opimpl_goto_if_not_int_is_true(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not_int_is_true(self, box, target, orgpc): condbox = self.execute(rop.INT_IS_TRUE, box) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) - @arguments("box", "label") - def opimpl_goto_if_not_int_is_zero(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not_int_is_zero(self, box, target, orgpc): condbox = self.execute(rop.INT_IS_ZERO, box) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) for _opimpl in ['int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', 'ptr_eq', 'ptr_ne']: exec py.code.Source(''' - @arguments("box", "box", "label") - def opimpl_goto_if_not_%s(self, b1, b2, target): + @arguments("box", "box", "label", "orgpc") + def opimpl_goto_if_not_%s(self, b1, b2, target, orgpc): if b1 is b2: condbox = %s else: condbox = self.execute(rop.%s, b1, b2) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() @@ -418,7 +425,7 @@ assert box.getint() == 0 target = switchdict.dict[const1.getint()] self.metainterp.generate_guard(rop.GUARD_FALSE, box, - resumepc=target) + resumepc=orgpc) else: # found one of the cases self.implement_guard_value(valuebox, orgpc) @@ -1457,6 +1464,17 @@ def setup_resume_at_op(self, pc): self.pc = pc + def handle_possible_overflow_error(self, label, orgpc, resbox): + if self.metainterp.ovf_flag: + self.metainterp.generate_guard(rop.GUARD_OVERFLOW, None, + resumepc=orgpc) + self.pc = label + return None + else: + 
self.metainterp.generate_guard(rop.GUARD_NO_OVERFLOW, None, + resumepc=orgpc) + return resbox + def run_one_step(self): # Execute the frame forward. This method contains a loop that leaves # whenever the 'opcode_implementations' (which is one of the 'opimpl_' @@ -2022,7 +2040,7 @@ moreargs = [box] + extraargs else: moreargs = list(extraargs) - if opnum == rop.GUARD_EXCEPTION or opnum == rop.GUARD_OVERFLOW: + if opnum == rop.GUARD_EXCEPTION: guard_op = self.history.record(opnum, moreargs, lltype.nullptr(llmemory.GCREF.TO)) else: @@ -2309,7 +2327,7 @@ if isinstance(key, compile.ResumeAtPositionDescr): self.seen_loop_header_for_jdindex = self.jitdriver_sd.index try: - self.prepare_resume_from_failure(key.guard_opnum, deadframe) + self.prepare_resume_from_failure(deadframe, key) if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(Counters.ABORT_BRIDGE) self.interpret() @@ -2452,22 +2470,9 @@ else: assert 0 self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) - def prepare_resume_from_failure(self, opnum, deadframe): - frame = self.framestack[-1] - if opnum == rop.GUARD_FUTURE_CONDITION: - pass - elif opnum == rop.GUARD_TRUE: # a goto_if_not that jumps only now - frame.pc = frame.jitcode.follow_jump(frame.pc) - elif opnum == rop.GUARD_FALSE: # a goto_if_not that stops jumping; - pass # or a switch that was in its "default" case - elif opnum == rop.GUARD_VALUE or opnum == rop.GUARD_CLASS: - pass # the pc is already set to the *start* of the opcode - elif (opnum == rop.GUARD_NONNULL or - opnum == rop.GUARD_ISNULL or - opnum == rop.GUARD_NONNULL_CLASS): - pass # the pc is already set to the *start* of the opcode - elif opnum == rop.GUARD_NO_EXCEPTION or opnum == rop.GUARD_EXCEPTION: - exception = self.cpu.grab_exc_value(deadframe) + def prepare_resume_from_failure(self, deadframe, resumedescr): + exception = self.cpu.grab_exc_value(deadframe) + if isinstance(resumedescr, compile.ResumeGuardExcDescr): if exception: 
self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception)) @@ -2477,20 +2482,8 @@ self.handle_possible_exception() except ChangeFrame: pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass # XXX we want to do something special in resume descr, - # but not now - elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass - elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing - self.clear_exception() else: - from rpython.jit.metainterp.resoperation import opname - raise NotImplementedError(opname[opnum]) + assert not exception def get_procedure_token(self, greenkey, with_compiled_targets=False): JitCell = self.jitdriver_sd.warmstate.JitCell @@ -2773,18 +2766,6 @@ else: self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) - def handle_possible_overflow_error(self): - if self.last_exc_value: - op = self.generate_guard(rop.GUARD_OVERFLOW, None) - op.setref_base(lltype.cast_opaque_ptr(llmemory.GCREF, - self.last_exc_value)) - assert self.class_of_last_exc_is_const - self.last_exc_box = ConstPtr( - lltype.cast_opaque_ptr(llmemory.GCREF, self.last_exc_value)) - self.finishframe_exception() - else: - self.generate_guard(rop.GUARD_NO_OVERFLOW, None) - def assert_no_exception(self): assert not self.last_exc_value @@ -3250,16 +3231,17 @@ print '-> %r' % (resultbox,) assert argcodes[next_argcode] == '>' result_argcode = argcodes[next_argcode + 1] - assert resultbox.type == {'i': history.INT, - 'r': history.REF, - 'f': history.FLOAT}[result_argcode] + if 'ovf' not in name: + assert resultbox.type == {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT}[result_argcode] else: resultbox = unboundmethod(self, *args) # if resultbox is not None: self.make_result_of_lastop(resultbox) elif not we_are_translated(): - assert self._result_argcode in 'v?' + assert self._result_argcode in 'v?' 
or 'ovf' in name # unboundmethod = getattr(MIFrame, 'opimpl_' + name).im_func argtypes = unrolling_iterable(unboundmethod.argtypes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -236,6 +236,9 @@ return (self.getopnum() == rop.GUARD_OVERFLOW or self.getopnum() == rop.GUARD_NO_OVERFLOW) + def is_jit_debug(self): + return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST + def is_always_pure(self): return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST @@ -375,6 +378,7 @@ newop.rd_frame_info_list = self.rd_frame_info_list return newop + # =========== # type mixins # =========== @@ -689,7 +693,7 @@ 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set 'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d/n', - 'GUARD_OVERFLOW/0d/r', + 'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set 'GUARD_NOT_FORCED_2/0d/n', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d/n', @@ -806,10 +810,12 @@ 'UNICODESETITEM/3/n', 'COND_CALL_GC_WB/1d/n', # [objptr] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/2d/n', # [objptr, arrayindex] (write barr. 
for array) + '_JIT_DEBUG_FIRST', 'DEBUG_MERGE_POINT/*/n', # debugging only 'ENTER_PORTAL_FRAME/2/n', # debugging only 'LEAVE_PORTAL_FRAME/1/n', # debugging only 'JIT_DEBUG/*/n', # debugging only + '_JIT_DEBUG_LAST', 'VIRTUAL_REF_FINISH/2/n', # removed before it's passed to the backend 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5/n', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -115,10 +115,13 @@ while y > 0: myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += ovfcheck(x * x) - x += 1 - res += ovfcheck(x * x) - y -= 1 + try: + res += ovfcheck(x * x) + x += 1 + res += ovfcheck(x * x) + y -= 1 + except OverflowError: + assert 0 return res res = self.meta_interp(f, [6, 7]) assert res == 1323 @@ -151,7 +154,10 @@ myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) b = y * 2 - res += ovfcheck(x * x) + b + try: + res += ovfcheck(x * x) + b + except OverflowError: + assert 0 y -= 1 return res res = self.meta_interp(f, [6, 7]) @@ -230,8 +236,8 @@ res = self.meta_interp(f, [6, 32, 16]) assert res == 1692 self.check_trace_count(3) - self.check_resops({'int_lt': 2, 'int_gt': 4, 'guard_false': 2, - 'guard_true': 4, 'int_sub': 4, 'jump': 3, + self.check_resops({'int_lt': 4, 'int_gt': 4, 'guard_false': 2, + 'guard_true': 6, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) def test_loop_invariant_mul_ovf2(self): @@ -400,7 +406,7 @@ return externfn(n, n+1) res = self.interp_operations(f, [6]) assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) + self.check_operations_history(int_add=1, int_mul=0, call_i=1, guard_no_exception=0) def test_residual_call_elidable(self): def externfn(x, y): @@ -413,7 +419,7 @@ assert res == 42 # CALL_PURE is not recorded in the 
history if all-constant args self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure_i=0) + call_i=0, call_pure_i=0) def test_residual_call_elidable_1(self): @elidable @@ -425,7 +431,7 @@ assert res == 42 # CALL_PURE is recorded in the history if not-all-constant args self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure_i=1) + call_i=0, call_pure_i=1) def test_residual_call_elidable_2(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -653,11 +659,11 @@ # res = self.meta_interp(f, [3, 6], repeat=7, function_threshold=0) assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle + self.check_history(call_n=0) # because the trace starts in the middle # res = self.meta_interp(f, [60, 84], repeat=7) assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately + self.check_history(call_n=1) # because the trace starts immediately def test_unroll_one_loop_iteration(self): def unroll(code): @@ -679,11 +685,11 @@ res = self.meta_interp(f, [1, 4, 1], enable_opts="", inline=True) assert res == f(1, 4, 1) - self.check_history(call_assembler=0) + self.check_history(call_assembler_i=0) res = self.meta_interp(f, [1, 4, 2], enable_opts="", inline=True) assert res == f(1, 4, 2) - self.check_history(call_assembler=1) + self.check_history(call_assembler_i=1) def test_format(self): def f(n): @@ -723,6 +729,7 @@ elif n == 7: a = 3 else: a = 2 x = intmask(x * 10 + a) + #print "XXXXXXXXXXXXXXXX", x i += 1 return x res = self.meta_interp(f, [0], backendopt=True) @@ -834,7 +841,7 @@ return a.foo * x res = self.interp_operations(f, [42]) assert res == 210 - self.check_operations_history(getfield_gc=1) + self.check_operations_history(getfield_gc_i=1) def test_getfield_immutable(self): class A: @@ -851,7 +858,7 @@ return a.foo * x res = self.interp_operations(f, [42]) assert res == 210 - self.check_operations_history(getfield_gc=0) + self.check_operations_history(getfield_gc_i=0) 
def test_setfield_bool(self): class A: @@ -882,6 +889,24 @@ res = self.interp_operations(f, [1, sys.maxint]) assert res == -42 + def test_ovf_raise(self): + def g(x, y): + try: + return ovfcheck(x * y) + except OverflowError: + raise + + def f(x, y): + try: + return g(x, y) + except OverflowError: + return 3 + + res = self.interp_operations(f, [sys.maxint, 2]) + assert res == 3 + res = self.interp_operations(f, [3, 2]) + assert res == 6 + def test_int_sub_ovf(self): def f(x, y): try: @@ -1356,7 +1381,7 @@ return g(a, b) res = self.interp_operations(f, [3, 5]) assert res == 8 - self.check_operations_history(int_add=0, call=1) + self.check_operations_history(int_add=0, call_i=1) def test_listcomp(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) @@ -1380,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=0) + self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -1455,16 +1480,6 @@ res = self.meta_interp(f, [299], listops=True) assert res == f(299) self.check_resops(guard_class=0, guard_value=6) - # - # The original 'guard_class' is rewritten to be directly 'guard_value'. - # Check that this rewrite does not interfere with the descr, which - # should be a full-fledged multivalued 'guard_value' descr. 
- if self.basic: - for loop in get_stats().get_all_loops(): - for op in loop.get_operations(): - if op.getopname() == "guard_value": - descr = op.getdescr() - assert descr.get_index_of_guard_value() >= 0 def test_merge_guardnonnull_guardclass(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) @@ -1866,7 +1881,8 @@ res = self.meta_interp(g, [6, 20]) assert res == g(6, 20) self.check_trace_count(8) - self.check_resops(getarrayitem_gc_i=10) + # 6 extra from sharing guard data + self.check_resops(getarrayitem_gc_i=10 + 6) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) @@ -2055,8 +2071,8 @@ res = self.meta_interp(g, [3, 23]) assert res == 7068153 self.check_trace_count(6) - self.check_resops(guard_true=6, guard_class=2, int_mul=3, - int_add=3, guard_false=3) + self.check_resops(guard_true=8, guard_class=2, int_mul=3, + int_add=3, guard_false=4) def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) @@ -2079,7 +2095,7 @@ self.check_enter_count(2) def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = ['x']) + myjitdriver = JitDriver(greens = ['g'], reds = ['x', 'l']) @dont_look_inside def residual(): print "hi there" @@ -2090,14 +2106,15 @@ residual() y += 1 def f(x, g): + l = [] n = 0 while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) + myjitdriver.can_enter_jit(x=x, g=g, l=l) + myjitdriver.jit_merge_point(x=x, g=g, l=l) loop(g) x -= 1 - n = current_trace_length() - return n + l.append(current_trace_length()) + return l[-2] # not the blackholed version res = self.meta_interp(f, [5, 8]) assert 14 < res < 42 res = self.meta_interp(f, [5, 2]) @@ -2619,7 +2636,10 @@ node2.val = 7 if a >= 100: sa += 1 - sa += ovfcheck(i + i) + try: + sa += ovfcheck(i + i) + except OverflowError: + assert 0 node1 = A(i) i += 1 assert self.meta_interp(f, [20, 7]) == f(20, 7) @@ -2638,7 
+2658,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=2) + self.check_resops(int_lt=4, int_le=0, int_ge=0, int_gt=4) def test_intbounds_not_generalized1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa']) @@ -2655,7 +2675,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3) + self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=5) def test_intbounds_not_generalized2(self): @@ -2676,7 +2696,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) - self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=2) + self.check_resops(int_lt=4, int_le=3, int_ge=3, int_gt=4) def test_retrace_limit1(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a']) @@ -3876,6 +3896,7 @@ class TestLLtype(BaseLLtypeTests, LLJitMixin): def test_tagged(self): + py.test.skip("tagged unsupported") from rpython.rlib.objectmodel import UnboxedValue class Base(object): __slots__ = () @@ -3887,8 +3908,10 @@ return self.a > 0 def dec(self): - return Int(self.a - 1) - + try: + return Int(self.a - 1) + except OverflowError: + raise class Float(Base): def __init__(self, a): @@ -3997,7 +4020,7 @@ From noreply at buildbot.pypy.org Sat Oct 3 12:31:42 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 12:31:42 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: fix some of the test_pypy_c tests Message-ID: <20151003103142.C7E9A1C146A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79946:5bd6e59523f6 Date: 2015-10-02 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/5bd6e59523f6/ Log: fix some of the test_pypy_c tests diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -70,7 
+70,7 @@ py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds stderr = '' - assert not stderr + #assert not stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] @@ -448,7 +448,7 @@ loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 - assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) > 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert len(loops) == 1 @@ -469,9 +469,9 @@ ops = loop.allops() assert log.opnames(ops) == [ # this is the actual loop - 'int_lt', 'guard_true', 'int_add', + 'guard_not_invalidated', 'int_lt', 'guard_true', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -493,7 +493,7 @@ # the 'jump' at the end is because the last opcode in the loop # coincides with the first, and so it thinks that 'jump' belongs to # the id - assert log.opnames(ops) == ['int_lt', 'guard_true', 'jump'] + assert log.opnames(ops) == ['guard_not_invalidated', 'int_lt', 'guard_true', 'jump'] def test_ops_by_id_and_opcode(self): def f(): @@ -534,9 +534,9 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'guard_not_invalidated', 'int_lt', 'guard_true', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -550,11 +550,11 @@ log = self.run(f) loop, = log.loops_by_id('increment') assert loop.match(""" + guard_not_invalidated(descr=...) i6 = int_lt(i4, 1003) guard_true(i6, descr=...) 
i8 = int_add(i4, 1) # signal checking stuff - guard_not_invalidated(descr=...) i10 = getfield_raw_i(..., descr=<.* pypysig_long_struct.c_value .*>) i14 = int_lt(i10, 0) guard_false(i14, descr=...) @@ -562,6 +562,7 @@ """) # assert loop.match(""" + guard_not_invalidated(descr=...) i6 = int_lt(i4, 1003) guard_true(i6, descr=...) i8 = int_add(i4, 1) @@ -570,6 +571,7 @@ """) # py.test.raises(InvalidMatch, loop.match, """ + guard_not_invalidated(descr=...) i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -18,6 +18,7 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) i9 = int_add(i5, 1) @@ -89,9 +90,9 @@ """) elif sys.maxint == 2 ** 63 - 1: assert loop.match(""" + guard_not_invalidated(descr=...) i13 = int_lt(i8, 307200) guard_true(i13, descr=...) - guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) i14 = getarrayitem_raw_i(i10, i8, descr=) # advanced: the following int_add cannot overflow, because: diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -17,7 +17,6 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('match', """ - guard_not_invalidated(descr=...) i65 = getfield_gc_i(p18, descr=...) i67 = int_gt(0, i65) guard_false(i67, descr=...) @@ -40,7 +39,6 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('unpack', """ - guard_not_invalidated(descr=...) 
p90 = newstr(4) call_n(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) guard_no_exception(descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,9 +72,8 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", - "guard_not_invalidated"] + "getfield_gc_r", + "guard_value"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] # @@ -130,9 +129,8 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('meth1', opcode='LOOKUP_METHOD') - assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', - 'guard_value', - 'guard_not_invalidated'] + assert log.opnames(ops) == ['getfield_gc_r', + 'guard_value'] # the second LOOKUP_METHOD is folded away assert list(entry_bridge.ops_by_id('meth2', opcode='LOOKUP_METHOD')) == [] # @@ -140,14 +138,13 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i15 = int_lt(i6, i9) guard_true(i15, descr=...) - guard_not_invalidated(descr=...) i16 = force_token() - i17 = int_add_ovf(i10, i6) - guard_no_overflow(descr=...) + i17 = int_add(1, i6) i18 = force_token() - i19 = int_add_ovf(i10, i17) + i19 = int_add_ovf(1, i17) guard_no_overflow(descr=...) --TICK-- jump(..., descr=...) @@ -174,9 +171,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i14 = int_lt(i6, i9) guard_true(i14, descr=...) - guard_not_invalidated(descr=...) i15 = force_token() i17 = int_add_ovf(i8, 1) guard_no_overflow(descr=...) 
@@ -409,9 +406,9 @@ assert log.result == 10000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i10 = int_lt(i5, i6) guard_true(i10, descr=...) - guard_not_invalidated(descr=...) i120 = int_add(i5, 1) --TICK-- jump(..., descr=...) @@ -433,15 +430,14 @@ assert loop.match(""" guard_value(i4, 1, descr=...) guard_isnull(p5, descr=...) - guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_value(p7, ConstPtr(ptr25), descr=...) p26 = getfield_gc_r(p7, descr=) guard_value(p26, ConstPtr(ptr27), descr=...) - guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() @@ -472,6 +468,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- @@ -495,9 +492,9 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated? i3 = force_token() i4 = int_add(i0, 1) --TICK-- @@ -523,9 +520,9 @@ assert log.result == 500 loop, = log.loops_by_id('call') assert loop.match(""" + guard_not_invalidated(..., descr=...) i65 = int_lt(i58, i29) guard_true(i65, descr=...) - guard_not_invalidated(..., descr=...) 
i66 = force_token() i67 = force_token() i69 = int_sub_ovf(1, i56) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -61,9 +61,9 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i8 = int_lt(i5, i7) guard_true(i8, descr=...) - guard_not_invalidated(descr=...) p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call_i(ConstClass(ll_strhash), p10, descr=) @@ -119,9 +119,9 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) i9 = int_add(i5, 1) --TICK-- jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -35,9 +35,9 @@ assert log.result == 0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i5 = int_is_true(i3) guard_true(i5, descr=...) - guard_not_invalidated(descr=...) --EXC-TICK-- i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=...) @@ -83,9 +83,9 @@ assert log.result == 100000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i4, i5) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- @@ -109,6 +109,7 @@ assert log.result == 2001 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i3 = int_lt(i1, i2) guard_true(i3, descr=...) 
i4 = int_add(i1, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_getframe.py b/pypy/module/pypyjit/test_pypy_c/test_getframe.py --- a/pypy/module/pypyjit/test_pypy_c/test_getframe.py +++ b/pypy/module/pypyjit/test_pypy_c/test_getframe.py @@ -16,9 +16,9 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i54 = int_lt(i47, i28) guard_true(i54, descr=...) - guard_not_invalidated(descr=...) i55 = int_add(i47, 1) --TICK-- jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -18,7 +18,6 @@ assert loop.match_by_id("loadglobal", """ p12 = getfield_gc_r(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) - guard_not_invalidated(descr=...) p19 = getfield_gc_r(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py --- a/pypy/module/pypyjit/test_pypy_c/test_import.py +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py @@ -15,7 +15,6 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - guard_not_invalidated(descr=...) """) def test_import_fast_path(self, tmpdir): diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -21,9 +21,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) i9 = int_add_ovf(i5, 2) guard_no_overflow(descr=...) 
--TICK-- @@ -46,9 +46,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i9 = int_lt(i5, i6) guard_true(i9, descr=...) - guard_not_invalidated(descr=...) i10 = int_add(i5, 1) --TICK-- jump(..., descr=...) @@ -105,8 +105,7 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') - assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc_i'] + assert log.opnames(ops) == ['guard_value', 'getfield_gc_i'] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] # @@ -114,9 +113,9 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i58 = int_lt(i38, i31) guard_true(i58, descr=...) - guard_not_invalidated(descr=...) i59 = int_add_ovf(i57, 1) guard_no_overflow(descr=...) p60 = force_token() @@ -153,7 +152,7 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') - assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', + assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', 'guard_nonnull_class', 'getfield_gc_r', 'guard_value', # type check on the attribute ] @@ -164,9 +163,9 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i70 = int_lt(i58, i33) guard_true(i70, descr=...) - guard_not_invalidated(descr=...) p71 = getfield_gc_r(p64, descr=...) guard_value(p71, ConstPtr(ptr42), descr=...) p72 = force_token() @@ -208,7 +207,6 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('loadattr1', ''' - guard_not_invalidated(descr=...) i19 = call_i(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) 
guard_no_exception(descr=...) i22 = int_lt(i19, 0) @@ -235,7 +233,6 @@ log = self.run(main, [], threshold=80) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("contains", """ - guard_not_invalidated(descr=...) i11 = force_token() i12 = int_add(i5, 1) """) @@ -274,9 +271,9 @@ log = self.run(main, []) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i78 = int_lt(i72, 300) guard_true(i78, descr=...) - guard_not_invalidated(descr=...) i79 = force_token() i80 = force_token() i81 = int_add(i72, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -89,6 +89,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, i9) guard_true(i10, descr=...) i12 = int_add_ovf(i7, 1) @@ -113,6 +114,7 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i7 = int_lt(i5, 300) guard_true(i7, descr=...) i9 = int_sub_ovf(i5, 10) @@ -139,6 +141,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, i9) guard_true(i10, descr=...) i12 = int_add_ovf(i8, 5) @@ -168,6 +171,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, 300) guard_true(i10, descr=...) i12 = int_add(i8, 5) @@ -202,6 +206,7 @@ assert log.result == main(42, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, 300) guard_true(i10, descr=...) 
i12 = int_eq(i8, 10) @@ -229,9 +234,9 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i8 = int_lt(i6, 300) guard_true(i8, descr=...) - guard_not_invalidated? i10 = int_lshift(i6, 1) i12 = int_add_ovf(i5, 1) guard_no_overflow(descr=...) @@ -252,9 +257,9 @@ assert log.result == 300*8 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i8 = int_lt(i6, 300) guard_true(i8, descr=...) - guard_not_invalidated? i10 = int_add_ovf(i5, 8) guard_no_overflow(descr=...) i12 = int_add(i6, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -17,9 +17,9 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i3 = float_le(f1, 0.0) guard_false(i3, descr=...) @@ -46,9 +46,9 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) @@ -75,9 +75,9 @@ assert log.result == main(500) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i1 = int_gt(i0, 0) guard_true(i1, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i2 = float_eq(f1, inf) i3 = float_eq(f1, -inf) @@ -102,7 +102,6 @@ assert abs(log.result - main(500)) < 1e-9 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("pow", """ - guard_not_invalidated(descr=...) 
f38 = float_mul(f30, f30) f39 = float_sub(f30, f38) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -105,6 +105,7 @@ guard_true(i94, descr=...) """ assert loop.match(""" + guard_not_invalidated(descr=...) i76 = int_lt(i71, 300) guard_true(i76, descr=...) i77 = int_ge(i71, i59) @@ -142,6 +143,7 @@ guard_true(i98, descr=...) """ assert loop.match(""" + guard_not_invalidated(descr=...) i81 = int_lt(i76, 300) guard_true(i81, descr=...) i82 = int_ge(i76, i62) @@ -150,7 +152,6 @@ i84 = int_add(i58, i83) """ + alignment_check + """ f85 = raw_load_f(i70, i84, descr=) - guard_not_invalidated(descr=...) f86 = float_add(f74, f85) i87 = int_add(i76, 1) --TICK-- @@ -171,9 +172,9 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i86 = int_lt(i79, i45) guard_true(i86, descr=...) - guard_not_invalidated(descr=...) i88 = int_ge(i87, i59) guard_false(i88, descr=...) f90 = raw_load_f(i67, i89, descr=) @@ -202,6 +203,7 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i125 = int_lt(i117, i44) guard_true(i125, descr=...) i126 = int_lt(i117, i50) @@ -231,13 +233,13 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i128 = int_lt(i120, i42) guard_true(i128, descr=...) i129 = int_lt(i120, i48) guard_true(i129, descr=...) i131 = int_mul(i120, i57) i132 = int_add(i53, i131) - guard_not_invalidated(descr=...) 
raw_store(i103, i132, 42.000000, descr=) i153 = int_add(i120, 1) i154 = getfield_raw_i(#, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -14,9 +14,9 @@ assert log.result == 300*3000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i4, 300) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) i9 = int_add_ovf(i5, 3000) guard_no_overflow(descr=...) i11 = int_add(i4, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -23,6 +23,7 @@ # to contain the very same operations loop0, loop1 = log.loops_by_filename(self.filepath) expected = """ + guard_not_invalidated(descr=...) i9 = int_le(i7, i8) guard_true(i9, descr=...) i11 = int_add_ovf(i7, 1) @@ -50,6 +51,7 @@ assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_gt(i4, 1) guard_true(i7, descr=...) i8 = int_mul_ovf(i5, i4) @@ -63,6 +65,7 @@ assert log.result == 15511210043330985984000000L loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_gt(i4, 1) guard_true(i7, descr=...) p11 = call_r(ConstClass(rbigint.int_mul), p5, i4, descr=...) @@ -84,6 +87,7 @@ assert log.result == 1000.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i9 = float_lt(f5, f7) guard_true(i9, descr=...) f10 = float_add(f8, f5) @@ -110,9 +114,9 @@ assert log.result == 4000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i12 = int_is_true(i4) guard_true(i12, descr=...) 
- guard_not_invalidated(descr=...) i10p = getfield_gc_pure_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) @@ -142,12 +146,12 @@ assert log.result == 1000 * 999 / 2 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) - guard_not_invalidated(descr=...) i21 = int_lt(i10, 0) guard_false(i21, descr=...) i22 = int_lt(i10, i14) From noreply at buildbot.pypy.org Sat Oct 3 12:31:45 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 12:31:45 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: merge Message-ID: <20151003103145.0CEC91C146A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79947:07d912e7d126 Date: 2015-10-03 11:42 +0100 http://bitbucket.org/pypy/pypy/changeset/07d912e7d126/ Log: merge diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -70,7 +70,7 @@ py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds stderr = '' - assert not stderr + #assert not stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] @@ -448,7 +448,7 @@ loops = log.loops_by_filename(self.filepath, is_entry_bridge=True) assert len(loops) == 1 assert len([op for op in loops[0].allops() if op.name == 'label']) == 0 - assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) > 0 + assert len([op for op in loops[0].allops() if op.name == 'guard_nonnull_class']) == 0 # loops = log.loops_by_filename(self.filepath, is_entry_bridge='*') assert len(loops) == 1 @@ -469,9 +469,9 @@ ops = loop.allops() assert log.opnames(ops) == [ # this is the actual loop - 
'int_lt', 'guard_true', 'int_add', + 'guard_not_invalidated', 'int_lt', 'guard_true', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -493,7 +493,7 @@ # the 'jump' at the end is because the last opcode in the loop # coincides with the first, and so it thinks that 'jump' belongs to # the id - assert log.opnames(ops) == ['int_lt', 'guard_true', 'jump'] + assert log.opnames(ops) == ['guard_not_invalidated', 'int_lt', 'guard_true', 'jump'] def test_ops_by_id_and_opcode(self): def f(): @@ -534,9 +534,9 @@ ops = log.opnames(loop.allops()) assert ops == [ # this is the actual loop - 'int_lt', 'guard_true', 'force_token', 'int_add', + 'guard_not_invalidated', 'int_lt', 'guard_true', 'force_token', 'int_add', # this is the signal checking stuff - 'guard_not_invalidated', 'getfield_raw_i', 'int_lt', 'guard_false', + 'getfield_raw_i', 'int_lt', 'guard_false', 'jump' ] @@ -550,11 +550,11 @@ log = self.run(f) loop, = log.loops_by_id('increment') assert loop.match(""" + guard_not_invalidated(descr=...) i6 = int_lt(i4, 1003) guard_true(i6, descr=...) i8 = int_add(i4, 1) # signal checking stuff - guard_not_invalidated(descr=...) i10 = getfield_raw_i(..., descr=<.* pypysig_long_struct.c_value .*>) i14 = int_lt(i10, 0) guard_false(i14, descr=...) @@ -562,6 +562,7 @@ """) # assert loop.match(""" + guard_not_invalidated(descr=...) i6 = int_lt(i4, 1003) guard_true(i6, descr=...) i8 = int_add(i4, 1) @@ -570,6 +571,7 @@ """) # py.test.raises(InvalidMatch, loop.match, """ + guard_not_invalidated(descr=...) 
i6 = int_lt(i4, 1003) guard_true(i6) i8 = int_add(i5, 1) # variable mismatch diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -18,6 +18,7 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) i9 = int_add(i5, 1) @@ -89,9 +90,9 @@ """) elif sys.maxint == 2 ** 63 - 1: assert loop.match(""" + guard_not_invalidated(descr=...) i13 = int_lt(i8, 307200) guard_true(i13, descr=...) - guard_not_invalidated(descr=...) # the bound check guard on img has been killed (thanks to the asserts) i14 = getarrayitem_raw_i(i10, i8, descr=) # advanced: the following int_add cannot overflow, because: diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -17,7 +17,6 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('match', """ - guard_not_invalidated(descr=...) i65 = getfield_gc_i(p18, descr=...) i67 = int_gt(0, i65) guard_false(i67, descr=...) @@ -40,7 +39,6 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('unpack', """ - guard_not_invalidated(descr=...) p90 = newstr(4) call_n(ConstClass(copy_raw_to_string), i55, p90, 0, 4, descr=) guard_no_exception(descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -72,9 +72,8 @@ # LOAD_GLOBAL of OFFSET ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL') assert log.opnames(ops) == ["guard_value", - "guard_value", - "getfield_gc_r", "guard_value", - "guard_not_invalidated"] + "getfield_gc_r", + "guard_value"] ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL') assert log.opnames(ops) == [] # @@ -130,9 +129,8 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('meth1', opcode='LOOKUP_METHOD') - assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', - 'guard_value', - 'guard_not_invalidated'] + assert log.opnames(ops) == ['getfield_gc_r', + 'guard_value'] # the second LOOKUP_METHOD is folded away assert list(entry_bridge.ops_by_id('meth2', opcode='LOOKUP_METHOD')) == [] # @@ -140,14 +138,13 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i15 = int_lt(i6, i9) guard_true(i15, descr=...) - guard_not_invalidated(descr=...) i16 = force_token() - i17 = int_add_ovf(i10, i6) - guard_no_overflow(descr=...) + i17 = int_add(1, i6) i18 = force_token() - i19 = int_add_ovf(i10, i17) + i19 = int_add_ovf(1, i17) guard_no_overflow(descr=...) --TICK-- jump(..., descr=...) @@ -174,9 +171,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i14 = int_lt(i6, i9) guard_true(i14, descr=...) - guard_not_invalidated(descr=...) i15 = force_token() i17 = int_add_ovf(i8, 1) guard_no_overflow(descr=...) @@ -409,9 +406,9 @@ assert log.result == 10000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) 
i10 = int_lt(i5, i6) guard_true(i10, descr=...) - guard_not_invalidated(descr=...) i120 = int_add(i5, 1) --TICK-- jump(..., descr=...) @@ -433,15 +430,14 @@ assert loop.match(""" guard_value(i4, 1, descr=...) guard_isnull(p5, descr=...) - guard_nonnull_class(p12, ConstClass(W_IntObject), descr=...) guard_value(p2, ConstPtr(ptr21), descr=...) + guard_not_invalidated(descr=...) i22 = getfield_gc_pure_i(p12, descr=) i24 = int_lt(i22, 5000) guard_true(i24, descr=...) guard_value(p7, ConstPtr(ptr25), descr=...) p26 = getfield_gc_r(p7, descr=) guard_value(p26, ConstPtr(ptr27), descr=...) - guard_not_invalidated(descr=...) p29 = call_r(ConstClass(_ll_1_threadlocalref_get__Ptr_GcStruct_objectLlT_Signed), #, descr=) p30 = getfield_gc_r(p29, descr=) p31 = force_token() @@ -472,6 +468,7 @@ i8 = getfield_gc_pure_i(p6, descr=) i10 = int_lt(i8, 5000) guard_true(i10, descr=...) + guard_not_invalidated? i11 = force_token() i13 = int_add(i8, 1) --TICK-- @@ -495,9 +492,9 @@ assert log.result == 500 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated? i3 = force_token() i4 = int_add(i0, 1) --TICK-- @@ -523,9 +520,9 @@ assert log.result == 500 loop, = log.loops_by_id('call') assert loop.match(""" + guard_not_invalidated(..., descr=...) i65 = int_lt(i58, i29) guard_true(i65, descr=...) - guard_not_invalidated(..., descr=...) i66 = force_token() i67 = force_token() i69 = int_sub_ovf(1, i56) diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py --- a/pypy/module/pypyjit/test_pypy_c/test_containers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py @@ -61,9 +61,9 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i8 = int_lt(i5, i7) guard_true(i8, descr=...) - guard_not_invalidated(descr=...) 
p10 = call_r(ConstClass(ll_str__IntegerR_SignedConst_Signed), i5, descr=) guard_no_exception(descr=...) i12 = call_i(ConstClass(ll_strhash), p10, descr=) @@ -119,9 +119,9 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) i9 = int_add(i5, 1) --TICK-- jump(..., descr=...) diff --git a/pypy/module/pypyjit/test_pypy_c/test_exception.py b/pypy/module/pypyjit/test_pypy_c/test_exception.py --- a/pypy/module/pypyjit/test_pypy_c/test_exception.py +++ b/pypy/module/pypyjit/test_pypy_c/test_exception.py @@ -35,9 +35,9 @@ assert log.result == 0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i5 = int_is_true(i3) guard_true(i5, descr=...) - guard_not_invalidated(descr=...) --EXC-TICK-- i12 = int_sub_ovf(i3, 1) guard_no_overflow(descr=...) @@ -83,9 +83,9 @@ assert log.result == 100000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i4, i5) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) --EXC-TICK-- i14 = int_add(i4, 1) --TICK-- @@ -109,6 +109,7 @@ assert log.result == 2001 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i3 = int_lt(i1, i2) guard_true(i3, descr=...) i4 = int_add(i1, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_getframe.py b/pypy/module/pypyjit/test_pypy_c/test_getframe.py --- a/pypy/module/pypyjit/test_pypy_c/test_getframe.py +++ b/pypy/module/pypyjit/test_pypy_c/test_getframe.py @@ -16,9 +16,9 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i54 = int_lt(i47, i28) guard_true(i54, descr=...) - guard_not_invalidated(descr=...) i55 = int_add(i47, 1) --TICK-- jump(..., descr=...) 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_globals.py b/pypy/module/pypyjit/test_pypy_c/test_globals.py --- a/pypy/module/pypyjit/test_pypy_c/test_globals.py +++ b/pypy/module/pypyjit/test_pypy_c/test_globals.py @@ -18,7 +18,6 @@ assert loop.match_by_id("loadglobal", """ p12 = getfield_gc_r(p10, descr=) guard_value(p12, ConstPtr(ptr13), descr=...) - guard_not_invalidated(descr=...) p19 = getfield_gc_r(ConstPtr(p17), descr=) guard_value(p19, ConstPtr(ptr20), descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_import.py b/pypy/module/pypyjit/test_pypy_c/test_import.py --- a/pypy/module/pypyjit/test_pypy_c/test_import.py +++ b/pypy/module/pypyjit/test_pypy_c/test_import.py @@ -15,7 +15,6 @@ assert log.result == 500 loop, = log.loops_by_id('import') assert loop.match_by_id('import', """ - guard_not_invalidated(descr=...) """) def test_import_fast_path(self, tmpdir): diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py --- a/pypy/module/pypyjit/test_pypy_c/test_instance.py +++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py @@ -21,9 +21,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i5, i6) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) i9 = int_add_ovf(i5, 2) guard_no_overflow(descr=...) --TICK-- @@ -46,9 +46,9 @@ assert log.result == 1000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i9 = int_lt(i5, i6) guard_true(i9, descr=...) - guard_not_invalidated(descr=...) i10 = int_add(i5, 1) --TICK-- jump(..., descr=...) 
@@ -105,8 +105,7 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') - assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', - 'getfield_gc_i'] + assert log.opnames(ops) == ['guard_value', 'getfield_gc_i'] # the STORE_ATTR is folded away assert list(entry_bridge.ops_by_id('meth1', opcode='STORE_ATTR')) == [] # @@ -114,9 +113,9 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i58 = int_lt(i38, i31) guard_true(i58, descr=...) - guard_not_invalidated(descr=...) i59 = int_add_ovf(i57, 1) guard_no_overflow(descr=...) p60 = force_token() @@ -153,7 +152,7 @@ # ------------------------------- entry_bridge, = log.loops_by_filename(self.filepath, is_entry_bridge=True) ops = entry_bridge.ops_by_id('mutate', opcode='LOAD_ATTR') - assert log.opnames(ops) == ['guard_value', 'guard_not_invalidated', + assert log.opnames(ops) == ['guard_value', 'getfield_gc_r', 'guard_nonnull_class', 'getfield_gc_r', 'guard_value', # type check on the attribute ] @@ -164,9 +163,9 @@ # ---------------------- loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i70 = int_lt(i58, i33) guard_true(i70, descr=...) - guard_not_invalidated(descr=...) p71 = getfield_gc_r(p64, descr=...) guard_value(p71, ConstPtr(ptr42), descr=...) p72 = force_token() @@ -208,7 +207,6 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('loadattr1', ''' - guard_not_invalidated(descr=...) i19 = call_i(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...) guard_no_exception(descr=...) i22 = int_lt(i19, 0) @@ -235,7 +233,6 @@ log = self.run(main, [], threshold=80) loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("contains", """ - guard_not_invalidated(descr=...) 
i11 = force_token() i12 = int_add(i5, 1) """) @@ -274,9 +271,9 @@ log = self.run(main, []) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i78 = int_lt(i72, 300) guard_true(i78, descr=...) - guard_not_invalidated(descr=...) i79 = force_token() i80 = force_token() i81 = int_add(i72, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py --- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py +++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py @@ -89,6 +89,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, i9) guard_true(i10, descr=...) i12 = int_add_ovf(i7, 1) @@ -113,6 +114,7 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i7 = int_lt(i5, 300) guard_true(i7, descr=...) i9 = int_sub_ovf(i5, 10) @@ -139,6 +141,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, i9) guard_true(i10, descr=...) i12 = int_add_ovf(i8, 5) @@ -168,6 +171,7 @@ assert log.result == (300, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, 300) guard_true(i10, descr=...) i12 = int_add(i8, 5) @@ -202,6 +206,7 @@ assert log.result == main(42, 300) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i10 = int_lt(i8, 300) guard_true(i10, descr=...) i12 = int_eq(i8, 10) @@ -229,9 +234,9 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i8 = int_lt(i6, 300) guard_true(i8, descr=...) - guard_not_invalidated? i10 = int_lshift(i6, 1) i12 = int_add_ovf(i5, 1) guard_no_overflow(descr=...) 
@@ -252,9 +257,9 @@ assert log.result == 300*8 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i8 = int_lt(i6, 300) guard_true(i8, descr=...) - guard_not_invalidated? i10 = int_add_ovf(i5, 8) guard_no_overflow(descr=...) i12 = int_add(i6, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_math.py b/pypy/module/pypyjit/test_pypy_c/test_math.py --- a/pypy/module/pypyjit/test_pypy_c/test_math.py +++ b/pypy/module/pypyjit/test_pypy_c/test_math.py @@ -17,9 +17,9 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i3 = float_le(f1, 0.0) guard_false(i3, descr=...) @@ -46,9 +46,9 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i2 = int_lt(i0, i1) guard_true(i2, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i6 = --ISINF--(f1) guard_false(i6, descr=...) @@ -75,9 +75,9 @@ assert log.result == main(500) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i1 = int_gt(i0, 0) guard_true(i1, descr=...) - guard_not_invalidated(descr=...) f1 = cast_int_to_float(i0) i2 = float_eq(f1, inf) i3 = float_eq(f1, -inf) @@ -102,7 +102,6 @@ assert abs(log.result - main(500)) < 1e-9 loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id("pow", """ - guard_not_invalidated(descr=...) f38 = float_mul(f30, f30) f39 = float_sub(f30, f38) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -105,6 +105,7 @@ guard_true(i94, descr=...) 
""" assert loop.match(""" + guard_not_invalidated(descr=...) i76 = int_lt(i71, 300) guard_true(i76, descr=...) i77 = int_ge(i71, i59) @@ -142,6 +143,7 @@ guard_true(i98, descr=...) """ assert loop.match(""" + guard_not_invalidated(descr=...) i81 = int_lt(i76, 300) guard_true(i81, descr=...) i82 = int_ge(i76, i62) @@ -150,7 +152,6 @@ i84 = int_add(i58, i83) """ + alignment_check + """ f85 = raw_load_f(i70, i84, descr=) - guard_not_invalidated(descr=...) f86 = float_add(f74, f85) i87 = int_add(i76, 1) --TICK-- @@ -171,9 +172,9 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i86 = int_lt(i79, i45) guard_true(i86, descr=...) - guard_not_invalidated(descr=...) i88 = int_ge(i87, i59) guard_false(i88, descr=...) f90 = raw_load_f(i67, i89, descr=) @@ -202,6 +203,7 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i125 = int_lt(i117, i44) guard_true(i125, descr=...) i126 = int_lt(i117, i50) @@ -231,13 +233,13 @@ assert log.result == 42.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i128 = int_lt(i120, i42) guard_true(i128, descr=...) i129 = int_lt(i120, i48) guard_true(i129, descr=...) i131 = int_mul(i120, i57) i132 = int_add(i53, i131) - guard_not_invalidated(descr=...) raw_store(i103, i132, 42.000000, descr=) i153 = int_add(i120, 1) i154 = getfield_raw_i(#, descr=) diff --git a/pypy/module/pypyjit/test_pypy_c/test_min_max.py b/pypy/module/pypyjit/test_pypy_c/test_min_max.py --- a/pypy/module/pypyjit/test_pypy_c/test_min_max.py +++ b/pypy/module/pypyjit/test_pypy_c/test_min_max.py @@ -14,9 +14,9 @@ assert log.result == 300*3000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_lt(i4, 300) guard_true(i7, descr=...) - guard_not_invalidated(descr=...) 
i9 = int_add_ovf(i5, 3000) guard_no_overflow(descr=...) i11 = int_add(i4, 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -23,6 +23,7 @@ # to contain the very same operations loop0, loop1 = log.loops_by_filename(self.filepath) expected = """ + guard_not_invalidated(descr=...) i9 = int_le(i7, i8) guard_true(i9, descr=...) i11 = int_add_ovf(i7, 1) @@ -50,6 +51,7 @@ assert log.result == 5040 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_gt(i4, 1) guard_true(i7, descr=...) i8 = int_mul_ovf(i5, i4) @@ -63,6 +65,7 @@ assert log.result == 15511210043330985984000000L loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i7 = int_gt(i4, 1) guard_true(i7, descr=...) p11 = call_r(ConstClass(rbigint.int_mul), p5, i4, descr=...) @@ -84,6 +87,7 @@ assert log.result == 1000.0 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i9 = float_lt(f5, f7) guard_true(i9, descr=...) f10 = float_add(f8, f5) @@ -110,9 +114,9 @@ assert log.result == 4000 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i12 = int_is_true(i4) guard_true(i12, descr=...) - guard_not_invalidated(descr=...) i10p = getfield_gc_pure_i(p10, descr=...) i10 = int_mul_ovf(2, i10p) guard_no_overflow(descr=...) @@ -142,13 +146,12 @@ assert log.result == 1000 * 999 / 2 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) i18 = force_token() setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) - guard_not_invalidated(descr=...) - i84 = int_sub(i14, 1) i21 = int_lt(i10, 0) guard_false(i21, descr=...) 
i22 = int_lt(i10, i14) From noreply at buildbot.pypy.org Sat Oct 3 12:31:49 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 12:31:49 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: merge default Message-ID: <20151003103149.CFF5F1C146A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79948:43bd40a0945b Date: 2015-10-03 11:42 +0100 http://bitbucket.org/pypy/pypy/changeset/43bd40a0945b/ Log: merge default diff too long, truncating to 2000 out of 3839 lines diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . 
import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + 
tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def 
_get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if 
isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = 
[self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. # This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = 
tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals 
= (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 
+773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,8 +5,8 @@ with any external library. Right now, there are the following possibilities of providing -third-party modules for the PyPy python interpreter (in order of -usefulness): +third-party modules for the PyPy python interpreter (in order, from most +directly useful to most messy to use with PyPy): * Write them in pure Python and use CFFI_. 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -33,3 +33,13 @@ .. branch: remember-tracing-counts Reenable jithooks + +.. branch: detect_egd2 + +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists + seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. 
- seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,27 +253,30 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: - return getattr(space, objspacename)(w_b, w_a) + # here, if coerce returns a non-W_Instance object as first + # argument, then give up. The idea is that this strange + # case should already have been handled by the binaryop() + # called from descroperation first. 
+ return space.w_NotImplemented rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +286,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +526,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +635,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func 
= self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) + else: return space.w_NotImplemented - else: - return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,14 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ 
@unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -125,12 +126,25 @@ cdata[0] = value +# XXX explicitly use an integer type instead of lltype.UniChar here, +# because for now the latter is defined as unsigned by RPython (even +# though it may be signed when 'wchar_t' is written to C). 
+WCHAR_INT = {(2, False): rffi.USHORT, + (4, False): rffi.UINT, + (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), + rfficache.signof_c_type('wchar_t')] +WCHAR_INTP = rffi.CArrayPtr(WCHAR_INT) + class W_CTypePrimitiveUniChar(W_CTypePrimitiveCharOrUniChar): _attrs_ = [] + if rffi.r_wchar_t.SIGN: + def write_raw_integer_data(self, w_cdata, value): + w_cdata.write_raw_signed_data(value) + def cast_to_int(self, cdata): - unichardata = rffi.cast(rffi.CWCHARP, cdata) - return self.space.wrap(ord(unichardata[0])) + unichardata = rffi.cast(WCHAR_INTP, cdata) + return self.space.wrap(unichardata[0]) def convert_to_object(self, cdata): unichardata = rffi.cast(rffi.CWCHARP, cdata) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, 
length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) 
- try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is 
still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = 
rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -241,20 +241,26 @@ res = libssl_RAND_status() return space.wrap(res) - @unwrap_spec(path=str) - def RAND_egd(space, path): - """RAND_egd(path) -> bytes + if HAVE_OPENSSL_RAND_EGD: + @unwrap_spec(path=str) + def RAND_egd(space, path): + """RAND_egd(path) -> bytes - Queries the entropy gather daemon (EGD) on socket path. Returns number - of bytes read. Raises socket.sslerror if connection to EGD fails or - if it does provide enough data to seed PRNG.""" - with rffi.scoped_str2charp(path) as socket_path: - bytes = libssl_RAND_egd(socket_path) - if bytes == -1: - raise ssl_error(space, - "EGD connection failed or EGD did not return " - "enough data to seed the PRNG") - return space.wrap(bytes) + Queries the entropy gather daemon (EGD) on socket path. Returns number + of bytes read. 
Raises socket.sslerror if connection to EGD fails or + if it does provide enough data to seed PRNG.""" + with rffi.scoped_str2charp(path) as socket_path: + bytes = libssl_RAND_egd(socket_path) + if bytes == -1: + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") + return space.wrap(bytes) + else: + # Dummy func for platforms missing RAND_egd(). Most likely LibreSSL. + @unwrap_spec(path=str) + def RAND_egd(space, path): + raise ssl_error(space, "RAND_egd unavailable") class _SSLSocket(W_Root): diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -36,7 +36,8 @@ assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) - assert 'openssl' in _ssl.OPENSSL_VERSION.lower() + lower_version = _ssl.OPENSSL_VERSION.lower() + assert 'openssl' in lower_version or "libressl" in lower_version assert isinstance(_ssl.ALERT_DESCRIPTION_ACCESS_DENIED, int) @@ -69,8 +70,9 @@ def test_sslwrap(self): import _ssl, _socket, sys, gc - if sys.platform == 'darwin' or 'freebsd' in sys.platform: - skip("hangs indefinitely on OSX & FreeBSD (also on CPython)") + if sys.platform == 'darwin' or 'freebsd' in sys.platform or \ + 'openbsd' in sys.platform: + skip("hangs indefinitely on OSX & BSD (also on CPython)") s = _socket.socket() if sys.version_info < (2, 7, 9): ss = _ssl.sslwrap(s, 0) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -34,6 +34,7 @@ i += 1 _, size = struct.unpack("ll", s[i:i + 2 * WORD]) i += 2 * WORD + size * struct.calcsize("P") + i += WORD # thread id elif s[i] == '\x02': i += 1 _, size = struct.unpack("ll", s[i:i + 2 * WORD]) diff --git a/pypy/module/array/test/test_array.py 
b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -844,6 +844,18 @@ b.byteswap() assert a != b + def test_unicode_ord_positive(self): + import sys + if sys.maxunicode == 0xffff: + skip("test for 32-bit unicodes") + a = self.array('u', '\xff\xff\xff\xff') + assert len(a) == 1 + assert repr(a[0]) == "u'\Uffffffff'" + if sys.maxint == 2147483647: + assert ord(a[0]) == -1 + else: + assert ord(a[0]) == 4294967295 + def test_weakref(self): import weakref a = self.array('c', 'Hi!') diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -9,6 +9,7 @@ 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', 'flatiter': 'flatiter.W_FlatIterator', + 'flagsobj': 'flagsobj.W_FlagsObject', '_reconstruct' : 'ndarray._reconstruct', 'scalar' : 'ctors.build_scalar', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -117,12 +117,14 @@ return W_NDimArray(impl) @staticmethod - def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, w_arr, dtype=None): from pypy.module.micronumpy import concrete - + w_base = w_arr + if w_arr.implementation.base() is not None: + w_base = w_arr.implementation.base() impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, - orig_arr, dtype) - return wrap_impl(space, space.type(orig_arr), orig_arr, impl) + w_base, dtype) + return wrap_impl(space, space.type(w_arr), w_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -147,7 +147,7 @@ def get_flags(self): return 
(NPY.ARRAY_C_CONTIGUOUS | NPY.ARRAY_F_CONTIGUOUS | - NPY.ARRAY_WRITEABLE | NPY.ARRAY_OWNDATA) + NPY.ARRAY_ALIGNED | NPY.ARRAY_OWNDATA) def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit, rgc from rpython.rlib.rarithmetic import ovfcheck +from rpython.rlib.listsort import make_timsort_class from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ @@ -17,6 +18,19 @@ is_f_contiguous) from rpython.rlib.objectmodel import keepalive_until_here +TimSort = make_timsort_class() +class StrideSort(TimSort): + ''' + argsort (return the indices to sort) a list of strides + ''' + def __init__(self, rangelist, strides): + self.strides = strides + TimSort.__init__(self, rangelist) + + def lt(self, a, b): + return self.strides[a] < self.strides[b] + + class BaseConcreteArray(object): _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', 'strides[*]', 'backstrides[*]', 'order', 'gcstruct', @@ -354,12 +368,15 @@ elif order != self.order: t_strides, backstrides = calc_strides(shape, dtype, order) else: - mins = strides[0] + indx_array = range(len(strides)) + list_sorter = StrideSort(indx_array, strides) + list_sorter.sort() t_elsize = dtype.elsize - for s in strides: - if s < mins: - mins = s - t_strides = [s * t_elsize / mins for s in strides] + t_strides = strides[:] + base = dtype.elsize + for i in indx_array: + t_strides[i] = base + base *= shape[i] backstrides = calc_backstrides(t_strides, shape) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) diff --git 
a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -133,7 +133,9 @@ return w_arr else: imp = w_object.implementation - w_base = imp.base() or w_object + w_base = w_object + if imp.base() is not None: + w_base = imp.base() with imp as storage: sz = support.product(w_object.get_shape()) * dtype.elsize return W_NDimArray.from_shape_and_storage(space, diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -57,6 +57,9 @@ self.flags & NPY.ARRAY_F_CONTIGUOUS or self.flags & NPY.ARRAY_C_CONTIGUOUS )) + def descr_get_num(self, space): + return space.wrap(self.flags) + def descr_getitem(self, space, w_item): key = space.str_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": @@ -122,4 +125,5 @@ aligned = GetSetProperty(W_FlagsObject.descr_get_aligned), fnc = GetSetProperty(W_FlagsObject.descr_get_fnc), forc = GetSetProperty(W_FlagsObject.descr_get_forc), + num = GetSetProperty(W_FlagsObject.descr_get_num), ) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -747,8 +747,12 @@ return out def descr_get_ctypes(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - "ctypes not implemented yet")) + w_result = space.appexec([self], """(arr): + from numpy.core import _internal + p_data = arr.__array_interface__['data'][0] + return _internal._ctypes(arr, p_data) + """) + return w_result def buffer_w(self, space, flags): return self.implementation.get_buffer(space, True) @@ -1304,6 +1308,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, 
SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -30,6 +30,7 @@ assert a.flags.forc == True assert a.flags['FNC'] == False assert a.flags['FORC'] == True + assert a.flags.num == 1287 raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") @@ -38,6 +39,7 @@ import numpy as np a = np.int32(2) assert a.flags.c_contiguous == True + assert a.flags.num == 263 def test_compare(self): import numpy as np diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2218,7 +2218,7 @@ assert _weakref.ref(a) def test_astype(self): - from numpy import array, arange + from numpy import array, arange, empty b = array(1).astype(float) assert b == 1 assert b.dtype == float @@ -2273,14 +2273,36 @@ b = a.astype('f4', order='C', copy=False) assert a is b + a = empty([3, 3, 3, 3], 'uint8') + a[:] = 0 + b = a[2] + c = b[:, :2, :] + d = c.swapaxes(1, -1) + e = d.astype('complex128') + assert e.shape == (3, 3, 2) + assert e.strides == (96, 16, 48) + assert (e.real == d).all() + def test_base(self): - from numpy import array + from numpy import array, empty assert array(1).base is None assert array([1, 2]).base is None a = array([1, 2, 3, 4]) b = a[::2] assert b.base is a + a = empty([3, 3, 3, 3], 'uint8') + a[:] = 0 + b = a[2] + assert b.base.base is None + c = b[:, :2, :] + d = c.swapaxes(1, -1) + assert c.base.base is None + assert d.base.base is None + assert d.shape == (3, 3, 2) + assert d.__array_interface__['data'][0] == \ + a.__array_interface__['data'][0] + a.strides[0] * 2 + def 
test_byteswap(self): from numpy import array @@ -2497,10 +2519,10 @@ assert b.shape == b[...].shape assert (b == b[...]).all() - a = np.arange(6).reshape(2, 3) + a = np.arange(6) if '__pypy__' in sys.builtin_module_names: raises(ValueError, "a[..., ...]") - b = a [..., 0] + b = a.reshape(2, 3)[..., 0] assert (b == [0, 3]).all() assert b.base is a diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + 
space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -27,7 +27,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py 
import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = 
_get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int *a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1636,11 +1636,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1650,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2248,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with 
ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, 
ffiplatform, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... };") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_restrict_fields(): + if sys.platform == 'win32': + py.test.skip("'__restrict__' probably not recognized") + ffi = FFI() + ffi.cdef("""struct foo_s { void * restrict b; };""") + lib = verify(ffi, 'test_restrict_fields', """ + struct foo_s { void * __restrict__ b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'b' + assert foo_s.fields[0][1].type is ffi.typeof("void *") + +def test_const_array_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[4]; };""") + lib = verify(ffi, 'test_const_array_fields', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct 
foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_array_fields_varlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_varlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[]") + +def test_const_array_fields_unknownlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[...]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_unknownlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_function_args(): + ffi = FFI() + ffi.cdef("""int foobar(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_args', """ + int foobar(const int a, const int *b, const int c[]) { + return a + *b + *c; + } + """) + assert lib.foobar(100, ffi.new("int *", 40), ffi.new("int *", 2)) == 142 + +def test_const_function_type_args(): + ffi = FFI() + ffi.cdef("""int (*foobar)(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_type_args', """ + int (*foobar)(const int a, const int *b, const int c[]); + """) + t = ffi.typeof(lib.foobar) + assert t.args[0] is ffi.typeof("int") + assert t.args[1] is ffi.typeof("int *") + assert t.args[2] is ffi.typeof("int *") + +def test_const_constant(): + ffi = FFI() + ffi.cdef("""struct foo_s { int x,y; }; const struct foo_s myfoo;""") + lib = verify(ffi, 'test_const_constant', """ + struct foo_s { int x,y; }; const struct foo_s myfoo = { 40, 2 }; + """) + assert lib.myfoo.x == 40 + assert lib.myfoo.y == 2 + +def test_const_via_typedef(): + ffi = FFI() + ffi.cdef("""typedef const int const_t; const_t aaa;""") + lib = verify(ffi, 'test_const_via_typedef', """ + typedef 
const int const_t; + #define aaa 42 + """) + assert lib.aaa == 42 + py.test.raises(AttributeError, "lib.aaa = 43") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1623,11 +1623,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1637,7 +1637,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -1923,7 +1923,7 @@ assert repr(ffi.typeof(lib.a)) == "" def test_bug_const_char_ptr_array_2(): - ffi = FFI_warnings_not_error() # ignore warnings + ffi = FFI() ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "" diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -123,23 +123,26 @@ self.sig_recvd = True old_handler = signal.signal(signal.SIGUSR1, my_handler) try: + ready = thread.allocate_lock() + ready.acquire() def other_thread(): # Acquire the lock in a non-main thread, so this test works for # RLocks. lock.acquire() - # Wait until the main thread is blocked in the lock acquire, and - # then wake it up with this. 
- time.sleep(0.5) + # Notify the main thread that we're ready + ready.release() + # Wait for 5 seconds here + for n in range(50): + time.sleep(0.1) + # Send the signal os.kill(os.getpid(), signal.SIGUSR1) # Let the main thread take the interrupt, handle it, and retry # the lock acquisition. Then we'll let it run. - time.sleep(0.5) + for n in range(50): + time.sleep(0.1) lock.release() From noreply at buildbot.pypy.org Sat Oct 3 12:37:08 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 12:37:08 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: merge default Message-ID: <20151003103708.264E81C148D@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79949:516d4b0fc7fa Date: 2015-10-03 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/516d4b0fc7fa/ Log: merge default diff too long, truncating to 2000 out of 2750 lines diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -272,11 +272,9 @@ return space.w_NotImplemented return space.call_function(w_meth, w_b) else: - # here, if coerce returns a non-W_Instance object as first - # argument, then give up. The idea is that this strange - # case should already have been handled by the binaryop() - # called from descroperation first. 
- return space.w_NotImplemented + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument + return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -658,7 +656,7 @@ return space.w_NotImplemented return space.call_function(w_func, w_other) else: - return space.w_NotImplemented + return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -425,6 +425,14 @@ return 42 assert B() + B() == 42 + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -39,7 +39,10 @@ def product_check(s): i = 1 for x in s: - i = ovfcheck(i * x) + try: + i = ovfcheck(i * x) + except OverflowError: + raise return i def check_and_adjust_index(space, index, size, axis): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -249,11 +249,17 @@ ix = 1 while iw > 0: if iw & 1: - ix = ovfcheck(ix * temp) + try: + ix = ovfcheck(ix * temp) + except OverflowError: + raise iw >>= 1 # Shift exponent down by 1 bit if iw == 0: break - temp = ovfcheck(temp * temp) # Square the value of temp + try: + temp = ovfcheck(temp * temp) # Square the value of temp + except OverflowError: + raise if iz: # If we did a multiplication, perform a modulo ix %= iz diff --git 
a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -671,6 +671,8 @@ a0, a1 = boxes imm_a1 = check_imm_box(a1) l0 = self.make_sure_var_in_reg(a0, boxes) + op.getdescr().make_a_counter_per_value(op, + self.cpu.all_reg_indexes[l0.value]) if not imm_a1: l1 = self.make_sure_var_in_reg(a1, boxes) else: diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -40,6 +40,10 @@ self.inputargs = map(mapping, inputargs) self.operations = [] for op in operations: + if op.getopnum() == rop.GUARD_VALUE: + # we don't care about the value 13 here, because we gonna + # fish it from the extra slot on frame anyway + op.getdescr().make_a_counter_per_value(op, 13) if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -372,6 +376,18 @@ except ExecutionFinished, e: return e.deadframe + def get_value_direct(self, deadframe, tp, index): + v = deadframe._extra_value + if tp == 'i': + assert lltype.typeOf(v) == lltype.Signed + elif tp == 'r': + assert lltype.typeOf(v) == llmemory.GCREF + elif tp == 'f': + assert lltype.typeOf(v) == longlong.FLOATSTORAGE + else: + assert False + return v + def get_int_value(self, deadframe, index): v = deadframe._values[index] assert lltype.typeOf(v) == lltype.Signed @@ -775,11 +791,13 @@ _TYPE = llmemory.GCREF def __init__(self, latest_descr, values, - last_exception=None, saved_data=None): + last_exception=None, saved_data=None, + extra_value=None): self._latest_descr = latest_descr self._values = values self._last_exception = last_exception self._saved_data = saved_data + self._extra_value = extra_value class LLFrame(object): @@ -872,7 +890,7 @@ # ----------------------------------------------------- - def fail_guard(self, descr, saved_data=None): + def fail_guard(self, 
descr, saved_data=None, extra_value=None): values = [] for box in self.current_op.getfailargs(): if box is not None: @@ -887,7 +905,7 @@ else: raise ExecutionFinished(LLDeadFrame(descr, values, self.last_exception, - saved_data)) + saved_data, extra_value)) def execute_force_spill(self, _, arg): pass @@ -909,7 +927,7 @@ def execute_guard_value(self, descr, arg1, arg2): if arg1 != arg2: - self.fail_guard(descr) + self.fail_guard(descr, extra_value=arg1) def execute_guard_nonnull(self, descr, arg): if not arg: @@ -1028,7 +1046,6 @@ def execute_guard_overflow(self, descr): if not self.overflow_flag: self.fail_guard(descr) - return lltype.nullptr(llmemory.GCREF.TO) # I think it's fine.... def execute_jump(self, descr, *args): raise Jump(descr._llgraph_target, args) diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -389,20 +389,40 @@ descr = self.get_latest_descr(deadframe) return rffi.cast(lltype.Signed, descr.rd_locs[index]) * WORD + @specialize.arg(2) + def get_value_direct(self, deadframe, tp, index): + if tp == 'i': + return self.get_int_value_direct(deadframe, index * WORD) + elif tp == 'r': + return self.get_ref_value_direct(deadframe, index * WORD) + elif tp == 'f': + return self.get_float_value_direct(deadframe, index * WORD) + else: + assert False + def get_int_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_int_value_direct(deadframe, pos) + + def get_int_value_direct(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_int_at_mem(deadframe, pos + ofs, WORD, 1) def get_ref_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_ref_value_direct(deadframe, pos) + + def get_ref_value_direct(self, deadframe, pos): descr = 
self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_ref_at_mem(deadframe, pos + ofs) def get_float_value(self, deadframe, index): pos = self._decode_pos(deadframe, index) + return self.get_float_value_direct(deadframe, pos) + + def get_float_value_direct(self, deadframe, pos): descr = self.gc_ll_descr.getframedescrs(self).arraydescr ofs = self.unpack_arraydescr(descr) return self.read_float_at_mem(deadframe, pos + ofs) diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -22,6 +22,8 @@ self.operations = subops class FakeMetaInterp(object): + ovf_flag = False + def execute_raised(self, exc, constant=False): self._got_exc = exc @@ -365,9 +367,9 @@ def produce_into(self, builder, r): fail_subset = builder.subset_of_intvars(r) original_intvars = builder.intvars[:] + builder.fakemetainterp.ovf_flag = False super(AbstractOvfOperation, self).produce_into(builder, r) - if builder.fakemetainterp._got_exc: # overflow detected - assert isinstance(builder.fakemetainterp._got_exc, OverflowError) + if builder.fakemetainterp.ovf_flag: # overflow detected op = ResOperation(rop.GUARD_OVERFLOW, []) # the overflowed result should not be used any more, but can # be used on the failure path: recompute fail_subset including diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -424,6 +424,8 @@ def consider_guard_value(self, op): x = self.make_sure_var_in_reg(op.getarg(0)) + loc = self.assembler.cpu.all_reg_indexes[x.value] + op.getdescr().make_a_counter_per_value(op, loc) y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ 
b/rpython/jit/codewriter/codewriter.py @@ -48,7 +48,7 @@ # which means mostly producing a linear list of operations and # inserting jumps or conditional jumps. This is a list of tuples # of the shape ("opname", arg1, ..., argN) or (Label(...),). - ssarepr = flatten_graph(graph, regallocs) + ssarepr = flatten_graph(graph, regallocs, cpu=self.callcontrol.cpu) # # step 3b: compute the liveness around certain operations compute_liveness(ssarepr) diff --git a/rpython/jit/codewriter/flatten.py b/rpython/jit/codewriter/flatten.py --- a/rpython/jit/codewriter/flatten.py +++ b/rpython/jit/codewriter/flatten.py @@ -1,4 +1,4 @@ -from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.model import Variable, Constant, c_last_exception from rpython.jit.metainterp.history import AbstractDescr, getkind from rpython.rtyper.lltypesystem import lltype @@ -60,10 +60,11 @@ # ____________________________________________________________ -def flatten_graph(graph, regallocs, _include_all_exc_links=False): +def flatten_graph(graph, regallocs, _include_all_exc_links=False, + cpu=None): """Flatten the graph into an SSARepr, with already-computed register allocations. 
'regallocs' in a dict {kind: RegAlloc}.""" - flattener = GraphFlattener(graph, regallocs, _include_all_exc_links) + flattener = GraphFlattener(graph, regallocs, _include_all_exc_links, cpu) flattener.enforce_input_args() flattener.generate_ssa_form() return flattener.ssarepr @@ -71,9 +72,11 @@ class GraphFlattener(object): - def __init__(self, graph, regallocs, _include_all_exc_links=False): + def __init__(self, graph, regallocs, _include_all_exc_links=False, + cpu=None): self.graph = graph self.regallocs = regallocs + self.cpu = cpu self._include_all_exc_links = _include_all_exc_links self.registers = {} if graph: @@ -100,7 +103,7 @@ self.seen_blocks = {} self.make_bytecode_block(self.graph.startblock) - def make_bytecode_block(self, block): + def make_bytecode_block(self, block, handling_ovf=False): if block.exits == (): self.make_return(block.inputargs) return @@ -114,9 +117,15 @@ # operations = block.operations for i, op in enumerate(operations): + if '_ovf' in op.opname: + if (len(block.exits) not in (2, 3) or + block.exitswitch is not c_last_exception): + raise Exception("detected a block containing ovfcheck()" + " but no OverflowError is caught, this" + " is not legal in jitted blocks") self.serialize_op(op) # - self.insert_exits(block) + self.insert_exits(block, handling_ovf) def make_return(self, args): if len(args) == 1: @@ -136,16 +145,16 @@ raise Exception("?") self.emitline("---") - def make_link(self, link): + def make_link(self, link, handling_ovf): if (link.target.exits == () and link.last_exception not in link.args and link.last_exc_value not in link.args): self.make_return(link.args) # optimization only return self.insert_renamings(link) - self.make_bytecode_block(link.target) + self.make_bytecode_block(link.target, handling_ovf) - def make_exception_link(self, link): + def make_exception_link(self, link, handling_ovf): # Like make_link(), but also introduces the 'last_exception' and # 'last_exc_value' as variables if needed. 
Also check if the link # is jumping directly to the re-raising exception block. @@ -153,54 +162,74 @@ assert link.last_exc_value is not None if link.target.operations == () and link.args == [link.last_exception, link.last_exc_value]: - self.emitline("reraise") + if handling_ovf: + exc_data = self.cpu.rtyper.exceptiondata + ll_ovf = exc_data.get_standard_ll_exc_instance_by_class( + OverflowError) + c = Constant(ll_ovf, concretetype=lltype.typeOf(ll_ovf)) + self.emitline("raise", c) + else: + self.emitline("reraise") self.emitline("---") return # done - self.make_link(link) + self.make_link(link, handling_ovf) - def insert_exits(self, block): + def insert_exits(self, block, handling_ovf=False): if len(block.exits) == 1: # A single link, fall-through link = block.exits[0] assert link.exitcase in (None, False, True) # the cases False or True should not really occur, but can show # up in the manually hacked graphs for generators... - self.make_link(link) + self.make_link(link, handling_ovf) # elif block.canraise: # An exception block. See test_exc_exitswitch in test_flatten.py # for an example of what kind of code this makes. index = -1 - while True: - lastopname = block.operations[index].opname - if lastopname != '-live-': - break - index -= 1 + opname = block.operations[index].opname + if '_ovf' in opname: + # ovf checking operation as a lat thing, -live- should be + # one before it + line = self.popline() + self.emitline(opname[:7] + '_jump_if_ovf', + TLabel(block.exits[1]), *line[1:]) + assert len(block.exits) in (2, 3) + self.make_link(block.exits[0], False) + self.emitline(Label(block.exits[1])) + self.make_exception_link(block.exits[1], True) + if len(block.exits) == 3: + assert block.exits[2].exitcase is Exception + self.make_exception_link(block.exits[2], False) + return + else: + while True: + lastopname = block.operations[index].opname + if lastopname != '-live-': + break + index -= 1 assert block.exits[0].exitcase is None # is this always True? 
# if not self._include_all_exc_links: if index == -1: # cannot raise: the last instruction is not # actually a '-live-' - self.make_link(block.exits[0]) + self.make_link(block.exits[0], False) return # self.emitline('catch_exception', TLabel(block.exits[0])) - self.make_link(block.exits[0]) + self.make_link(block.exits[0], False) self.emitline(Label(block.exits[0])) for link in block.exits[1:]: - if (link.exitcase is Exception or - (link.exitcase is OverflowError and - lastopname.startswith('int_') and - lastopname.endswith('_ovf'))): + if link.exitcase is Exception: # this link captures all exceptions - self.make_exception_link(link) + self.make_exception_link(link, False) break self.emitline('goto_if_exception_mismatch', Constant(link.llexitcase, lltype.typeOf(link.llexitcase)), TLabel(link)) - self.make_exception_link(link) + self.make_exception_link(link, False) self.emitline(Label(link)) else: # no link captures all exceptions, so we have to put a reraise @@ -216,29 +245,26 @@ if linkfalse.llexitcase == True: linkfalse, linktrue = linktrue, linkfalse opname = 'goto_if_not' - livebefore = False if isinstance(block.exitswitch, tuple): # special case produced by jtransform.optimize_goto_if_not() opname = 'goto_if_not_' + block.exitswitch[0] opargs = block.exitswitch[1:] if opargs[-1] == '-live-before': - livebefore = True opargs = opargs[:-1] else: assert block.exitswitch.concretetype == lltype.Bool opargs = [block.exitswitch] # lst = self.flatten_list(opargs) + [TLabel(linkfalse)] - if livebefore: - self.emitline('-live-') + self.emitline('-live-') self.emitline(opname, *lst) - if not livebefore: - self.emitline('-live-', TLabel(linkfalse)) + #if not livebefore: + # self.emitline('-live-', TLabel(linkfalse)) # true path: - self.make_link(linktrue) + self.make_link(linktrue, handling_ovf) # false path: self.emitline(Label(linkfalse)) - self.make_link(linkfalse) + self.make_link(linkfalse, handling_ovf) # else: # A switch. 
@@ -261,7 +287,7 @@ switchdict) # emit the default path if block.exits[-1].exitcase == 'default': - self.make_link(block.exits[-1]) + self.make_link(block.exits[-1], handling_ovf) else: self.emitline("unreachable") self.emitline("---") @@ -275,7 +301,7 @@ # if the switched value doesn't match any case. self.emitline(Label(switch)) self.emitline('-live-') - self.make_link(switch) + self.make_link(switch, handling_ovf) def insert_renamings(self, link): renamings = {} @@ -323,6 +349,9 @@ def emitline(self, *line): self.ssarepr.insns.append(line) + def popline(self): + return self.ssarepr.insns.pop() + def flatten_list(self, arglist): args = [] for v in arglist: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -8,7 +8,8 @@ from rpython.jit.metainterp.history import getkind from rpython.jit.metainterp.typesystem import deref, arrayItem from rpython.jit.metainterp.blackhole import BlackholeInterpreter -from rpython.flowspace.model import SpaceOperation, Variable, Constant +from rpython.flowspace.model import SpaceOperation, Variable, Constant,\ + c_last_exception from rpython.rlib import objectmodel from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc @@ -211,8 +212,8 @@ # ok! 
optimize this case block.operations.remove(op) block.exitswitch = (op.opname,) + tuple(op.args) - if op.opname in ('ptr_iszero', 'ptr_nonzero'): - block.exitswitch += ('-live-before',) + #if op.opname in ('ptr_iszero', 'ptr_nonzero'): + block.exitswitch += ('-live-before',) # if the variable escape to the next block along a link, # replace it with a constant, because we know its value for link in block.exits: @@ -333,13 +334,13 @@ def rewrite_op_int_add_ovf(self, op): op0 = self._rewrite_symmetric(op) op1 = SpaceOperation('-live-', [], None) - return [op0, op1] + return [op1, op0] rewrite_op_int_mul_ovf = rewrite_op_int_add_ovf def rewrite_op_int_sub_ovf(self, op): op1 = SpaceOperation('-live-', [], None) - return [op, op1] + return [op1, op] def _noop_rewrite(self, op): return op diff --git a/rpython/jit/codewriter/test/test_codewriter.py b/rpython/jit/codewriter/test/test_codewriter.py --- a/rpython/jit/codewriter/test/test_codewriter.py +++ b/rpython/jit/codewriter/test/test_codewriter.py @@ -76,11 +76,11 @@ assert jitcode.num_regs_i() == 2 assert jitcode.num_regs_r() == 0 assert jitcode.num_regs_f() == 0 - assert jitcode._live_vars(5) == '%i0 %i1' + assert jitcode._live_vars(0) == '%i0 %i1' # from rpython.jit.codewriter.jitcode import MissingLiveness for i in range(len(jitcode.code)+1): - if i != 5: + if i != 0: py.test.raises(MissingLiveness, jitcode._live_vars, i) def test_call(): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -140,6 +140,7 @@ def encoding_test(self, func, args, expected, transform=False, liveness=False, cc=None, jd=None): + graphs = self.make_graphs(func, args) #graphs[0].show() if transform: @@ -147,7 +148,8 @@ cc = cc or FakeCallControl() transform_graph(graphs[0], FakeCPU(self.rtyper), cc, jd) ssarepr = flatten_graph(graphs[0], fake_regallocs(), - _include_all_exc_links=not 
transform) + _include_all_exc_links=not transform, + cpu=FakeCPU(self.rtyper)) if liveness: from rpython.jit.codewriter.liveness import compute_liveness compute_liveness(ssarepr) @@ -169,8 +171,8 @@ return n + 1 self.encoding_test(f, [10], """ int_gt %i0, $0 -> %i1 + -live- goto_if_not %i1, L1 - -live- L1 int_copy %i0 -> %i2 int_sub %i2, $3 -> %i3 int_copy %i3 -> %i4 @@ -194,8 +196,8 @@ int_copy %i1 -> %i3 L1: int_gt %i2, $0 -> %i4 + -live- goto_if_not %i4, L2 - -live- L2 int_copy %i2 -> %i5 int_copy %i3 -> %i6 int_add %i6, %i5 -> %i7 @@ -218,8 +220,8 @@ int_copy %i0 -> %i2 int_copy %i1 -> %i3 L1: + -live- goto_if_not_int_gt %i2, $0, L2 - -live- L2 int_copy %i2 -> %i4 int_copy %i3 -> %i5 int_add %i5, %i4 -> %i6 @@ -457,8 +459,8 @@ # note that 'goto_if_not_int_is_true' is not the same thing # as just 'goto_if_not', because the last one expects a boolean self.encoding_test(f, [7], """ + -live- goto_if_not_int_is_true %i0, L1 - -live- L1 int_return $False --- L1: @@ -523,8 +525,8 @@ else: return m2 self.encoding_test(f, [4, 5, 6], """ + -live- %i0, %i1, %i2 goto_if_not_int_is_true %i0, L1 - -live- %i1, %i2, L1 int_return %i1 --- L1: @@ -538,15 +540,59 @@ except OverflowError: return 42 self.encoding_test(f, [7, 2], """ - int_add_ovf %i0, %i1 -> %i2 - -live- %i2 - catch_exception L1 + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i0, %i1 -> %i2 int_return %i2 --- L1: int_return $42 """, transform=True, liveness=True) + def test_multiple_int_add_ovf(self): + def f(i, j): + try: + ovfcheck(j + i) + return ovfcheck(i + j) + except OverflowError: + return 42 + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i1, %i0 -> %i2 + int_copy %i1 -> %i3 + int_copy %i0 -> %i4 + -live- %i3, %i4 + int_add_jump_if_ovf L2, %i4, %i3 -> %i5 + int_return %i5 + --- + L2: + int_return $42 + --- + L1: + int_return $42 + """, transform=True, liveness=True) + + def test_ovfcheck_no_catch(self): + def f(i, j): + return ovfcheck(i + j) + err = py.test.raises(Exception, 
"self.encoding_test(f, [7, 2], ''," + "transform=True, liveness=True)") + assert "ovfcheck()" in str(err) + + def test_ovfcheck_reraise(self): + def f(i, j): + try: + ovfcheck(j + i) + except OverflowError: + raise + self.encoding_test(f, [7, 2], """ + -live- %i0, %i1 + int_add_jump_if_ovf L1, %i1, %i0 -> %i2 + void_return + --- + L1: + raise $<* struct object> + """, transform=True, liveness=True) + def test_residual_call_raising(self): @dont_look_inside def g(i, j): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -15,7 +15,7 @@ for prod in result: yield tuple(prod) -from rpython.flowspace.model import FunctionGraph, Block, Link +from rpython.flowspace.model import FunctionGraph, Block, Link, c_last_exception from rpython.flowspace.model import SpaceOperation, Variable, Constant from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi from rpython.rtyper import rclass @@ -187,7 +187,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [sp1, sp2] - assert block.exitswitch == ('int_gt', v1, v2) + assert block.exitswitch == ('int_gt', v1, v2, '-live-before') assert block.exits == exits def test_optimize_goto_if_not__incoming(): @@ -211,7 +211,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [] - assert block.exitswitch == ('int_gt', v1, v2) + assert block.exitswitch == ('int_gt', v1, v2, '-live-before') assert block.exits == exits assert exits[1].args == [const(True)] @@ -235,7 +235,7 @@ res = Transformer().optimize_goto_if_not(block) assert res == True assert block.operations == [] - assert block.exitswitch == (opname, v1, v2) + assert block.exitswitch == (opname, v1, v2, '-live-before') assert block.exits == exits def test_optimize_goto_if_not__ptr_iszero(): @@ -287,7 +287,7 @@ for v2 in 
[varoftype(lltype.Signed), const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) - op0, op1 = oplist + op1, op0 = oplist assert op0.opname == 'int_add_ovf' if isinstance(v1, Constant) and isinstance(v2, Variable): assert op0.args == [v2, v1] diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -63,8 +63,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i2 + -live- goto_if_not %i2, L2 - -live- L2 int_add %i1, %i0 -> %i1 int_sub %i0, $1 -> %i0 goto L1 @@ -82,8 +82,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i2 + -live- goto_if_not %i2, L2 - -live- L2 int_push %i1 int_copy %i0 -> %i1 int_pop -> %i0 @@ -102,8 +102,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i0 + -live- goto_if_not %i0, L2 - -live- L2 int_copy %i1 -> %i0 int_copy $2 -> %i1 goto L1 @@ -121,8 +121,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i3 + -live- goto_if_not %i3, L2 - -live- L2 int_push %i1 int_copy %i2 -> %i1 int_copy %i0 -> %i2 @@ -142,8 +142,8 @@ self.check_assembler(graph, """ L1: int_gt %i0, $0 -> %i3 + -live- goto_if_not %i3, L2 - -live- L2 int_copy %i2 -> %i1 goto L1 --- @@ -236,8 +236,8 @@ self.check_assembler(graph, """ int_lshift %i0, %i1 -> %i2 int_rshift %i2, %i1 -> %i1 + -live- goto_if_not_int_ne %i1, %i0, L1 - -live- L1 raise $<* struct object> --- L1: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -212,6 +212,20 @@ assert lltype.typeOf(result) is longlong.FLOATSTORAGE self.registers_f[ord(code[position])] = result position += 1 + elif resulttype == "iL": + result, new_position = result + if new_position != -1: + position = new_position + next_argcode = next_argcode + 2 + else: + 
assert argcodes[next_argcode] == '>' + assert argcodes[next_argcode + 1] == 'i' + next_argcode = next_argcode + 2 + if lltype.typeOf(result) is lltype.Bool: + result = int(result) + assert lltype.typeOf(result) is lltype.Signed + self.registers_i[ord(code[position])] = result + position += 1 elif resulttype == 'L': assert result >= 0 position = result @@ -394,17 +408,26 @@ def bhimpl_int_mul(a, b): return intmask(a * b) - @arguments("i", "i", returns="i") - def bhimpl_int_add_ovf(a, b): - return ovfcheck(a + b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_add_jump_if_ovf(label, a, b): + try: + return ovfcheck(a + b), -1 + except OverflowError: + return 0, label - @arguments("i", "i", returns="i") - def bhimpl_int_sub_ovf(a, b): - return ovfcheck(a - b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_sub_jump_if_ovf(label, a, b): + try: + return ovfcheck(a - b), -1 + except OverflowError: + return 0, label - @arguments("i", "i", returns="i") - def bhimpl_int_mul_ovf(a, b): - return ovfcheck(a * b) + @arguments("L", "i", "i", returns="iL") + def bhimpl_int_mul_jump_if_ovf(label, a, b): + try: + return ovfcheck(a * b), -1 + except OverflowError: + return 0, label @arguments("i", "i", returns="i") def bhimpl_int_floordiv(a, b): @@ -1465,57 +1488,9 @@ assert kind == 'v' return lltype.nullptr(rclass.OBJECTPTR.TO) - def _prepare_resume_from_failure(self, opnum, deadframe): - from rpython.jit.metainterp.resoperation import rop - # - if opnum == rop.GUARD_FUTURE_CONDITION: - pass - elif opnum == rop.GUARD_TRUE: - # Produced directly by some goto_if_not_xxx() opcode that did not - # jump, but which must now jump. The pc is just after the opcode. - self.position = self.jitcode.follow_jump(self.position) - # - elif opnum == rop.GUARD_FALSE: - # Produced directly by some goto_if_not_xxx() opcode that jumped, - # but which must no longer jump. The pc is just after the opcode. 
- pass - # - elif opnum == rop.GUARD_VALUE or opnum == rop.GUARD_CLASS: - # Produced by guard_class(), xxx_guard_value(), or a few other - # opcodes like switch(). The pc is at the start of the opcode - # (so it will be redone). - pass - # - elif (opnum == rop.GUARD_NONNULL or - opnum == rop.GUARD_ISNULL or - opnum == rop.GUARD_NONNULL_CLASS): - # Produced by goto_if_not_ptr_{non,is}zero(). The pc is at the - # start of the opcode (so it will be redone); this is needed - # because of GUARD_NONNULL_CLASS. - pass - # - elif (opnum == rop.GUARD_NO_EXCEPTION or - opnum == rop.GUARD_EXCEPTION or - opnum == rop.GUARD_NOT_FORCED): - return lltype.cast_opaque_ptr(rclass.OBJECTPTR, - self.cpu.grab_exc_value(deadframe)) - # - elif opnum == rop.GUARD_NO_OVERFLOW: - # Produced by int_xxx_ovf(). The pc is just after the opcode. - # We get here because it did not used to overflow, but now it does. - return get_llexception(self.cpu, OverflowError()) - # - elif opnum == rop.GUARD_OVERFLOW: - # Produced by int_xxx_ovf(). The pc is just after the opcode. - # We get here because it used to overflow, but now it no longer - # does. 
- pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass - else: - from rpython.jit.metainterp.resoperation import opname - raise NotImplementedError(opname[opnum]) - return lltype.nullptr(rclass.OBJECTPTR.TO) + def _prepare_resume_from_failure(self, deadframe): + return lltype.cast_opaque_ptr(rclass.OBJECTPTR, + self.cpu.grab_exc_value(deadframe)) # connect the return of values from the called frame to the # 'xxx_call_yyy' instructions from the caller frame @@ -1641,8 +1616,7 @@ deadframe, all_virtuals) - current_exc = blackholeinterp._prepare_resume_from_failure( - resumedescr.guard_opnum, deadframe) + current_exc = blackholeinterp._prepare_resume_from_failure(deadframe) _run_forever(blackholeinterp, current_exc) resume_in_blackhole._dont_inline_ = True diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -767,12 +767,15 @@ # fetch the actual value of the guard_value, possibly turning # it to an integer if typetag == self.TY_INT: - intval = metainterp_sd.cpu.get_int_value(deadframe, index) + intval = metainterp_sd.cpu.get_value_direct(deadframe, 'i', + index) elif typetag == self.TY_REF: - refval = metainterp_sd.cpu.get_ref_value(deadframe, index) + refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', + index) intval = lltype.cast_ptr_to_int(refval) elif typetag == self.TY_FLOAT: - floatval = metainterp_sd.cpu.get_float_value(deadframe, index) + floatval = metainterp_sd.cpu.get_value_direct(deadframe, 'f', + index) intval = longlong.gethash_fast(floatval) else: assert 0, typetag @@ -788,11 +791,6 @@ increment = jitdriver_sd.warmstate.increment_trace_eagerness return jitcounter.tick(hash, increment) - def get_index_of_guard_value(self): - if (self.status & self.ST_TYPE_MASK) == 0: - return -1 - return intmask(self.status >> self.ST_SHIFT) - def start_compiling(self): # start tracing and compiling from this guard. 
self.status |= self.ST_BUSY_FLAG @@ -819,62 +817,24 @@ new_loop.original_jitcell_token, metainterp.box_names_memo) - def make_a_counter_per_value(self, guard_value_op): + def make_a_counter_per_value(self, guard_value_op, index): assert guard_value_op.getopnum() == rop.GUARD_VALUE box = guard_value_op.getarg(0) - try: - i = guard_value_op.getfailargs().index(box) - except ValueError: - return # xxx probably very rare + if box.type == history.INT: + ty = self.TY_INT + elif box.type == history.REF: + ty = self.TY_REF + elif box.type == history.FLOAT: + ty = self.TY_FLOAT else: - if box.type == history.INT: - ty = self.TY_INT - elif box.type == history.REF: - ty = self.TY_REF - elif box.type == history.FLOAT: - ty = self.TY_FLOAT - else: - assert 0, box.type - self.status = ty | (r_uint(i) << self.ST_SHIFT) + assert 0, box.type + self.status = ty | (r_uint(index) << self.ST_SHIFT) -class ResumeGuardNonnullDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NONNULL - -class ResumeGuardIsnullDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_ISNULL - -class ResumeGuardClassDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_CLASS - -class ResumeGuardTrueDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_TRUE - -class ResumeGuardFalseDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_FALSE - -class ResumeGuardNonnullClassDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NONNULL_CLASS - -class ResumeGuardExceptionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_EXCEPTION - -class ResumeGuardNoExceptionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NO_EXCEPTION - -class ResumeGuardOverflowDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_OVERFLOW - -class ResumeGuardNoOverflowDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NO_OVERFLOW - -class ResumeGuardValueDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_VALUE - -class ResumeGuardNotInvalidated(ResumeGuardDescr): - guard_opnum = rop.GUARD_NOT_INVALIDATED +class ResumeGuardExcDescr(ResumeGuardDescr): + pass 
class ResumeAtPositionDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_FUTURE_CONDITION + pass class AllVirtuals: llopaque = True @@ -895,8 +855,6 @@ class ResumeGuardForcedDescr(ResumeGuardDescr): - guard_opnum = rop.GUARD_NOT_FORCED - def _init(self, metainterp_sd, jitdriver_sd): # to please the annotator self.metainterp_sd = metainterp_sd @@ -959,37 +917,13 @@ if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: resumedescr = ResumeGuardForcedDescr() resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd) - elif opnum == rop.GUARD_NOT_INVALIDATED: - resumedescr = ResumeGuardNotInvalidated() - elif opnum == rop.GUARD_FUTURE_CONDITION: - resumedescr = ResumeAtPositionDescr() - elif opnum == rop.GUARD_VALUE: - resumedescr = ResumeGuardValueDescr() - elif opnum == rop.GUARD_NONNULL: - resumedescr = ResumeGuardNonnullDescr() - elif opnum == rop.GUARD_ISNULL: - resumedescr = ResumeGuardIsnullDescr() - elif opnum == rop.GUARD_NONNULL_CLASS: - resumedescr = ResumeGuardNonnullClassDescr() - elif opnum == rop.GUARD_CLASS: - resumedescr = ResumeGuardClassDescr() - elif opnum == rop.GUARD_TRUE: - resumedescr = ResumeGuardTrueDescr() - elif opnum == rop.GUARD_FALSE: - resumedescr = ResumeGuardFalseDescr() - elif opnum == rop.GUARD_EXCEPTION: - resumedescr = ResumeGuardExceptionDescr() - elif opnum == rop.GUARD_NO_EXCEPTION: - resumedescr = ResumeGuardNoExceptionDescr() - elif opnum == rop.GUARD_OVERFLOW: - resumedescr = ResumeGuardOverflowDescr() - elif opnum == rop.GUARD_NO_OVERFLOW: - resumedescr = ResumeGuardNoOverflowDescr() elif opnum in (rop.GUARD_IS_OBJECT, rop.GUARD_SUBCLASS, rop.GUARD_GC_TYPE): # note - this only happens in tests resumedescr = ResumeAtPositionDescr() + elif opnum in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): + resumedescr = ResumeGuardExcDescr() else: - assert False + resumedescr = ResumeGuardDescr() return resumedescr class ResumeFromInterpDescr(ResumeDescr): diff --git a/rpython/jit/metainterp/executor.py 
b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -253,7 +253,7 @@ z = ovfcheck(a + b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z @@ -264,7 +264,7 @@ z = ovfcheck(a - b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z @@ -275,7 +275,7 @@ z = ovfcheck(a * b) except OverflowError: assert metainterp is not None - metainterp.execute_raised(OverflowError(), constant=True) + metainterp.ovf_flag = True z = 0 return z diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -157,6 +157,9 @@ def __init__(self, identifier=None): self.identifier = identifier # for testing + def make_a_counter_per_value(self, op, index): + pass # for testing + @specialize.argtype(0) def newconst(value): @@ -540,6 +543,9 @@ def check_consistency_of_branch(operations, seen, check_descr=True): "NOT_RPYTHON" for num, op in enumerate(operations): + if op.is_ovf(): + assert operations[num + 1].getopnum() in (rop.GUARD_NO_OVERFLOW, + rop.GUARD_OVERFLOW) for i in range(op.numargs()): box = op.getarg(i) if not isinstance(box, Const): @@ -750,7 +756,6 @@ return tokens def check_history(self, expected=None, **check): - return insns = {} for op in self.operations: opname = op.getopname() diff --git a/rpython/jit/metainterp/jitprof.py b/rpython/jit/metainterp/jitprof.py --- a/rpython/jit/metainterp/jitprof.py +++ b/rpython/jit/metainterp/jitprof.py @@ -143,6 +143,7 @@ self._print_intline("guards", cnt[Counters.GUARDS]) self._print_intline("opt ops", cnt[Counters.OPT_OPS]) self._print_intline("opt guards", cnt[Counters.OPT_GUARDS]) + self._print_intline("opt guards shared", cnt[Counters.OPT_GUARDS_SHARED]) 
self._print_intline("forcings", cnt[Counters.OPT_FORCINGS]) self._print_intline("abort: trace too long", cnt[Counters.ABORT_TOO_LONG]) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -11,7 +11,7 @@ from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resoperation import rop, ResOperation, OpHelpers,\ - AbstractResOp + AbstractResOp, GuardResOp from rpython.rlib.objectmodel import we_are_translated from rpython.jit.metainterp.optimizeopt import info @@ -288,7 +288,7 @@ cf = submap[index] = ArrayCachedField(index) return cf - def emit_operation(self, op): + def emit_operation(self, op): self.emitting_operation(op) self.emit_postponed_op() if (op.is_comparison() or op.is_call_may_force() diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -104,6 +104,11 @@ self.last_guard_pos = -1 def mark_last_guard(self, optimizer): + if (optimizer.getlastop() is None or + not optimizer.getlastop().is_guard()): + # there can be a really emitted operation that's not a guard + # e.g. 
a setfield, ignore those + return self.last_guard_pos = len(optimizer._newoperations) - 1 assert self.get_last_guard(optimizer).is_guard() diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp import jitprof, resume, compile from rpython.jit.metainterp.executor import execute_nonspec_const -from rpython.jit.metainterp.logger import LogOperations -from rpython.jit.metainterp.history import Const, ConstInt, REF, ConstPtr +from rpython.jit.metainterp.history import Const, ConstInt, ConstPtr from rpython.jit.metainterp.optimizeopt.intutils import IntBound,\ ConstIntBound, MININT, MAXINT, IntUnbounded from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method @@ -10,6 +9,7 @@ from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.typesystem import llhelper from rpython.rlib.objectmodel import specialize, we_are_translated +from rpython.rlib.debug import debug_print @@ -260,6 +260,8 @@ self.optearlyforce = None self.optunroll = None + self._last_guard_op = None + self.set_optimizations(optimizations) self.setup() @@ -526,6 +528,7 @@ if extra_jump: self.first_optimization.propagate_forward(ops[-1]) self.resumedata_memo.update_counters(self.metainterp_sd.profiler) + return (BasicLoopInfo(newargs, self.quasi_immutable_deps), self._newoperations) @@ -566,6 +569,7 @@ op.setarg(i, arg) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_OPS) if op.is_guard(): + assert isinstance(op, GuardResOp) self.metainterp_sd.profiler.count(jitprof.Counters.OPT_GUARDS) pendingfields = self.pendingfields self.pendingfields = None @@ -574,20 +578,85 @@ del self.replaces_guard[orig_op] return else: - guard_op = self.replace_op_with(op, op.getopnum()) - op = self.store_final_boxes_in_guard(guard_op, pendingfields) - # for unrolling - for farg in 
op.getfailargs(): - if farg: - self.force_box(farg) + op = self.emit_guard_operation(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True + if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or + op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + pass + else: + self._last_guard_op = None self._really_emitted_operation = op self._newoperations.append(op) + def emit_guard_operation(self, op, pendingfields): + guard_op = self.replace_op_with(op, op.getopnum()) + opnum = guard_op.getopnum() + if (self._last_guard_op and guard_op.getdescr() is None): + self.metainterp_sd.profiler.count_ops(opnum, + jitprof.Counters.OPT_GUARDS_SHARED) + op = self._copy_resume_data_from(guard_op, + self._last_guard_op) + else: + op = self.store_final_boxes_in_guard(guard_op, pendingfields) + self._last_guard_op = op + # for unrolling + for farg in op.getfailargs(): + if farg: + self.force_box(farg) + if op.getopnum() == rop.GUARD_EXCEPTION: + self._last_guard_op = None + return op + + def potentially_change_ovf_op_to_no_ovf(self, op): + # if last emitted operations was int_xxx_ovf and we are not emitting + # a guard_no_overflow change to int_add + if op.getopnum() != rop.GUARD_NO_OVERFLOW: + return + if not self._newoperations: + # got optimized otherwise + return + op = self._newoperations[-1] + if not op.is_ovf(): + return + newop = self.replace_op_with_no_ovf(op) + self._newoperations[-1] = newop + + def replace_op_with_no_ovf(self, op): + if op.getopnum() == rop.INT_MUL_OVF: + return self.replace_op_with(op, rop.INT_MUL) + elif op.getopnum() == rop.INT_ADD_OVF: + return self.replace_op_with(op, rop.INT_ADD) + elif op.getopnum() == rop.INT_SUB_OVF: + return self.replace_op_with(op, rop.INT_SUB) + else: + assert False + + + def _copy_resume_data_from(self, guard_op, last_guard_op): + if guard_op.getopnum() in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION): + assert last_guard_op.getopnum() == rop.GUARD_NOT_FORCED + descr = 
compile.invent_fail_descr_for_op(guard_op.getopnum(), self) + descr.copy_all_attributes_from(last_guard_op.getdescr()) + guard_op.setdescr(descr) + descr.store_final_boxes(guard_op, last_guard_op.getfailargs(), + self.metainterp_sd) + assert isinstance(guard_op, GuardResOp) + if guard_op.getopnum() == rop.GUARD_VALUE: + guard_op = self._maybe_replace_guard_value(guard_op, descr) + return guard_op + def getlastop(self): return self._really_emitted_operation + def is_call_pure_pure_canraise(self, op): + if not op.is_call_pure(): + return False + effectinfo = op.getdescr().get_extra_info() + if effectinfo.check_can_raise(ignore_memoryerror=True): + return True + return False + def replace_guard_op(self, old_op_pos, new_op): old_op = self._newoperations[old_op_pos] assert old_op.is_guard() @@ -625,24 +694,26 @@ descr.store_final_boxes(op, newboxes, self.metainterp_sd) # if op.getopnum() == rop.GUARD_VALUE: - if op.getarg(0).type == 'i': - b = self.getintbound(op.getarg(0)) - if b.is_bool(): - # Hack: turn guard_value(bool) into guard_true/guard_false. - # This is done after the operation is emitted to let - # store_final_boxes_in_guard set the guard_opnum field of - # the descr to the original rop.GUARD_VALUE. - constvalue = op.getarg(1).getint() - if constvalue == 0: - opnum = rop.GUARD_FALSE - elif constvalue == 1: - opnum = rop.GUARD_TRUE - else: - raise AssertionError("uh?") - newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) - return newop - # a real GUARD_VALUE. Make it use one counter per value. - descr.make_a_counter_per_value(op) + op = self._maybe_replace_guard_value(op, descr) + return op + + def _maybe_replace_guard_value(self, op, descr): + if op.getarg(0).type == 'i': + b = self.getintbound(op.getarg(0)) + if b.is_bool(): + # Hack: turn guard_value(bool) into guard_true/guard_false. + # This is done after the operation is emitted to let + # store_final_boxes_in_guard set the guard_opnum field of + # the descr to the original rop.GUARD_VALUE. 
+ constvalue = op.getarg(1).getint() + if constvalue == 0: + opnum = rop.GUARD_FALSE + elif constvalue == 1: + opnum = rop.GUARD_TRUE + else: + raise AssertionError("uh?") + newop = self.replace_op_with(op, opnum, [op.getarg(0)], descr) + return newop return op def optimize_default(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -401,7 +401,7 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_VALUE (%s) was proven to ' 'always fail' % r) - descr = compile.ResumeGuardValueDescr() + descr = compile.ResumeGuardDescr() op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)], descr = descr) @@ -411,7 +411,6 @@ # not put in short preambles guard_xxx and guard_value # on the same box. self.optimizer.replace_guard(op, info) - descr.make_a_counter_per_value(op) # to be safe info.reset_last_guard_pos() return op @@ -456,7 +455,7 @@ if old_guard_op.getopnum() == rop.GUARD_NONNULL: # it was a guard_nonnull, which we replace with a # guard_nonnull_class. 
- descr = compile.ResumeGuardNonnullClassDescr() + descr = compile.ResumeGuardDescr() op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS, args = [old_guard_op.getarg(0), op.getarg(1)], descr=descr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -2022,6 +2022,7 @@ None) def test_merge_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -2055,6 +2056,7 @@ self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2072,6 +2074,7 @@ self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] @@ -2502,7 +2505,6 @@ if values is not None: fail_args = values fdescr = guard_op.getdescr() - assert fdescr.guard_opnum == guard_opnum reader = ResumeDataFakeReader(fdescr, fail_args, MyMetaInterp(self.cpu)) boxes = reader.consume_boxes() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2967,6 +2967,7 @@ assert "promote of a virtual" in exc.msg def test_merge_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_class(p1, ConstClass(node_vtable)) [i0] @@ -3012,6 +3013,7 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS) def test_merge_guard_nonnull_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] 
guard_nonnull(p1) [i0] @@ -3035,6 +3037,7 @@ #self.check_expanded_fail_descr("i0", rop.GUARD_VALUE) def test_merge_guard_nonnull_guard_class_guard_value(self): + py.test.skip("disabled") ops = """ [p1, i0, i1, i2, p2] guard_nonnull(p1) [i0] diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -21,6 +21,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.rtyper import rclass +from rpython.rlib.objectmodel import compute_unique_id @@ -228,17 +229,23 @@ ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() - for _opimpl in ['int_add_ovf', 'int_sub_ovf', 'int_mul_ovf']: + for (_opimpl, resop) in [ + ('int_add_jump_if_ovf', 'INT_ADD_OVF'), + ('int_sub_jump_if_ovf', 'INT_SUB_OVF'), + ('int_mul_jump_if_ovf', 'INT_MUL_OVF')]: exec py.code.Source(''' - @arguments("box", "box") - def opimpl_%s(self, b1, b2): - self.metainterp.clear_exception() + @arguments("label", "box", "box", "orgpc") + def opimpl_%s(self, lbl, b1, b2, orgpc): + self.metainterp.ovf_flag = False resbox = self.execute(rop.%s, b1, b2) - self.make_result_of_lastop(resbox) # same as execute_varargs() if not isinstance(resbox, Const): - self.metainterp.handle_possible_overflow_error() + return self.handle_possible_overflow_error(lbl, orgpc, + resbox) + elif self.metainterp.ovf_flag: + self.pc = lbl + return None # but don't emit GUARD_OVERFLOW return resbox - ''' % (_opimpl, _opimpl.upper())).compile() + ''' % (_opimpl, resop)).compile() for _opimpl in ['int_is_true', 'int_is_zero', 'int_neg', 'int_invert', 'cast_float_to_int', 'cast_int_to_float', @@ -330,37 +337,37 @@ def opimpl_goto(self, target): self.pc = target - @arguments("box", "label") - def opimpl_goto_if_not(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not(self, box, target, orgpc): 
switchcase = box.getint() if switchcase: opnum = rop.GUARD_TRUE else: opnum = rop.GUARD_FALSE - self.metainterp.generate_guard(opnum, box) + self.metainterp.generate_guard(opnum, box, resumepc=orgpc) if not switchcase: self.pc = target - @arguments("box", "label") - def opimpl_goto_if_not_int_is_true(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not_int_is_true(self, box, target, orgpc): condbox = self.execute(rop.INT_IS_TRUE, box) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) - @arguments("box", "label") - def opimpl_goto_if_not_int_is_zero(self, box, target): + @arguments("box", "label", "orgpc") + def opimpl_goto_if_not_int_is_zero(self, box, target, orgpc): condbox = self.execute(rop.INT_IS_ZERO, box) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) for _opimpl in ['int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', 'ptr_eq', 'ptr_ne']: exec py.code.Source(''' - @arguments("box", "box", "label") - def opimpl_goto_if_not_%s(self, b1, b2, target): + @arguments("box", "box", "label", "orgpc") + def opimpl_goto_if_not_%s(self, b1, b2, target, orgpc): if b1 is b2: condbox = %s else: condbox = self.execute(rop.%s, b1, b2) - self.opimpl_goto_if_not(condbox, target) + self.opimpl_goto_if_not(condbox, target, orgpc) ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() @@ -419,7 +426,7 @@ assert box.getint() == 0 target = switchdict.dict[const1.getint()] self.metainterp.generate_guard(rop.GUARD_FALSE, box, - resumepc=target) + resumepc=orgpc) else: # found one of the cases self.implement_guard_value(valuebox, orgpc) @@ -1458,6 +1465,17 @@ def setup_resume_at_op(self, pc): self.pc = pc + def handle_possible_overflow_error(self, label, orgpc, resbox): + if self.metainterp.ovf_flag: + self.metainterp.generate_guard(rop.GUARD_OVERFLOW, None, + resumepc=orgpc) + self.pc = label + return None + else: + 
self.metainterp.generate_guard(rop.GUARD_NO_OVERFLOW, None, + resumepc=orgpc) + return resbox + def run_one_step(self): # Execute the frame forward. This method contains a loop that leaves # whenever the 'opcode_implementations' (which is one of the 'opimpl_' @@ -2023,7 +2041,7 @@ moreargs = [box] + extraargs else: moreargs = list(extraargs) - if opnum == rop.GUARD_EXCEPTION or opnum == rop.GUARD_OVERFLOW: + if opnum == rop.GUARD_EXCEPTION: guard_op = self.history.record(opnum, moreargs, lltype.nullptr(llmemory.GCREF.TO)) else: @@ -2310,7 +2328,7 @@ if isinstance(key, compile.ResumeAtPositionDescr): self.seen_loop_header_for_jdindex = self.jitdriver_sd.index try: - self.prepare_resume_from_failure(key.guard_opnum, deadframe) + self.prepare_resume_from_failure(deadframe, key) if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(Counters.ABORT_BRIDGE) self.interpret() @@ -2453,22 +2471,9 @@ else: assert 0 self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) - def prepare_resume_from_failure(self, opnum, deadframe): - frame = self.framestack[-1] - if opnum == rop.GUARD_FUTURE_CONDITION: - pass - elif opnum == rop.GUARD_TRUE: # a goto_if_not that jumps only now - frame.pc = frame.jitcode.follow_jump(frame.pc) - elif opnum == rop.GUARD_FALSE: # a goto_if_not that stops jumping; - pass # or a switch that was in its "default" case - elif opnum == rop.GUARD_VALUE or opnum == rop.GUARD_CLASS: - pass # the pc is already set to the *start* of the opcode - elif (opnum == rop.GUARD_NONNULL or - opnum == rop.GUARD_ISNULL or - opnum == rop.GUARD_NONNULL_CLASS): - pass # the pc is already set to the *start* of the opcode - elif opnum == rop.GUARD_NO_EXCEPTION or opnum == rop.GUARD_EXCEPTION: - exception = self.cpu.grab_exc_value(deadframe) + def prepare_resume_from_failure(self, deadframe, resumedescr): + exception = self.cpu.grab_exc_value(deadframe) + if isinstance(resumedescr, compile.ResumeGuardExcDescr): if exception: 
self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception)) @@ -2478,20 +2483,8 @@ self.handle_possible_exception() except ChangeFrame: pass - elif opnum == rop.GUARD_NOT_INVALIDATED: - pass # XXX we want to do something special in resume descr, - # but not now - elif opnum == rop.GUARD_NO_OVERFLOW: # an overflow now detected - self.execute_raised(OverflowError(), constant=True) - try: - self.finishframe_exception() - except ChangeFrame: - pass - elif opnum == rop.GUARD_OVERFLOW: # no longer overflowing - self.clear_exception() else: - from rpython.jit.metainterp.resoperation import opname - raise NotImplementedError(opname[opnum]) + assert not exception def get_procedure_token(self, greenkey, with_compiled_targets=False): JitCell = self.jitdriver_sd.warmstate.JitCell @@ -2774,18 +2767,6 @@ else: self.generate_guard(rop.GUARD_NO_EXCEPTION, None, []) - def handle_possible_overflow_error(self): - if self.last_exc_value: - op = self.generate_guard(rop.GUARD_OVERFLOW, None) - op.setref_base(lltype.cast_opaque_ptr(llmemory.GCREF, - self.last_exc_value)) - assert self.class_of_last_exc_is_const - self.last_exc_box = ConstPtr( - lltype.cast_opaque_ptr(llmemory.GCREF, self.last_exc_value)) - self.finishframe_exception() - else: - self.generate_guard(rop.GUARD_NO_OVERFLOW, None) - def assert_no_exception(self): assert not self.last_exc_value @@ -3251,16 +3232,17 @@ print '-> %r' % (resultbox,) assert argcodes[next_argcode] == '>' result_argcode = argcodes[next_argcode + 1] - assert resultbox.type == {'i': history.INT, - 'r': history.REF, - 'f': history.FLOAT}[result_argcode] + if 'ovf' not in name: + assert resultbox.type == {'i': history.INT, + 'r': history.REF, + 'f': history.FLOAT}[result_argcode] else: resultbox = unboundmethod(self, *args) # if resultbox is not None: self.make_result_of_lastop(resultbox) elif not we_are_translated(): - assert self._result_argcode in 'v?' + assert self._result_argcode in 'v?' 
or 'ovf' in name # unboundmethod = getattr(MIFrame, 'opimpl_' + name).im_func argtypes = unrolling_iterable(unboundmethod.argtypes) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -236,6 +236,9 @@ return (self.getopnum() == rop.GUARD_OVERFLOW or self.getopnum() == rop.GUARD_NO_OVERFLOW) + def is_jit_debug(self): + return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST + def is_always_pure(self): return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST @@ -375,6 +378,7 @@ newop.rd_frame_info_list = self.rd_frame_info_list return newop + # =========== # type mixins # =========== @@ -689,7 +693,7 @@ 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set 'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set 'GUARD_NO_OVERFLOW/0d/n', - 'GUARD_OVERFLOW/0d/r', + 'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set 'GUARD_NOT_FORCED_2/0d/n', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d/n', @@ -806,10 +810,12 @@ 'UNICODESETITEM/3/n', 'COND_CALL_GC_WB/1d/n', # [objptr] (for the write barrier) 'COND_CALL_GC_WB_ARRAY/2d/n', # [objptr, arrayindex] (write barr. 
for array) + '_JIT_DEBUG_FIRST', 'DEBUG_MERGE_POINT/*/n', # debugging only 'ENTER_PORTAL_FRAME/2/n', # debugging only 'LEAVE_PORTAL_FRAME/1/n', # debugging only 'JIT_DEBUG/*/n', # debugging only + '_JIT_DEBUG_LAST', 'VIRTUAL_REF_FINISH/2/n', # removed before it's passed to the backend 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5/n', diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -115,10 +115,13 @@ while y > 0: myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) - res += ovfcheck(x * x) - x += 1 - res += ovfcheck(x * x) - y -= 1 + try: + res += ovfcheck(x * x) + x += 1 + res += ovfcheck(x * x) + y -= 1 + except OverflowError: + assert 0 return res res = self.meta_interp(f, [6, 7]) assert res == 1323 @@ -151,7 +154,10 @@ myjitdriver.can_enter_jit(x=x, y=y, res=res) myjitdriver.jit_merge_point(x=x, y=y, res=res) b = y * 2 - res += ovfcheck(x * x) + b + try: + res += ovfcheck(x * x) + b + except OverflowError: + assert 0 y -= 1 return res res = self.meta_interp(f, [6, 7]) @@ -230,8 +236,8 @@ res = self.meta_interp(f, [6, 32, 16]) assert res == 1692 self.check_trace_count(3) - self.check_resops({'int_lt': 2, 'int_gt': 4, 'guard_false': 2, - 'guard_true': 4, 'int_sub': 4, 'jump': 3, + self.check_resops({'int_lt': 4, 'int_gt': 4, 'guard_false': 2, + 'guard_true': 6, 'int_sub': 4, 'jump': 3, 'int_mul': 3, 'int_add': 4}) def test_loop_invariant_mul_ovf2(self): @@ -400,7 +406,7 @@ return externfn(n, n+1) res = self.interp_operations(f, [6]) assert res == 42 - self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0) + self.check_operations_history(int_add=1, int_mul=0, call_i=1, guard_no_exception=0) def test_residual_call_elidable(self): def externfn(x, y): @@ -413,7 +419,7 @@ assert res == 42 # CALL_PURE is not recorded in the 
history if all-constant args self.check_operations_history(int_add=0, int_mul=0, - call=0, call_pure_i=0) + call_i=0, call_pure_i=0) def test_residual_call_elidable_1(self): @elidable @@ -425,7 +431,7 @@ assert res == 42 # CALL_PURE is recorded in the history if not-all-constant args self.check_operations_history(int_add=1, int_mul=0, - call=0, call_pure_i=1) + call_i=0, call_pure_i=1) def test_residual_call_elidable_2(self): myjitdriver = JitDriver(greens = [], reds = ['n']) @@ -653,11 +659,11 @@ # res = self.meta_interp(f, [3, 6], repeat=7, function_threshold=0) assert res == 6 - 4 - 5 - self.check_history(call=0) # because the trace starts in the middle + self.check_history(call_n=0) # because the trace starts in the middle # res = self.meta_interp(f, [60, 84], repeat=7) assert res == 84 - 61 - 62 - self.check_history(call=1) # because the trace starts immediately + self.check_history(call_n=1) # because the trace starts immediately def test_unroll_one_loop_iteration(self): def unroll(code): @@ -679,11 +685,11 @@ res = self.meta_interp(f, [1, 4, 1], enable_opts="", inline=True) assert res == f(1, 4, 1) - self.check_history(call_assembler=0) + self.check_history(call_assembler_i=0) res = self.meta_interp(f, [1, 4, 2], enable_opts="", inline=True) assert res == f(1, 4, 2) - self.check_history(call_assembler=1) + self.check_history(call_assembler_i=1) def test_format(self): def f(n): @@ -723,6 +729,7 @@ elif n == 7: a = 3 else: a = 2 x = intmask(x * 10 + a) + #print "XXXXXXXXXXXXXXXX", x i += 1 return x res = self.meta_interp(f, [0], backendopt=True) @@ -834,7 +841,7 @@ return a.foo * x res = self.interp_operations(f, [42]) assert res == 210 - self.check_operations_history(getfield_gc=1) + self.check_operations_history(getfield_gc_i=1) def test_getfield_immutable(self): class A: @@ -851,7 +858,7 @@ return a.foo * x res = self.interp_operations(f, [42]) assert res == 210 - self.check_operations_history(getfield_gc=0) + self.check_operations_history(getfield_gc_i=0) 
def test_setfield_bool(self): class A: @@ -882,6 +889,24 @@ res = self.interp_operations(f, [1, sys.maxint]) assert res == -42 + def test_ovf_raise(self): + def g(x, y): + try: + return ovfcheck(x * y) + except OverflowError: + raise + + def f(x, y): + try: + return g(x, y) + except OverflowError: + return 3 + + res = self.interp_operations(f, [sys.maxint, 2]) + assert res == 3 + res = self.interp_operations(f, [3, 2]) + assert res == 6 + def test_int_sub_ovf(self): def f(x, y): try: @@ -1356,7 +1381,7 @@ return g(a, b) res = self.interp_operations(f, [3, 5]) assert res == 8 - self.check_operations_history(int_add=0, call=1) + self.check_operations_history(int_add=0, call_i=1) def test_listcomp(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'lst']) @@ -1380,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure=0) + self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -1455,16 +1480,6 @@ res = self.meta_interp(f, [299], listops=True) assert res == f(299) self.check_resops(guard_class=0, guard_value=6) - # - # The original 'guard_class' is rewritten to be directly 'guard_value'. - # Check that this rewrite does not interfere with the descr, which - # should be a full-fledged multivalued 'guard_value' descr. 
- if self.basic: - for loop in get_stats().get_all_loops(): - for op in loop.get_operations(): - if op.getopname() == "guard_value": - descr = op.getdescr() - assert descr.get_index_of_guard_value() >= 0 def test_merge_guardnonnull_guardclass(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'l']) @@ -1866,7 +1881,8 @@ res = self.meta_interp(g, [6, 20]) assert res == g(6, 20) self.check_trace_count(8) - self.check_resops(getarrayitem_gc_i=10) + # 6 extra from sharing guard data + self.check_resops(getarrayitem_gc_i=10 + 6) def test_multiple_specialied_versions_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'z', 'res']) @@ -2055,8 +2071,8 @@ res = self.meta_interp(g, [3, 23]) assert res == 7068153 self.check_trace_count(6) - self.check_resops(guard_true=6, guard_class=2, int_mul=3, - int_add=3, guard_false=3) + self.check_resops(guard_true=8, guard_class=2, int_mul=3, + int_add=3, guard_false=4) def test_dont_trace_every_iteration(self): myjitdriver = JitDriver(greens = [], reds = ['a', 'b', 'i', 'sa']) @@ -2079,7 +2095,7 @@ self.check_enter_count(2) def test_current_trace_length(self): - myjitdriver = JitDriver(greens = ['g'], reds = ['x']) + myjitdriver = JitDriver(greens = ['g'], reds = ['x', 'l']) @dont_look_inside def residual(): print "hi there" @@ -2090,14 +2106,15 @@ residual() y += 1 def f(x, g): + l = [] n = 0 while x > 0: - myjitdriver.can_enter_jit(x=x, g=g) - myjitdriver.jit_merge_point(x=x, g=g) + myjitdriver.can_enter_jit(x=x, g=g, l=l) + myjitdriver.jit_merge_point(x=x, g=g, l=l) loop(g) x -= 1 - n = current_trace_length() - return n + l.append(current_trace_length()) + return l[-2] # not the blackholed version res = self.meta_interp(f, [5, 8]) assert 14 < res < 42 res = self.meta_interp(f, [5, 2]) @@ -2619,7 +2636,10 @@ node2.val = 7 if a >= 100: sa += 1 - sa += ovfcheck(i + i) + try: + sa += ovfcheck(i + i) + except OverflowError: + assert 0 node1 = A(i) i += 1 assert self.meta_interp(f, [20, 7]) == f(20, 7) @@ -2638,7 
+2658,7 @@ i += 1 return sa assert self.meta_interp(f, [20]) == f(20) From noreply at buildbot.pypy.org Sat Oct 3 12:40:45 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sat, 3 Oct 2015 12:40:45 +0200 (CEST) Subject: [pypy-commit] pypy default: (fijal, jerith) Kill done TODOs. Message-ID: <20151003104045.2011E1C0369@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: Changeset: r79950:201580bd2a40 Date: 2015-10-03 12:41 +0200 http://bitbucket.org/pypy/pypy/changeset/201580bd2a40/ Log: (fijal, jerith) Kill done TODOs. diff --git a/rpython/jit/metainterp/optimizeopt/TODO b/rpython/jit/metainterp/optimizeopt/TODO --- a/rpython/jit/metainterp/optimizeopt/TODO +++ b/rpython/jit/metainterp/optimizeopt/TODO @@ -1,6 +1,3 @@ * implement more cases of copying _fields and _items between normal info and ConstPtrInfo when proven constant (look at it) -* reenable cpu check (breaks --fork-before) -* reenable jit iface -* fix OS X, win, arm, 32bit * reenable the int_add optimization From noreply at buildbot.pypy.org Sat Oct 3 14:55:31 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 14:55:31 +0200 (CEST) Subject: [pypy-commit] pypy share-resume-info-frontend: start fighting with sharing guards on the frontend, give up for now Message-ID: <20151003125531.6D1811C0FFE@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: share-resume-info-frontend Changeset: r79951:8f127bb6d398 Date: 2015-10-03 14:55 +0200 http://bitbucket.org/pypy/pypy/changeset/8f127bb6d398/ Log: start fighting with sharing guards on the frontend, give up for now diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -615,10 +615,13 @@ def __init__(self): self.inputargs = None self.operations = [] + self.last_guard_valid = False @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): op = ResOperation(opnum, argboxes, descr) + if 
op.can_invalidate_guard_operation(): + self.last_guard_valid = False if value is None: assert op.type == 'v' elif isinstance(value, bool): diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -581,8 +581,7 @@ op = self.emit_guard_operation(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True - if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or - op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + if not op.can_invalidate_guard_operation(): pass else: self._last_guard_op = None @@ -649,14 +648,6 @@ def getlastop(self): return self._really_emitted_operation - def is_call_pure_pure_canraise(self, op): - if not op.is_call_pure(): - return False - effectinfo = op.getdescr().get_extra_info() - if effectinfo.check_can_raise(ignore_memoryerror=True): - return True - return False - def replace_guard_op(self, old_op_pos, new_op): old_op = self._newoperations[old_op_pos] assert old_op.is_guard() diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2046,7 +2046,9 @@ else: guard_op = self.history.record(opnum, moreargs, None) assert isinstance(guard_op, GuardResOp) - self.capture_resumedata(guard_op, resumepc) + if not self.history.last_guard_valid: + self.capture_resumedata(guard_op, resumepc) + self.history.last_guard_valid = True self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count self.attach_debug_info(guard_op) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -322,6 +322,21 @@ def forget_value(self): pass + def can_invalidate_guard_operation(self): + if ((self.has_no_side_effect() or 
self.is_guard() or self.is_jit_debug() or + self.is_ovf()) and not self.is_call_pure_pure_canraise()): + return False + return True + + def is_call_pure_pure_canraise(self): + if not self.is_call_pure(): + return False + effectinfo = self.getdescr().get_extra_info() + if effectinfo.check_can_raise(ignore_memoryerror=True): + return True + return False + + # =================== # Top of the hierachy From noreply at buildbot.pypy.org Sat Oct 3 15:36:18 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 15:36:18 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: not necessary to do a write when overwriting a known constant attribute Message-ID: <20151003133618.C12B41C146A@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79952:70119492d559 Date: 2015-10-03 15:36 +0200 http://bitbucket.org/pypy/pypy/changeset/70119492d559/ Log: not necessary to do a write when overwriting a known constant attribute diff --git a/pypy/interpreter/test/test_valueprof.py b/pypy/interpreter/test/test_valueprof.py --- a/pypy/interpreter/test/test_valueprof.py +++ b/pypy/interpreter/test/test_valueprof.py @@ -124,3 +124,37 @@ v.see_write(OtherValue()) assert v._vprof_status == SEEN_TOO_MUCH +def test_write_not_necessary_int(): + v = ValueProf() + assert v._vprof_status == SEEN_NOTHING + res = v.see_write(ValueInt(1)) + assert not res + res = v.see_write(ValueInt(1)) + assert res + res = v.see_write(ValueInt(1)) + assert res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(Value()) + assert not res + +def test_write_not_necessary_obj(): + v = ValueProf() + assert v._vprof_status == SEEN_NOTHING + val = Value() + res = v.see_write(val) + assert not res + res = v.see_write(val) + assert res + res = v.see_write(val) + assert res + res = v.see_write(ValueInt(1)) + assert not res + res = v.see_write(ValueInt(2)) + assert not res + res = 
v.see_write(ValueInt(2)) + assert not res + res = v.see_write(Value()) + assert not res diff --git a/pypy/interpreter/valueprof.py b/pypy/interpreter/valueprof.py --- a/pypy/interpreter/valueprof.py +++ b/pypy/interpreter/valueprof.py @@ -29,13 +29,17 @@ raise NotImplementedError("abstract base") def see_write(self, w_value): + """ inform the value profiler of a write. returns False, unless the + value is known to be a constant, and w_value that constant (in that + case the caller can elide the write to the actual object, if that + object already stores a value). """ status = self._vprof_status if status == SEEN_TOO_MUCH: - return + return False if w_value is None: self._vprof_status = SEEN_TOO_MUCH - return + return False if status == SEEN_NOTHING: if self.is_int(w_value): @@ -55,6 +59,8 @@ if self.read_constant_int() != self.get_int_val(w_value): self._vprof_status = SEEN_CONSTANT_CLASS self._vprof_const_cls = w_value.__class__ + else: + return True else: self._vprof_status = SEEN_TOO_MUCH elif status == SEEN_CONSTANT_OBJ: @@ -66,10 +72,13 @@ self._vprof_status = SEEN_CONSTANT_CLASS else: self._vprof_status = SEEN_TOO_MUCH + else: + return True elif status == SEEN_CONSTANT_CLASS: cls = self.read_constant_cls() if cls is not w_value.__class__: self._vprof_status = SEEN_TOO_MUCH + return False def can_fold_read_int(self): return self._vprof_status == SEEN_CONSTANT_INT diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -63,10 +63,14 @@ attr = self.find_map_attr(selector) if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - attr.see_write(w_value) + write_unnecessary = attr.see_write(w_value) if not attr.ever_mutated: attr.ever_mutated = True - obj._mapdict_write_storage(attr.storageindex, w_value) + # if this path is taken, the storage is already filled from the time we + # did the map transition. 
Therefore, if the value profiler says so, we + # can not do the write + if not write_unnecessary: + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -397,6 +397,25 @@ assert obj.getdictvalue(space, "a") == a assert seen == [(a, Value), (a, Value)] + +def test_value_profiling_elide_write(monkeypatch): + monkeypatch.setattr(jit, "we_are_jitted", lambda : True) + class Value(object): + pass + a = Value() + cls = Class() + obj = cls.instantiate() + a1 = Value() + obj.setdictvalue(space, "a", a1) + obj = cls.instantiate() + obj.setdictvalue(space, "a", a1) + storage = obj.storage + # replace storage, both reads and writes of a1 should still work + obj.storage = None + assert obj.getdictvalue(space, "a") is a1 + obj.setdictvalue(space, "a", a1) + assert obj.getdictvalue(space, "a") is a1 + # ___________________________________________________________ # dict tests From noreply at buildbot.pypy.org Sat Oct 3 16:21:55 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 16:21:55 +0200 (CEST) Subject: [pypy-commit] pypy more-pending-setfields: try to do more pending setfields Message-ID: <20151003142155.9813D1C069F@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: more-pending-setfields Changeset: r79953:39d268efdfe1 Date: 2015-10-03 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/39d268efdfe1/ Log: try to do more pending setfields diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,17 +127,17 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. 
in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: - python = sys.executable + #if hasattr(sys, 'pypy_objspaceclass'): + # # if 'python' is actually PyPy, e.g. in a virtualenv, then + # # try hard to find a real CPython + # try: + # python = subprocess.check_output( + # 'env -i $SHELL -l -c "which python"', shell=True).strip() + # except subprocess.CalledProcessError: + # # did not work, fall back to 'python' + # python = 'python' + #else: + python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -469,10 +469,10 @@ if op is None: continue val = op.getarg(1) - if self.optimizer.is_virtual(val): - pendingfields.append(op) - continue - cf.force_lazy_setfield(self, descr) + #if self.optimizer.is_virtual(val): + pendingfields.append(op) + # continue + #cf.force_lazy_setfield(self, descr) for descr, submap in self.cached_arrayitems.iteritems(): for index, cf in submap.iteritems(): op = cf._lazy_setfield diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -417,8 +417,9 @@ self.register_box(box) self.register_box(fieldbox) info = optimizer.getptrinfo(fieldbox) - assert info is not None and info.is_virtual() - info.visitor_walk_recursive(fieldbox, self, optimizer) + #assert info is not None and info.is_virtual() + if info and info.is_virtual(): + info.visitor_walk_recursive(fieldbox, self, optimizer) self._number_virtuals(liveboxes, optimizer, v) 
self._add_pending_fields(optimizer, pending_setfields) From noreply at buildbot.pypy.org Sat Oct 3 16:28:33 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 16:28:33 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: more test_pypy_c fixes Message-ID: <20151003142833.32E021C069F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79954:a891bd9b2efa Date: 2015-10-03 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/a891bd9b2efa/ Log: more test_pypy_c fixes diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -111,20 +111,16 @@ return sa # log = self.run(main, [1000]) - assert log.result == 4000 + assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" guard_not_invalidated(descr=...) i12 = int_is_true(i4) guard_true(i12, descr=...) - i10p = getfield_gc_pure_i(p10, descr=...) - i10 = int_mul_ovf(2, i10p) + i14 = int_add_ovf(i13, 2) guard_no_overflow(descr=...) - i14 = int_add_ovf(i13, i10) + i13 = int_add_ovf(i14, 2) guard_no_overflow(descr=...) - i13 = int_add_ovf(i14, i9) - guard_no_overflow(descr=...) - setfield_gc(p17, p10, descr=...) i17 = int_sub_ovf(i4, 1) guard_no_overflow(descr=...) --TICK-- @@ -146,12 +142,12 @@ assert log.result == 1000 * 999 / 2 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - guard_not_invalidated(descr=...) i15 = int_lt(i10, i11) guard_true(i15, descr=...) i17 = int_add(i10, 1) + setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) + guard_not_invalidated(descr=...) i18 = force_token() - setfield_gc(p9, i17, descr=<.* .*W_XRangeIterator.inst_current .*>) i21 = int_lt(i10, 0) guard_false(i21, descr=...) i22 = int_lt(i10, i14) @@ -181,9 +177,9 @@ i16 = int_ge(i11, i12) guard_false(i16, descr=...) 
i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i21 = force_token() i88 = int_sub(i9, 1) i25 = int_ge(i11, i9) guard_false(i25, descr=...) @@ -214,9 +210,9 @@ i17 = int_mul(i11, i14) i18 = int_add(i15, i17) i20 = int_add(i11, 1) - i21 = force_token() setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>) guard_not_invalidated? + i19 = force_token() i95 = int_sub(i9, 1) i23 = int_lt(i18, 0) guard_false(i23, descr=...) @@ -270,16 +266,15 @@ guard_false(i16, descr=...) p17 = getarrayitem_gc_r(p16, i12, descr=) i19 = int_add(i12, 1) + i21 = getfield_gc_i(p17, descr=) setfield_gc(p9, i19, descr=) - guard_nonnull_class(p17, ..., descr=...) - guard_not_invalidated? - i21 = getfield_gc_i(p17, descr=) i23 = int_lt(0, i21) guard_true(i23, descr=...) i24 = getfield_gc_i(p17, descr=) i25 = getarrayitem_raw_i(i24, 0, descr=<.*>) i27 = int_lt(1, i21) guard_false(i27, descr=...) + guard_not_invalidated? i28 = int_add_ovf(i10, i25) guard_no_overflow(descr=...) --TICK-- @@ -304,6 +299,7 @@ assert log.jit_summary.tracing_no == 1 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated? i11 = int_lt(i7, 300) guard_true(i11, descr=...) i12 = int_add_ovf(i8, i9) diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -24,9 +24,9 @@ assert log.result == 300 loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i14 = int_lt(i6, i9) guard_true(i14, descr=...) - guard_not_invalidated(descr=...) i16 = int_eq(i6, %d) guard_false(i16, descr=...) i15 = int_mod(i6, i10) @@ -64,9 +64,9 @@ assert log.result == main(1100) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) 
i11 = int_lt(i6, i7) guard_true(i11, descr=...) - guard_not_invalidated(descr=...) i13 = int_eq(i6, %d) # value provided below guard_false(i13, descr=...) i15 = int_mod(i6, 10) @@ -105,9 +105,9 @@ assert log.result == main(1000) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i79 = int_gt(i74, 0) guard_true(i79, descr=...) - guard_not_invalidated(descr=...) p80 = call_r(ConstClass(ll_int2dec__Signed), i74, descr=) guard_no_exception(descr=...) i85 = strlen(p80) diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -30,7 +30,7 @@ # This could, of course stand some improvement, to remove all these # arithmatic ops, but we've removed all the core overhead. assert loop.match_by_id("struct", """ - guard_not_invalidated(descr=...) + guard_not_invalidated? # struct.pack %s i11 = int_and(i4, 255) @@ -67,7 +67,7 @@ loop, = log.loops_by_filename(self.filepath) assert loop.match_by_id('struct', """ - guard_not_invalidated(descr=...) + guard_not_invalidated? # struct.pack %s i11 = int_and(i4, 255) diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py --- a/pypy/module/pypyjit/test_pypy_c/test_thread.py +++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py @@ -20,6 +20,7 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i2 = int_lt(i0, i1) guard_true(i2, descr=...) i3 = int_add(i0, 1) @@ -40,12 +41,11 @@ assert round(log.result, 6) == round(main(500), 6) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i53 = int_lt(i48, i27) guard_true(i53, descr=...) - i54 = int_add_ovf(i48, i47) - guard_no_overflow(descr=...) 
+ i54 = int_add(i48, 1) --TICK-- - i58 = arraylen_gc(p43, descr=...) jump(..., descr=...) """) @@ -60,6 +60,7 @@ assert log.result == main(500) loop, = log.loops_by_filename(self.filepath) assert loop.match(""" + guard_not_invalidated(descr=...) i56 = int_gt(i44, 0) guard_true(i56, descr=...) p57 = force_token() @@ -67,11 +68,11 @@ i58 = call_release_gil_i(0, _, i37, 1, descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) + guard_not_invalidated(descr=...) i58 = int_sub(i44, 1) i59 = call_i(ConstClass(RPyThreadReleaseLock), i37, descr=) i60 = int_is_true(i59) guard_false(i60, descr=...) - guard_not_invalidated(descr=...) --TICK-- jump(..., descr=...) """) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -23,19 +23,17 @@ i60 = int_lt(i58, i31) guard_true(i60, descr=...) i61 = int_add(i58, 1) + setfield_gc(p18, i61, descr=) + guard_not_invalidated(descr=...) p62 = getfield_gc_r(ConstPtr(ptr37), descr=) - setfield_gc(p18, i61, descr=) guard_value(p62, ConstPtr(ptr39), descr=...) - guard_not_invalidated(descr=...) p64 = getfield_gc_r(ConstPtr(ptr40), descr=) guard_value(p64, ConstPtr(ptr42), descr=...) p65 = getfield_gc_r(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) - p66 = getfield_gc_r(p14, descr=) - guard_nonnull_class(p66, ..., descr=...) p67 = force_token() setfield_gc(p0, p67, descr=) - p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), ConstPtr(ptr49), ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) 
From noreply at buildbot.pypy.org Sat Oct 3 16:28:35 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 3 Oct 2015 16:28:35 +0200 (CEST) Subject: [pypy-commit] pypy value-profiling: merge Message-ID: <20151003142835.6B0E01C069F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: value-profiling Changeset: r79955:4a9ec1573aca Date: 2015-10-03 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/4a9ec1573aca/ Log: merge diff --git a/pypy/interpreter/test/test_valueprof.py b/pypy/interpreter/test/test_valueprof.py --- a/pypy/interpreter/test/test_valueprof.py +++ b/pypy/interpreter/test/test_valueprof.py @@ -124,3 +124,37 @@ v.see_write(OtherValue()) assert v._vprof_status == SEEN_TOO_MUCH +def test_write_not_necessary_int(): + v = ValueProf() + assert v._vprof_status == SEEN_NOTHING + res = v.see_write(ValueInt(1)) + assert not res + res = v.see_write(ValueInt(1)) + assert res + res = v.see_write(ValueInt(1)) + assert res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(Value()) + assert not res + +def test_write_not_necessary_obj(): + v = ValueProf() + assert v._vprof_status == SEEN_NOTHING + val = Value() + res = v.see_write(val) + assert not res + res = v.see_write(val) + assert res + res = v.see_write(val) + assert res + res = v.see_write(ValueInt(1)) + assert not res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(ValueInt(2)) + assert not res + res = v.see_write(Value()) + assert not res diff --git a/pypy/interpreter/valueprof.py b/pypy/interpreter/valueprof.py --- a/pypy/interpreter/valueprof.py +++ b/pypy/interpreter/valueprof.py @@ -29,13 +29,17 @@ raise NotImplementedError("abstract base") def see_write(self, w_value): + """ inform the value profiler of a write. 
returns False, unless the + value is known to be a constant, and w_value that constant (in that + case the caller can elide the write to the actual object, if that + object already stores a value). """ status = self._vprof_status if status == SEEN_TOO_MUCH: - return + return False if w_value is None: self._vprof_status = SEEN_TOO_MUCH - return + return False if status == SEEN_NOTHING: if self.is_int(w_value): @@ -55,6 +59,8 @@ if self.read_constant_int() != self.get_int_val(w_value): self._vprof_status = SEEN_CONSTANT_CLASS self._vprof_const_cls = w_value.__class__ + else: + return True else: self._vprof_status = SEEN_TOO_MUCH elif status == SEEN_CONSTANT_OBJ: @@ -66,10 +72,13 @@ self._vprof_status = SEEN_CONSTANT_CLASS else: self._vprof_status = SEEN_TOO_MUCH + else: + return True elif status == SEEN_CONSTANT_CLASS: cls = self.read_constant_cls() if cls is not w_value.__class__: self._vprof_status = SEEN_TOO_MUCH + return False def can_fold_read_int(self): return self._vprof_status == SEEN_CONSTANT_INT diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -63,10 +63,14 @@ attr = self.find_map_attr(selector) if attr is None: return self.terminator._write_terminator(obj, selector, w_value) - attr.see_write(w_value) + write_unnecessary = attr.see_write(w_value) if not attr.ever_mutated: attr.ever_mutated = True - obj._mapdict_write_storage(attr.storageindex, w_value) + # if this path is taken, the storage is already filled from the time we + # did the map transition. 
Therefore, if the value profiler says so, we + # can not do the write + if not write_unnecessary: + obj._mapdict_write_storage(attr.storageindex, w_value) return True def delete(self, obj, selector): diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -397,6 +397,25 @@ assert obj.getdictvalue(space, "a") == a assert seen == [(a, Value), (a, Value)] + +def test_value_profiling_elide_write(monkeypatch): + monkeypatch.setattr(jit, "we_are_jitted", lambda : True) + class Value(object): + pass + a = Value() + cls = Class() + obj = cls.instantiate() + a1 = Value() + obj.setdictvalue(space, "a", a1) + obj = cls.instantiate() + obj.setdictvalue(space, "a", a1) + storage = obj.storage + # replace storage, both reads and writes of a1 should still work + obj.storage = None + assert obj.getdictvalue(space, "a") is a1 + obj.setdictvalue(space, "a", a1) + assert obj.getdictvalue(space, "a") is a1 + # ___________________________________________________________ # dict tests From noreply at buildbot.pypy.org Sat Oct 3 17:39:03 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sat, 3 Oct 2015 17:39:03 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Move intbounds into non-recursive model. Message-ID: <20151003153903.F06791C0369@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r79956:9c9cd68a9013 Date: 2015-10-03 17:38 +0200 http://bitbucket.org/pypy/pypy/changeset/9c9cd68a9013/ Log: Move intbounds into non-recursive model. 
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -500,6 +500,9 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) + self.optimize_GETFIELD_GC_I_callback(op, structinfo, cf) + + def optimize_GETFIELD_GC_I_callback(self, op, structinfo, cf): # then remember the result of reading the field structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I @@ -552,6 +555,9 @@ # default case: produce the operation self.make_nonnull(op.getarg(0)) self.emit_operation(op) + self.optimize_GETARRAYITEM_GC_I_callback(op, cf, arrayinfo, indexb) + + def optimize_GETARRAYITEM_GC_I_callback(self, op, cf, arrayinfo, indexb): # the remember the result of reading the array item if cf is not None: arrayinfo.setitem(op.getdescr(), indexb.getint(), diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -38,16 +38,27 @@ return n +class IntBoundsCallbackArgs(object): + def __init__(self, op, callback_func=None, b1=None, b2=None): + self.op = op + self.callback_func = callback_func + self.b1 = b1 + self.b2 = b2 + + def callback(self): + if self.callback_func is not None: + self.callback_func(self.op, self.b1, self.b2) + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" + def opt_default(self, op): + return IntBoundsCallbackArgs(op) + def propagate_forward(self, op): - dispatch_opt(self, op) - - def opt_default(self, op): - assert not op.is_ovf() - self.emit_operation(op) + return dispatch_opt(self, op) def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt @@ 
-61,7 +72,9 @@ dispatch_bounds_ops(self, box) def _optimize_guard_true_false_value(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self._optimize_guard_true_false_value_callback) + + def _optimize_guard_true_false_value_callback(self, op, bn1, bn2): if op.getarg(0).type == 'i': self.propagate_bounds_backward(op.getarg(0)) @@ -79,8 +92,10 @@ self.make_equal_to(op, v1) else: self.make_constant_int(op, 0) - return - self.emit_operation(op) + return None + return IntBoundsCallbackArgs(op, self.optimize_INT_OR_or_XOR_callback, b1, b2) + + def optimize_INT_OR_or_XOR_callback(self, op, b1, b2): if b1.known_ge(IntBound(0, 0)) and \ b2.known_ge(IntBound(0, 0)): r = self.getintbound(op) @@ -93,8 +108,9 @@ def optimize_INT_AND(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_AND_callback, b1, b2) + def optimize_INT_AND_callback(self, op, b1, b2): r = self.getintbound(op) if b2.is_constant(): val = b2.lower @@ -109,7 +125,9 @@ r.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_callback) + + def optimize_INT_SUB_callback(self, op, bn1, bn2): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.sub_bound(b2) @@ -120,8 +138,7 @@ arg1 = self.get_box_replacement(op.getarg(0)) arg2 = self.get_box_replacement(op.getarg(1)) if self.is_raw_ptr(arg1) or self.is_raw_ptr(arg2): - self.emit_operation(op) - return + return IntBoundsCallbackArgs(op) v1 = self.getintbound(arg1) v2 = self.getintbound(arg2) @@ -155,7 +172,9 @@ arg2 = ConstInt(sum) op = self.replace_op_with(op, rop.INT_ADD, args=[arg1, arg2]) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_callback) + + def optimize_INT_ADD_callback(self, op, bn1, bn2): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r 
= self.getintbound(op) @@ -166,7 +185,9 @@ def optimize_INT_MUL(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_callback, b1, b2) + + def optimize_INT_MUL_callback(self, op, b1, b2): r = self.getintbound(op) b = b1.mul_bound(b2) if b.bounded(): @@ -175,7 +196,9 @@ def optimize_INT_FLOORDIV(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_FLOORDIV_callback, b1, b2) + + def optimize_INT_FLOORDIV_callback(self, op, b1, b2): r = self.getintbound(op) r.intersect(b1.div_bound(b2)) @@ -192,13 +215,15 @@ arg2 = ConstInt(val-1) op = self.replace_op_with(op, rop.INT_AND, args=[arg1, arg2]) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_MOD_callback, b2, known_nonneg) + + def optimize_INT_MOD_callback(self, op, b2, known_nonneg): if b2.is_constant(): val = b2.getint() r = self.getintbound(op) if val < 0: if val == -sys.maxint-1: - return # give up + return None # give up val = -val if known_nonneg: r.make_ge(IntBound(0, 0)) @@ -211,7 +236,11 @@ b1 = self.getintbound(arg0) arg1 = self.get_box_replacement(op.getarg(1)) b2 = self.getintbound(arg1) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_LSHIFT_callback, b1, b2) + + def optimize_INT_LSHIFT_callback(self, op, b1, b2): + arg0 = self.get_box_replacement(op.getarg(0)) + arg1 = self.get_box_replacement(op.getarg(1)) r = self.getintbound(op) b = b1.lshift_bound(b2) r.intersect(b) @@ -230,10 +259,12 @@ if b.has_lower and b.has_upper and b.lower == b.upper: # constant result (likely 0, for rshifts that kill all bits) self.make_constant_int(op, b.lower) - else: - self.emit_operation(op) - r = self.getintbound(op) - r.intersect(b) + return None + return IntBoundsCallbackArgs(op, self.optimize_INT_RSHIFT_callback, b) + + def 
optimize_INT_RSHIFT_callback(self, op, b, bn): + r = self.getintbound(op) + r.intersect(b) def optimize_GUARD_NO_OVERFLOW(self, op): lastop = self.last_emitted_operation @@ -259,7 +290,7 @@ self.pure_from_args(rop.INT_SUB, [args[0], result], args[1]) #elif opnum == rop.INT_MUL_OVF: # self.pure(rop.INT_MUL, args[:], result) - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_GUARD_OVERFLOW(self, op): # If INT_xxx_OVF was replaced by INT_xxx, *but* we still see @@ -272,7 +303,7 @@ raise InvalidLoop('An INT_xxx_OVF was proven not to overflow but' + 'guarded with GUARD_OVERFLOW') - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_ADD_OVF(self, op): b1 = self.getintbound(op.getarg(0)) @@ -283,7 +314,9 @@ # by optimize_GUARD_NO_OVERFLOW; if we see instead an # optimize_GUARD_OVERFLOW, then InvalidLoop. op = self.replace_op_with(op, rop.INT_ADD) - self.emit_operation(op) # emit the op + return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_OVF_callback, resbound) + + def optimize_INT_ADD_OVF_callback(self, op, resbound, bn): r = self.getintbound(op) r.intersect(resbound) @@ -294,11 +327,13 @@ b1 = self.getintbound(arg1) if arg0.same_box(arg1): self.make_constant_int(op, 0) - return + return None resbound = b0.sub_bound(b1) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_SUB) - self.emit_operation(op) # emit the op + return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_OVF_callback, resbound) + + def optimize_INT_SUB_OVF_callback(self, op, resbound, bn): r = self.getintbound(op) r.intersect(resbound) @@ -308,7 +343,9 @@ resbound = b1.mul_bound(b2) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_MUL) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_OVF_callback, resbound) + + def optimize_INT_MUL_OVF_callback(self, op, resbound, bn): r = self.getintbound(op) r.intersect(resbound) @@ -322,7 +359,7 @@ elif b1.known_ge(b2) or arg1 is arg2: self.make_constant_int(op, 
0) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_GT(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -334,7 +371,7 @@ elif b1.known_le(b2) or arg1 is arg2: self.make_constant_int(op, 0) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_LE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -346,7 +383,7 @@ elif b1.known_gt(b2): self.make_constant_int(op, 0) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_GE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -358,7 +395,7 @@ elif b1.known_lt(b2): self.make_constant_int(op, 0) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_EQ(self, op): arg0 = self.get_box_replacement(op.getarg(0)) @@ -372,7 +409,7 @@ elif arg0.same_box(arg1): self.make_constant_int(op, 1) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_NE(self, op): arg0 = self.get_box_replacement(op.getarg(0)) @@ -386,14 +423,14 @@ elif arg0 is arg1: self.make_constant_int(op, 0) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_FORCE_GE_ZERO(self, op): b = self.getintbound(op.getarg(0)) if b.known_ge(IntBound(0, 0)): self.make_equal_to(op, op.getarg(0)) else: - self.emit_operation(op) + return IntBoundsCallbackArgs(op) def optimize_INT_SIGNEXT(self, op): b = self.getintbound(op.getarg(0)) @@ -404,29 +441,39 @@ if bounds.contains_bound(b): self.make_equal_to(op, op.getarg(0)) else: - self.emit_operation(op) - bres = self.getintbound(op) - bres.intersect(bounds) + return IntBoundsCallbackArgs(op, self.optimize_INT_SIGNEXT_callback, bounds) + + def optimize_INT_SIGNEXT_callback(self, op, bounds, bn): + bres = self.getintbound(op) + bres.intersect(bounds) def optimize_ARRAYLEN_GC(self, op): array = self.ensure_ptr_info_arg0(op) - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_ARRAYLEN_GC_callback, array) 
+ + def optimize_ARRAYLEN_GC_callback(self, op, array, bn): self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_STRLEN_callback) + + def optimize_STRLEN_callback(self, op, bn1, bn2): self.make_nonnull_str(op.getarg(0), vstring.mode_string) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) def optimize_UNICODELEN(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_UNICODELEN_callback) + + def optimize_UNICODELEN_callback(self, op, bn1, bn2): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) def optimize_STRGETITEM(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_STRGETITEM_callback) + + def optimize_STRGETITEM_callback(self, op, bn1, bn2): v1 = self.getintbound(op) v2 = self.getptrinfo(op.getarg(0)) intbound = self.getintbound(op.getarg(1)) @@ -438,7 +485,9 @@ v1.make_lt(IntUpperBound(256)) def optimize_GETFIELD_RAW_I(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_GETFIELD_RAW_I_callback) + + def optimize_GETFIELD_RAW_I_callback(self, op, bn1, bn2): descr = op.getdescr() if descr.is_integer_bounded(): b1 = self.getintbound(op) @@ -456,7 +505,9 @@ optimize_GETINTERIORFIELD_GC_F = optimize_GETFIELD_RAW_I def optimize_GETARRAYITEM_RAW_I(self, op): - self.emit_operation(op) + return IntBoundsCallbackArgs(op, self.optimize_GETARRAYITEM_RAW_I_callback) + + def optimize_GETARRAYITEM_RAW_I_callback(self, op, bn1, bn2): descr = op.getdescr() if descr and descr.is_item_integer_bounded(): intbound = self.getintbound(op) @@ -469,7 +520,9 @@ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_RAW_I def optimize_UNICODEGETITEM(self, op): - self.emit_operation(op) + return 
IntBoundsCallbackArgs(op, self.optimize_UNICODEGETITEM_callback) + + def optimize_UNICODEGETITEM_callback(self, op, bn1, bn2): b1 = self.getintbound(op) b1.make_ge(IntLowerBound(0)) v2 = self.getptrinfo(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -272,7 +272,7 @@ def set_optimizations(self, optimizations): if optimizations: - self.first_optimization = optimizations[0] + self.first_optimization = optimizations[1] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] optimizations[-1].next_optimization = self @@ -521,14 +521,14 @@ last = len(ops) for i in range(last): self._really_emitted_operation = None - self.first_optimization.propagate_forward(ops[i]) + self.send_extra_operation(ops[i]) # accumulate counters if flush: self.flush() if extra_jump: - self.first_optimization.propagate_forward(ops[-1]) + self.send_extra_operation(ops[-1]) self.resumedata_memo.update_counters(self.metainterp_sd.profiler) - + return (BasicLoopInfo(newargs, self.quasi_immutable_deps), self._newoperations) @@ -538,7 +538,12 @@ op.set_forwarded(None) def send_extra_operation(self, op): - self.first_optimization.propagate_forward(op) + callback_args = self.optimizations[0].propagate_forward(op) + if callback_args is None: + return + self.optimizations[0].last_emitted_operation = callback_args.op + self.first_optimization.propagate_forward(callback_args.op) + callback_args.callback() def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -109,6 +109,9 @@ # otherwise, the operation remains self.emit_operation(op) + self.optimize_default_callback(op, save, 
nextop) + + def optimize_default_callback(self, op, save, nextop): if op.returns_bool_result(): self.getintbound(op).make_bool() if save: @@ -159,6 +162,9 @@ opnum = OpHelpers.call_for_descr(op.getdescr()) newop = self.optimizer.replace_op_with(op, opnum) self.emit_operation(newop) + self.optimize_CALL_PURE_I_callback(op) + + def optimize_CALL_PURE_I_callback(self, op): self.call_pure_positions.append( len(self.optimizer._newoperations) - 1) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -123,7 +123,10 @@ self.make_constant_int(op, 0) else: self.emit_operation(op) - self.optimizer.pure_reverse(op) + self.optimize_INT_SUB_callback(op) + + def optimize_INT_SUB_callback(self, op): + self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): if self.is_raw_ptr(op.getarg(0)) or self.is_raw_ptr(op.getarg(1)): @@ -141,7 +144,10 @@ self.make_equal_to(op, arg1) else: self.emit_operation(op) - self.optimizer.pure_reverse(op) + self.optimize_INT_ADD_callback(op) + + def optimize_INT_ADD_callback(self, op): + self.optimizer.pure_reverse(op) def optimize_INT_MUL(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -228,6 +234,9 @@ self.emit_operation(newop) return self.emit_operation(op) + self.optimize_FLOAT_MUL_callback(op) + + def optimize_FLOAT_MUL_callback(self, op): self.optimizer.pure_reverse(op) def optimize_FLOAT_TRUEDIV(self, op): @@ -253,6 +262,9 @@ def optimize_FLOAT_NEG(self, op): self.emit_operation(op) + self.optimize_FLOAT_NEG_callback(op) + + def optimize_FLOAT_NEG_callback(self, op): self.optimizer.pure_reverse(op) def optimize_guard(self, op, constbox, emit_operation=True): @@ -275,12 +287,15 @@ raise InvalidLoop('A GUARD_VALUE (%s) ' 'was proven to always fail' % r) return - + if emit_operation: self.emit_operation(op) + self.optimize_guard_callback(op, box, constbox) + else: + 
self.optimize_guard_callback(op, box, constbox) + + def optimize_guard_callback(self, op, box, constbox): self.make_constant(box, constbox) - #if self.optimizer.optheap: XXX - # self.optimizer.optheap.value_updated(value, self.getvalue(constbox)) def optimize_GUARD_ISNULL(self, op): info = self.getptrinfo(op.getarg(0)) @@ -292,6 +307,9 @@ raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always ' 'fail' % r) self.emit_operation(op) + self.optimize_GUARD_ISNULL_callback(op) + + def optimize_GUARD_ISNULL_callback(self, op): self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_IS_OBJECT(self, op): @@ -368,6 +386,9 @@ raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always ' 'fail' % r) self.emit_operation(op) + self.optimize_GUARD_NONNULL_callback(op) + + def optimize_GUARD_NONNULL_callback(self, op): self.make_nonnull(op.getarg(0)) self.getptrinfo(op.getarg(0)).mark_last_guard(self.optimizer) @@ -463,9 +484,15 @@ # on the same box. self.optimizer.replace_guard(op, info) self.emit_operation(op) - self.make_constant_class(op.getarg(0), expectedclassbox, False) + self.optimize_GUARD_CLASS_callback_1(op, expectedclassbox) return self.emit_operation(op) + self.optimize_GUARD_CLASS_callback_2(op, expectedclassbox) + + def optimize_GUARD_CLASS_callback_1(self, op, expectedclassbox): + self.make_constant_class(op.getarg(0), expectedclassbox, False) + + def optimize_GUARD_CLASS_callback_2(self, op, expectedclassbox): self.make_constant_class(op.getarg(0), expectedclassbox) def optimize_GUARD_NONNULL_CLASS(self, op): @@ -495,6 +522,9 @@ newop = self.replace_op_with(op, OpHelpers.call_for_descr(op.getdescr())) self.emit_operation(newop) + self.optimize_CALL_LOOPINVARIANT_I_callback(newop, op, key) + + def optimize_CALL_LOOPINVARIANT_I_callback(self, newop, op, key): self.loop_invariant_producer[key] = self.optimizer.getlastop() self.loop_invariant_results[key] = op optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I diff --git 
a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -69,13 +69,16 @@ if self._last_guard_not_forced_2 is not None: guard_op = self._last_guard_not_forced_2 self.emit_operation(op) - guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) - i = len(self.optimizer._newoperations) - 1 - assert i >= 0 - self.optimizer._newoperations.insert(i, guard_op) + self.optimize_FINISH_callback(op, guard_op) else: self.emit_operation(op) + def optimize_FINISH_callback(self, op, guard_op): + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) + def optimize_CALL_MAY_FORCE_I(self, op): effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex @@ -101,8 +104,6 @@ vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class vref_descr = vrefinfo.descr - descr_virtual_token = vrefinfo.descr_virtual_token - descr_forced = vrefinfo.descr_forced # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. 
The jit_virtual_ref structure may be forced soon, @@ -113,6 +114,12 @@ newop.set_forwarded(vrefvalue) token = ResOperation(rop.FORCE_TOKEN, []) self.emit_operation(token) + self.optimize_VIRTUAL_REF_callback(op, vrefvalue, newop, token) + + def optimize_VIRTUAL_REF_callback(self, op, vrefvalue, newop, token): + vrefinfo = self.optimizer.metainterp_sd.virtualref_info + descr_virtual_token = vrefinfo.descr_virtual_token + descr_forced = vrefinfo.descr_forced vrefvalue.setfield(descr_virtual_token, newop, token) vrefvalue.setfield(descr_forced, newop, self.optimizer.cpu.ts.CONST_NULLREF) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -453,7 +453,10 @@ else: self.make_nonnull_str(op, mode) self.emit_operation(op) - self.pure_from_args(mode.STRLEN, [op], op.getarg(0)) + self._optimize_NEWSTR_callback(op, mode) + + def _optimize_NEWSTR_callback(self, op, mode): + self.pure_from_args(mode.STRLEN, [op], op.getarg(0)) def optimize_STRSETITEM(self, op): opinfo = self.getptrinfo(op.getarg(0)) From noreply at buildbot.pypy.org Sat Oct 3 22:49:18 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sat, 3 Oct 2015 22:49:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: write a draft Message-ID: <20151003204918.B0D161C069F@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5564:172584f486a0 Date: 2015-10-03 22:00 +0200 http://bitbucket.org/pypy/extradoc/changeset/172584f486a0/ Log: write a draft diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst new file mode 100644 --- /dev/null +++ b/blog/draft/warmup-improvements-2.rst @@ -0,0 +1,71 @@ + +Hello everyone! + +This is the second part of the series of warmup improvements and +memory consumption. This post covers recent work on sharing guard +resume data that was recently merged to trunk. 
It will be a part +of the next official PyPy release. To understand what it does, let's +start with a loop for a simple example:: + + def f(): + s = 0 + for i in range(100000): + s += 1 + +which compiles to the following loop:: + + label(p0, p1, p4, p6, p7, i39, i25, p15, p24, i44, i29, descr=TargetToken(4364727712)) + # check the loop exit + i45 = i44 >= i29 + guard(i45 is false) + # increase the loop counter + i46 = i44 + 1 + # store the index into special W_RangeObject + ((pypy.objspace.std.iterobject.W_AbstractSeqIterObject)p15).inst_index = i46 + # add s += 1 with overflow checking + i47 = int_add_ovf(i39, 1) + guard_no_overflow(descr=) + guard_not_invalidated(descr=) + i49 = getfield_raw_i(4336405536, descr=) + i50 = i49 < 0 + guard(i50 is false) + jump(p0, p1, p4, p6, p7, i47, i44, p15, p24, i46, i29, descr=TargetToken(4364727712)) + +Now each ``guard`` here needs a bit of data to know how to exit the compiled +assembler into the interpreter, and potentially to compile a bridge in the future. +Since over 90% of guards never fail, this is incredibly wasteful - we have a copy +of the resume data for each guard. When two guards are next to each other or the +operations in between them are pure, we can safely redo the operations or to simply +put, resume in the previous guard. That means every now and again we execute a few +operations extra, but not storing extra info saves quite a bit of time and a bit of memory. +I've done some measurments on annotating & rtyping pypy which is pretty memory hungry +program that compiles a fair bit. I measured, respectively: + +* total time the translation step took (annotating or rtyping) + +* time it took for tracing (that excludes backend time for the total JIT time) at + the end of rtyping. + +* memory the GC feels responsible for after the step. 
The real amount of memory + consumed will always be larger and the coefficient of savings is in 1.5-2x mark + +Here is the table: + ++---------+-----------------+--------------+-------------------+----------------+--------------+ +| branch | time annotation | time rtyping | memory annotation | memory rtyping | tracing time | ++---------+-----------------+--------------+-------------------+----------------+--------------+ +| default | 317s | 454s | 707M | 1349M | 60s | ++---------+-----------------+--------------+-------------------+----------------+--------------+ +| sharing | 302s | 430s | 595M | 1070M | 51s | ++---------+-----------------+--------------+-------------------+----------------+--------------+ +| win | 4.8% | 5.5% | 19% | 26% | 17% | ++---------+-----------------+--------------+-------------------+----------------+--------------+ + +Obviously pypy translation is a bit extreme exampl - the vast majority of the code out there +does not have that much code involved that's being jitted. However, it's at the very least +a good win for us :-) + +We will continue to improve the warmup performance and keep you posted! + +Cheers, +fijal From noreply at buildbot.pypy.org Sat Oct 3 23:00:12 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Oct 2015 23:00:12 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: some random changes, hopefully mostly improvements Message-ID: <20151003210012.CE0231C0FFE@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: extradoc Changeset: r5565:8af39e15a6a9 Date: 2015-10-04 00:01 +0300 http://bitbucket.org/pypy/extradoc/changeset/8af39e15a6a9/ Log: some random changes, hopefully mostly improvements diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst --- a/blog/draft/warmup-improvements-2.rst +++ b/blog/draft/warmup-improvements-2.rst @@ -1,8 +1,8 @@ Hello everyone! -This is the second part of the series of warmup improvements and -memory consumption. 
This post covers recent work on sharing guard +This is the second part of the series of improvement in warmup time and +memory consumption in the PyPy JIT. This post covers recent work on sharing guard resume data that was recently merged to trunk. It will be a part of the next official PyPy release. To understand what it does, let's start with a loop for a simple example:: @@ -31,15 +31,15 @@ guard(i50 is false) jump(p0, p1, p4, p6, p7, i47, i44, p15, p24, i46, i29, descr=TargetToken(4364727712)) -Now each ``guard`` here needs a bit of data to know how to exit the compiled -assembler into the interpreter, and potentially to compile a bridge in the future. -Since over 90% of guards never fail, this is incredibly wasteful - we have a copy +Now each ``guard`` needs a bit of data to know how to exit the compiled +assembler back up to the interpreter, and potentially to compile a bridge in the +future. Since over 90% of guards never fail, this is incredibly wasteful - we have a copy of the resume data for each guard. When two guards are next to each other or the operations in between them are pure, we can safely redo the operations or to simply put, resume in the previous guard. That means every now and again we execute a few operations extra, but not storing extra info saves quite a bit of time and a bit of memory. -I've done some measurments on annotating & rtyping pypy which is pretty memory hungry -program that compiles a fair bit. I measured, respectively: +I've done some measurments on annotating & rtyping translation of pypy, which +is a pretty memory hungry program that compiles a fair bit. 
I measured, respectively: * total time the translation step took (annotating or rtyping) @@ -61,8 +61,8 @@ | win | 4.8% | 5.5% | 19% | 26% | 17% | +---------+-----------------+--------------+-------------------+----------------+--------------+ -Obviously pypy translation is a bit extreme exampl - the vast majority of the code out there -does not have that much code involved that's being jitted. However, it's at the very least +Obviously pypy translation is an extreme example - the vast majority of the code out there +does not have that many lines of code to be jitted. However, it's at the very least a good win for us :-) We will continue to improve the warmup performance and keep you posted! From noreply at buildbot.pypy.org Sat Oct 3 23:25:58 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 3 Oct 2015 23:25:58 +0200 (CEST) Subject: [pypy-commit] pypy default: test, fix for overrun end of storage when offset, i non-zero Message-ID: <20151003212559.083061C13DF@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r79957:a3564b6798cb Date: 2015-10-04 00:25 +0300 http://bitbucket.org/pypy/pypy/changeset/a3564b6798cb/ Log: test, fix for overrun end of storage when offset, i non-zero diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2475,6 +2475,18 @@ a.fill(12) assert (a == u'1').all() + def test_unicode_record_array(self) : + from numpy import dtype, array + t = dtype([('a', 'S3'), ('b', 'U2')]) + x = array([('a', u'b')], dtype=t) + assert str(x) == "[('a', u'b')]" + + t = dtype([('a', 'U3'), ('b', 'S2')]) + x = array([(u'a', 'b')], dtype=t) + x['a'] = u'1' + assert str(x) == "[(u'1', 'b')]" + + def test_boolean_indexing(self): import numpy as np a = np.zeros((1, 3)) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ 
b/pypy/module/micronumpy/types.py @@ -2231,9 +2231,9 @@ index = i + offset + 4*k data = rffi.cast(Int32.T, ord(box._value[k])) raw_storage_setitem_unaligned(storage, index, data) - for k in range(size, width // 4): - index = i + offset + 4*k - data = rffi.cast(Int32.T, 0) + # zero out the remaining memory + for index in range(size * 4 + i + offset, width): + data = rffi.cast(Int8.T, 0) raw_storage_setitem_unaligned(storage, index, data) def read(self, arr, i, offset, dtype): From noreply at buildbot.pypy.org Sun Oct 4 07:27:41 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 4 Oct 2015 07:27:41 +0200 (CEST) Subject: [pypy-commit] pypy default: prevent buffer overrun in searchsorted Message-ID: <20151004052741.46A221C0369@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r79958:56f608d8072c Date: 2015-10-04 08:26 +0300 http://bitbucket.org/pypy/pypy/changeset/56f608d8072c/ Log: prevent buffer overrun in searchsorted diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -862,6 +862,8 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), get_dtype_cache(space).w_longdtype) + if ret.get_size() < 1: + return ret if side == NPY.SEARCHLEFT: binsearch = loop.binsearch_left else: From noreply at buildbot.pypy.org Sun Oct 4 10:09:24 2015 From: noreply at buildbot.pypy.org (camara) Date: Sun, 4 Oct 2015 10:09:24 +0200 (CEST) Subject: [pypy-commit] extradoc camara/warmupimprovements2rst-create-table-head-1443916820814: warmup-improvements-2.rst create table header Message-ID: <20151004080924.DC0381C0369@cobra.cs.uni-duesseldorf.de> Author: John M. 
Camara Branch: camara/warmupimprovements2rst-create-table-head-1443916820814 Changeset: r5566:8e5a0d5d8db4 Date: 2015-10-04 00:00 +0000 http://bitbucket.org/pypy/extradoc/changeset/8e5a0d5d8db4/ Log: warmup-improvements-2.rst create table header diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst --- a/blog/draft/warmup-improvements-2.rst +++ b/blog/draft/warmup-improvements-2.rst @@ -53,7 +53,7 @@ +---------+-----------------+--------------+-------------------+----------------+--------------+ | branch | time annotation | time rtyping | memory annotation | memory rtyping | tracing time | -+---------+-----------------+--------------+-------------------+----------------+--------------+ ++=========+=================+==============+===================+================+==============+ | default | 317s | 454s | 707M | 1349M | 60s | +---------+-----------------+--------------+-------------------+----------------+--------------+ | sharing | 302s | 430s | 595M | 1070M | 51s | From noreply at buildbot.pypy.org Sun Oct 4 10:09:26 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sun, 4 Oct 2015 10:09:26 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Merged in camara/extradoc/camara/warmupimprovements2rst-create-table-head-1443916820814 (pull request #7) Message-ID: <20151004080926.EA2851C0369@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: extradoc Changeset: r5567:a06b0b432f0f Date: 2015-10-04 10:10 +0200 http://bitbucket.org/pypy/extradoc/changeset/a06b0b432f0f/ Log: Merged in camara/extradoc/camara/warmupimprovements2rst-create- table-head-1443916820814 (pull request #7) warmup-improvements-2.rst create table header diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst --- a/blog/draft/warmup-improvements-2.rst +++ b/blog/draft/warmup-improvements-2.rst @@ -53,7 +53,7 @@ +---------+-----------------+--------------+-------------------+----------------+--------------+ | branch | 
time annotation | time rtyping | memory annotation | memory rtyping | tracing time | -+---------+-----------------+--------------+-------------------+----------------+--------------+ ++=========+=================+==============+===================+================+==============+ | default | 317s | 454s | 707M | 1349M | 60s | +---------+-----------------+--------------+-------------------+----------------+--------------+ | sharing | 302s | 430s | 595M | 1070M | 51s | From noreply at buildbot.pypy.org Sun Oct 4 10:31:42 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 10:31:42 +0200 (CEST) Subject: [pypy-commit] pypy default: fix two tests Message-ID: <20151004083142.6A7111C122A@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79959:a1520085087f Date: 2015-10-04 10:23 +0200 http://bitbucket.org/pypy/pypy/changeset/a1520085087f/ Log: fix two tests diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,8 @@ .. branch: numpy-ctypes Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. 
\ No newline at end of file diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4075,7 +4075,7 @@ assert res == 2 res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 - self.check_operations_history(call_release_gil=1, call_may_force=0) + self.check_operations_history(call_release_gil_i=1, call_may_force_i=0) def test_unescaped_write_zero(self): class A: From noreply at buildbot.pypy.org Sun Oct 4 10:31:44 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 10:31:44 +0200 (CEST) Subject: [pypy-commit] pypy default: fix this test I hope Message-ID: <20151004083144.832051C122A@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79960:2d352d04adf6 Date: 2015-10-04 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/2d352d04adf6/ Log: fix this test I hope diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -187,7 +187,10 @@ def test_simple_call_longlong(self, **kwds): kwds.setdefault('supports_longlong', True) - kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1) + if is_64_bit: + kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1) + else: + kwds['expected_call_release_gil_f'] = kwds.pop('expected_call_release_gil', 1) maxint32 = 2147483647 a = r_longlong(maxint32) + 1 b = r_longlong(maxint32) + 2 From noreply at buildbot.pypy.org Sun Oct 4 10:39:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 10:39:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for direct running of test_coercion Message-ID: <20151004083912.5F7C81C13D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79961:ce1f6aca7c8b Date: 2015-10-04 09:12 +0200 
http://bitbucket.org/pypy/pypy/changeset/ce1f6aca7c8b/ Log: Fix for direct running of test_coercion diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), From noreply at buildbot.pypy.org Sun Oct 4 10:39:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 10:39:14 +0200 (CEST) Subject: [pypy-commit] pypy default: complete the fix Message-ID: <20151004083914.8921A1C13D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79962:12419799c389 Date: 2015-10-04 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/12419799c389/ Log: complete the fix diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -191,6 +191,7 @@ kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1) else: kwds['expected_call_release_gil_f'] = kwds.pop('expected_call_release_gil', 1) + kwds['expected_call_release_gil_i'] = 0 maxint32 = 2147483647 a = r_longlong(maxint32) + 1 b = r_longlong(maxint32) + 2 From noreply at buildbot.pypy.org Sun Oct 4 10:39:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 10:39:16 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20151004083916.8403A1C13D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79963:de3211a57ea4 Date: 2015-10-04 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/de3211a57ea4/ Log: merge heads diff --git 
a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), From noreply at buildbot.pypy.org Sun Oct 4 10:39:20 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 10:39:20 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: typo Message-ID: <20151004083920.542B21C13D4@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5568:fb8778d05df0 Date: 2015-10-04 10:40 +0200 http://bitbucket.org/pypy/extradoc/changeset/fb8778d05df0/ Log: typo diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst --- a/blog/draft/warmup-improvements-2.rst +++ b/blog/draft/warmup-improvements-2.rst @@ -1,7 +1,7 @@ Hello everyone! -This is the second part of the series of improvement in warmup time and +This is the second part of the series of improvements in warmup time and memory consumption in the PyPy JIT. This post covers recent work on sharing guard resume data that was recently merged to trunk. It will be a part of the next official PyPy release. 
To understand what it does, let's From noreply at buildbot.pypy.org Sun Oct 4 10:40:23 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 10:40:23 +0200 (CEST) Subject: [pypy-commit] pypy default: skip the test and the feature that does nto seem to be that useful Message-ID: <20151004084023.8A86F1C13D4@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79964:573b6d321c26 Date: 2015-10-04 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/573b6d321c26/ Log: skip the test and the feature that does nto seem to be that useful diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,8 +15,12 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', - 'enable_debug': 'interp_resop.enable_debug', - 'disable_debug': 'interp_resop.disable_debug', + # those things are disabled because they have bugs, but if + # they're found to be useful, fix test_ztranslation_jit_stats + # in the backend first. 
get_stats_snapshot still produces + # correct loop_runs if PYPYLOG is correct + #'enable_debug': 'interp_resop.enable_debug', + #'disable_debug': 'interp_resop.disable_debug', 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -213,6 +213,8 @@ CPUClass = getcpuclass() def test_jit_get_stats(self): + py.test.skip("disabled feature") + driver = JitDriver(greens = [], reds = ['i']) def f(): From noreply at buildbot.pypy.org Sun Oct 4 10:40:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 10:40:25 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20151004084025.829E41C13D4@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79965:551f1d6a8a9c Date: 2015-10-04 10:40 +0200 http://bitbucket.org/pypy/pypy/changeset/551f1d6a8a9c/ Log: merge diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -191,6 +191,7 @@ kwds['expected_call_release_gil_i'] = kwds.pop('expected_call_release_gil', 1) else: kwds['expected_call_release_gil_f'] = 
kwds.pop('expected_call_release_gil', 1) + kwds['expected_call_release_gil_i'] = 0 maxint32 = 2147483647 a = r_longlong(maxint32) + 1 b = r_longlong(maxint32) + 2 From noreply at buildbot.pypy.org Sun Oct 4 10:44:45 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sun, 4 Oct 2015 10:44:45 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: No extra args in intbounds callbacks. Message-ID: <20151004084445.E8AF11C13DF@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r79966:e9ea348da6d2 Date: 2015-10-04 10:45 +0200 http://bitbucket.org/pypy/pypy/changeset/e9ea348da6d2/ Log: No extra args in intbounds callbacks. diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -39,15 +39,13 @@ class IntBoundsCallbackArgs(object): - def __init__(self, op, callback_func=None, b1=None, b2=None): + def __init__(self, op, callback_func=None): self.op = op self.callback_func = callback_func - self.b1 = b1 - self.b2 = b2 def callback(self): if self.callback_func is not None: - self.callback_func(self.op, self.b1, self.b2) + self.callback_func(self.op) class OptIntBounds(Optimization): @@ -74,7 +72,7 @@ def _optimize_guard_true_false_value(self, op): return IntBoundsCallbackArgs(op, self._optimize_guard_true_false_value_callback) - def _optimize_guard_true_false_value_callback(self, op, bn1, bn2): + def _optimize_guard_true_false_value_callback(self, op): if op.getarg(0).type == 'i': self.propagate_bounds_backward(op.getarg(0)) @@ -84,18 +82,20 @@ def optimize_INT_OR_or_XOR(self, op): v1 = self.get_box_replacement(op.getarg(0)) - b1 = self.getintbound(v1) v2 = self.get_box_replacement(op.getarg(1)) - b2 = self.getintbound(v2) if v1 is v2: if op.getopnum() == rop.INT_OR: self.make_equal_to(op, v1) else: self.make_constant_int(op, 0) return None - return 
IntBoundsCallbackArgs(op, self.optimize_INT_OR_or_XOR_callback, b1, b2) + return IntBoundsCallbackArgs(op, self.optimize_INT_OR_or_XOR_callback) - def optimize_INT_OR_or_XOR_callback(self, op, b1, b2): + def optimize_INT_OR_or_XOR_callback(self, op): + v1 = self.get_box_replacement(op.getarg(0)) + b1 = self.getintbound(v1) + v2 = self.get_box_replacement(op.getarg(1)) + b2 = self.getintbound(v2) if b1.known_ge(IntBound(0, 0)) and \ b2.known_ge(IntBound(0, 0)): r = self.getintbound(op) @@ -106,11 +106,11 @@ optimize_INT_XOR = optimize_INT_OR_or_XOR def optimize_INT_AND(self, op): + return IntBoundsCallbackArgs(op, self.optimize_INT_AND_callback) + + def optimize_INT_AND_callback(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - return IntBoundsCallbackArgs(op, self.optimize_INT_AND_callback, b1, b2) - - def optimize_INT_AND_callback(self, op, b1, b2): r = self.getintbound(op) if b2.is_constant(): val = b2.lower @@ -127,7 +127,7 @@ def optimize_INT_SUB(self, op): return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_callback) - def optimize_INT_SUB_callback(self, op, bn1, bn2): + def optimize_INT_SUB_callback(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.sub_bound(b2) @@ -174,7 +174,7 @@ return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_callback) - def optimize_INT_ADD_callback(self, op, bn1, bn2): + def optimize_INT_ADD_callback(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -183,22 +183,22 @@ r.intersect(b) def optimize_INT_MUL(self, op): + return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_callback) + + def optimize_INT_MUL_callback(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_callback, b1, b2) - - def optimize_INT_MUL_callback(self, op, b1, b2): r = self.getintbound(op) b = b1.mul_bound(b2) if b.bounded(): 
r.intersect(b) def optimize_INT_FLOORDIV(self, op): + return IntBoundsCallbackArgs(op, self.optimize_INT_FLOORDIV_callback) + + def optimize_INT_FLOORDIV_callback(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) - return IntBoundsCallbackArgs(op, self.optimize_INT_FLOORDIV_callback, b1, b2) - - def optimize_INT_FLOORDIV_callback(self, op, b1, b2): r = self.getintbound(op) r.intersect(b1.div_bound(b2)) @@ -215,9 +215,13 @@ arg2 = ConstInt(val-1) op = self.replace_op_with(op, rop.INT_AND, args=[arg1, arg2]) - return IntBoundsCallbackArgs(op, self.optimize_INT_MOD_callback, b2, known_nonneg) + return IntBoundsCallbackArgs(op, self.optimize_INT_MOD_callback) - def optimize_INT_MOD_callback(self, op, b2, known_nonneg): + def optimize_INT_MOD_callback(self, op): + b1 = self.getintbound(op.getarg(0)) + b2 = self.getintbound(op.getarg(1)) + known_nonneg = (b1.known_ge(IntBound(0, 0)) and + b2.known_ge(IntBound(0, 0))) if b2.is_constant(): val = b2.getint() r = self.getintbound(op) @@ -232,15 +236,13 @@ r.make_lt(IntBound(val, val)) def optimize_INT_LSHIFT(self, op): + return IntBoundsCallbackArgs(op, self.optimize_INT_LSHIFT_callback) + + def optimize_INT_LSHIFT_callback(self, op): arg0 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg0) arg1 = self.get_box_replacement(op.getarg(1)) b2 = self.getintbound(arg1) - return IntBoundsCallbackArgs(op, self.optimize_INT_LSHIFT_callback, b1, b2) - - def optimize_INT_LSHIFT_callback(self, op, b1, b2): - arg0 = self.get_box_replacement(op.getarg(0)) - arg1 = self.get_box_replacement(op.getarg(1)) r = self.getintbound(op) b = b1.lshift_bound(b2) r.intersect(b) @@ -260,9 +262,12 @@ # constant result (likely 0, for rshifts that kill all bits) self.make_constant_int(op, b.lower) return None - return IntBoundsCallbackArgs(op, self.optimize_INT_RSHIFT_callback, b) + return IntBoundsCallbackArgs(op, self.optimize_INT_RSHIFT_callback) - def optimize_INT_RSHIFT_callback(self, op, b, bn): + 
def optimize_INT_RSHIFT_callback(self, op): + b1 = self.getintbound(op.getarg(0)) + b2 = self.getintbound(op.getarg(1)) + b = b1.rshift_bound(b2) r = self.getintbound(op) r.intersect(b) @@ -314,9 +319,12 @@ # by optimize_GUARD_NO_OVERFLOW; if we see instead an # optimize_GUARD_OVERFLOW, then InvalidLoop. op = self.replace_op_with(op, rop.INT_ADD) - return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_OVF_callback, resbound) + return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_OVF_callback) - def optimize_INT_ADD_OVF_callback(self, op, resbound, bn): + def optimize_INT_ADD_OVF_callback(self, op): + b1 = self.getintbound(op.getarg(0)) + b2 = self.getintbound(op.getarg(1)) + resbound = b1.add_bound(b2) r = self.getintbound(op) r.intersect(resbound) @@ -331,9 +339,14 @@ resbound = b0.sub_bound(b1) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_SUB) - return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_OVF_callback, resbound) + return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_OVF_callback) - def optimize_INT_SUB_OVF_callback(self, op, resbound, bn): + def optimize_INT_SUB_OVF_callback(self, op): + arg0 = self.get_box_replacement(op.getarg(0)) + arg1 = self.get_box_replacement(op.getarg(1)) + b0 = self.getintbound(arg0) + b1 = self.getintbound(arg1) + resbound = b0.sub_bound(b1) r = self.getintbound(op) r.intersect(resbound) @@ -343,9 +356,12 @@ resbound = b1.mul_bound(b2) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_MUL) - return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_OVF_callback, resbound) + return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_OVF_callback) - def optimize_INT_MUL_OVF_callback(self, op, resbound, bn): + def optimize_INT_MUL_OVF_callback(self, op): + b1 = self.getintbound(op.getarg(0)) + b2 = self.getintbound(op.getarg(1)) + resbound = b1.mul_bound(b2) r = self.getintbound(op) r.intersect(resbound) @@ -441,23 +457,27 @@ if bounds.contains_bound(b): self.make_equal_to(op, op.getarg(0)) else: - 
return IntBoundsCallbackArgs(op, self.optimize_INT_SIGNEXT_callback, bounds) + return IntBoundsCallbackArgs(op, self.optimize_INT_SIGNEXT_callback) - def optimize_INT_SIGNEXT_callback(self, op, bounds, bn): + def optimize_INT_SIGNEXT_callback(self, op): + numbits = op.getarg(1).getint() * 8 + start = -(1 << (numbits - 1)) + stop = 1 << (numbits - 1) + bounds = IntBound(start, stop - 1) bres = self.getintbound(op) bres.intersect(bounds) def optimize_ARRAYLEN_GC(self, op): + return IntBoundsCallbackArgs(op, self.optimize_ARRAYLEN_GC_callback) + + def optimize_ARRAYLEN_GC_callback(self, op): array = self.ensure_ptr_info_arg0(op) - return IntBoundsCallbackArgs(op, self.optimize_ARRAYLEN_GC_callback, array) - - def optimize_ARRAYLEN_GC_callback(self, op, array, bn): self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): return IntBoundsCallbackArgs(op, self.optimize_STRLEN_callback) - def optimize_STRLEN_callback(self, op, bn1, bn2): + def optimize_STRLEN_callback(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_string) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) @@ -465,7 +485,7 @@ def optimize_UNICODELEN(self, op): return IntBoundsCallbackArgs(op, self.optimize_UNICODELEN_callback) - def optimize_UNICODELEN_callback(self, op, bn1, bn2): + def optimize_UNICODELEN_callback(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) @@ -473,7 +493,7 @@ def optimize_STRGETITEM(self, op): return IntBoundsCallbackArgs(op, self.optimize_STRGETITEM_callback) - def optimize_STRGETITEM_callback(self, op, bn1, bn2): + def optimize_STRGETITEM_callback(self, op): v1 = self.getintbound(op) v2 = self.getptrinfo(op.getarg(0)) intbound = self.getintbound(op.getarg(1)) @@ -487,7 +507,7 @@ def optimize_GETFIELD_RAW_I(self, op): return IntBoundsCallbackArgs(op, 
self.optimize_GETFIELD_RAW_I_callback) - def optimize_GETFIELD_RAW_I_callback(self, op, bn1, bn2): + def optimize_GETFIELD_RAW_I_callback(self, op): descr = op.getdescr() if descr.is_integer_bounded(): b1 = self.getintbound(op) @@ -507,7 +527,7 @@ def optimize_GETARRAYITEM_RAW_I(self, op): return IntBoundsCallbackArgs(op, self.optimize_GETARRAYITEM_RAW_I_callback) - def optimize_GETARRAYITEM_RAW_I_callback(self, op, bn1, bn2): + def optimize_GETARRAYITEM_RAW_I_callback(self, op): descr = op.getdescr() if descr and descr.is_item_integer_bounded(): intbound = self.getintbound(op) @@ -522,7 +542,7 @@ def optimize_UNICODEGETITEM(self, op): return IntBoundsCallbackArgs(op, self.optimize_UNICODEGETITEM_callback) - def optimize_UNICODEGETITEM_callback(self, op, bn1, bn2): + def optimize_UNICODEGETITEM_callback(self, op): b1 = self.getintbound(op) b1.make_ge(IntLowerBound(0)) v2 = self.getptrinfo(op.getarg(0)) From noreply at buildbot.pypy.org Sun Oct 4 11:12:17 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sun, 4 Oct 2015 11:12:17 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Turn callbacks into postprocess dispatch. Message-ID: <20151004091217.C8DED1C0369@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r79967:c28bff45d6ed Date: 2015-10-04 11:12 +0200 http://bitbucket.org/pypy/pypy/changeset/c28bff45d6ed/ Log: Turn callbacks into postprocess dispatch. 
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -53,11 +53,14 @@ redundant guards""" def opt_default(self, op): - return IntBoundsCallbackArgs(op) + return op def propagate_forward(self, op): return dispatch_opt(self, op) + def propagate_postprocess(self, op): + return dispatch_postprocess(self, op) + def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt # but the bounds produced by all instructions where box is @@ -70,9 +73,9 @@ dispatch_bounds_ops(self, box) def _optimize_guard_true_false_value(self, op): - return IntBoundsCallbackArgs(op, self._optimize_guard_true_false_value_callback) + return op - def _optimize_guard_true_false_value_callback(self, op): + def _postprocess_guard_true_false_value(self, op): if op.getarg(0).type == 'i': self.propagate_bounds_backward(op.getarg(0)) @@ -80,6 +83,10 @@ optimize_GUARD_FALSE = _optimize_guard_true_false_value optimize_GUARD_VALUE = _optimize_guard_true_false_value + postprocess_GUARD_TRUE = _postprocess_guard_true_false_value + postprocess_GUARD_FALSE = _postprocess_guard_true_false_value + postprocess_GUARD_VALUE = _postprocess_guard_true_false_value + def optimize_INT_OR_or_XOR(self, op): v1 = self.get_box_replacement(op.getarg(0)) v2 = self.get_box_replacement(op.getarg(1)) @@ -89,9 +96,9 @@ else: self.make_constant_int(op, 0) return None - return IntBoundsCallbackArgs(op, self.optimize_INT_OR_or_XOR_callback) + return op - def optimize_INT_OR_or_XOR_callback(self, op): + def postprocess_INT_OR_or_XOR(self, op): v1 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(v1) v2 = self.get_box_replacement(op.getarg(1)) @@ -105,10 +112,13 @@ optimize_INT_OR = optimize_INT_OR_or_XOR optimize_INT_XOR = optimize_INT_OR_or_XOR + postprocess_INT_OR = postprocess_INT_OR_or_XOR + 
postprocess_INT_XOR = postprocess_INT_OR_or_XOR + def optimize_INT_AND(self, op): - return IntBoundsCallbackArgs(op, self.optimize_INT_AND_callback) + return op - def optimize_INT_AND_callback(self, op): + def postprocess_INT_AND(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -125,9 +135,9 @@ r.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): - return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_callback) + return op - def optimize_INT_SUB_callback(self, op): + def postprocess_INT_SUB(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.sub_bound(b2) @@ -138,7 +148,7 @@ arg1 = self.get_box_replacement(op.getarg(0)) arg2 = self.get_box_replacement(op.getarg(1)) if self.is_raw_ptr(arg1) or self.is_raw_ptr(arg2): - return IntBoundsCallbackArgs(op) + return op v1 = self.getintbound(arg1) v2 = self.getintbound(arg2) @@ -172,9 +182,9 @@ arg2 = ConstInt(sum) op = self.replace_op_with(op, rop.INT_ADD, args=[arg1, arg2]) - return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_callback) + return op - def optimize_INT_ADD_callback(self, op): + def postprocess_INT_ADD(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -183,9 +193,9 @@ r.intersect(b) def optimize_INT_MUL(self, op): - return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_callback) + return op - def optimize_INT_MUL_callback(self, op): + def postprocess_INT_MUL(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -194,9 +204,9 @@ r.intersect(b) def optimize_INT_FLOORDIV(self, op): - return IntBoundsCallbackArgs(op, self.optimize_INT_FLOORDIV_callback) + return op - def optimize_INT_FLOORDIV_callback(self, op): + def postprocess_INT_FLOORDIV(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -215,9 +225,9 @@ 
arg2 = ConstInt(val-1) op = self.replace_op_with(op, rop.INT_AND, args=[arg1, arg2]) - return IntBoundsCallbackArgs(op, self.optimize_INT_MOD_callback) + return op - def optimize_INT_MOD_callback(self, op): + def postprocess_INT_MOD(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) known_nonneg = (b1.known_ge(IntBound(0, 0)) and @@ -236,9 +246,9 @@ r.make_lt(IntBound(val, val)) def optimize_INT_LSHIFT(self, op): - return IntBoundsCallbackArgs(op, self.optimize_INT_LSHIFT_callback) + return op - def optimize_INT_LSHIFT_callback(self, op): + def postprocess_INT_LSHIFT(self, op): arg0 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg0) arg1 = self.get_box_replacement(op.getarg(1)) @@ -262,9 +272,9 @@ # constant result (likely 0, for rshifts that kill all bits) self.make_constant_int(op, b.lower) return None - return IntBoundsCallbackArgs(op, self.optimize_INT_RSHIFT_callback) + return op - def optimize_INT_RSHIFT_callback(self, op): + def postprocess_INT_RSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.rshift_bound(b2) @@ -295,7 +305,7 @@ self.pure_from_args(rop.INT_SUB, [args[0], result], args[1]) #elif opnum == rop.INT_MUL_OVF: # self.pure(rop.INT_MUL, args[:], result) - return IntBoundsCallbackArgs(op) + return op def optimize_GUARD_OVERFLOW(self, op): # If INT_xxx_OVF was replaced by INT_xxx, *but* we still see @@ -308,7 +318,7 @@ raise InvalidLoop('An INT_xxx_OVF was proven not to overflow but' + 'guarded with GUARD_OVERFLOW') - return IntBoundsCallbackArgs(op) + return op def optimize_INT_ADD_OVF(self, op): b1 = self.getintbound(op.getarg(0)) @@ -319,9 +329,9 @@ # by optimize_GUARD_NO_OVERFLOW; if we see instead an # optimize_GUARD_OVERFLOW, then InvalidLoop. 
op = self.replace_op_with(op, rop.INT_ADD) - return IntBoundsCallbackArgs(op, self.optimize_INT_ADD_OVF_callback) + return op - def optimize_INT_ADD_OVF_callback(self, op): + def postprocess_INT_ADD_OVF(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.add_bound(b2) @@ -339,9 +349,9 @@ resbound = b0.sub_bound(b1) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_SUB) - return IntBoundsCallbackArgs(op, self.optimize_INT_SUB_OVF_callback) + return op - def optimize_INT_SUB_OVF_callback(self, op): + def postprocess_INT_SUB_OVF(self, op): arg0 = self.get_box_replacement(op.getarg(0)) arg1 = self.get_box_replacement(op.getarg(1)) b0 = self.getintbound(arg0) @@ -356,9 +366,9 @@ resbound = b1.mul_bound(b2) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_MUL) - return IntBoundsCallbackArgs(op, self.optimize_INT_MUL_OVF_callback) + return op - def optimize_INT_MUL_OVF_callback(self, op): + def postprocess_INT_MUL_OVF(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.mul_bound(b2) @@ -375,7 +385,7 @@ elif b1.known_ge(b2) or arg1 is arg2: self.make_constant_int(op, 0) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_GT(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -387,7 +397,7 @@ elif b1.known_le(b2) or arg1 is arg2: self.make_constant_int(op, 0) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_LE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -399,7 +409,7 @@ elif b1.known_gt(b2): self.make_constant_int(op, 0) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_GE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -411,7 +421,7 @@ elif b1.known_lt(b2): self.make_constant_int(op, 0) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_EQ(self, op): arg0 = self.get_box_replacement(op.getarg(0)) @@ -425,7 +435,7 @@ elif arg0.same_box(arg1): 
self.make_constant_int(op, 1) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_NE(self, op): arg0 = self.get_box_replacement(op.getarg(0)) @@ -439,14 +449,14 @@ elif arg0 is arg1: self.make_constant_int(op, 0) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_FORCE_GE_ZERO(self, op): b = self.getintbound(op.getarg(0)) if b.known_ge(IntBound(0, 0)): self.make_equal_to(op, op.getarg(0)) else: - return IntBoundsCallbackArgs(op) + return op def optimize_INT_SIGNEXT(self, op): b = self.getintbound(op.getarg(0)) @@ -457,9 +467,9 @@ if bounds.contains_bound(b): self.make_equal_to(op, op.getarg(0)) else: - return IntBoundsCallbackArgs(op, self.optimize_INT_SIGNEXT_callback) + return op - def optimize_INT_SIGNEXT_callback(self, op): + def postprocess_INT_SIGNEXT(self, op): numbits = op.getarg(1).getint() * 8 start = -(1 << (numbits - 1)) stop = 1 << (numbits - 1) @@ -468,32 +478,32 @@ bres.intersect(bounds) def optimize_ARRAYLEN_GC(self, op): - return IntBoundsCallbackArgs(op, self.optimize_ARRAYLEN_GC_callback) + return op - def optimize_ARRAYLEN_GC_callback(self, op): + def postprocess_ARRAYLEN_GC(self, op): array = self.ensure_ptr_info_arg0(op) self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): - return IntBoundsCallbackArgs(op, self.optimize_STRLEN_callback) + return op - def optimize_STRLEN_callback(self, op): + def postprocess_STRLEN(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_string) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) def optimize_UNICODELEN(self, op): - return IntBoundsCallbackArgs(op, self.optimize_UNICODELEN_callback) + return op - def optimize_UNICODELEN_callback(self, op): + def postprocess_UNICODELEN(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) def 
optimize_STRGETITEM(self, op): - return IntBoundsCallbackArgs(op, self.optimize_STRGETITEM_callback) + return op - def optimize_STRGETITEM_callback(self, op): + def postprocess_STRGETITEM(self, op): v1 = self.getintbound(op) v2 = self.getptrinfo(op.getarg(0)) intbound = self.getintbound(op.getarg(1)) @@ -505,9 +515,9 @@ v1.make_lt(IntUpperBound(256)) def optimize_GETFIELD_RAW_I(self, op): - return IntBoundsCallbackArgs(op, self.optimize_GETFIELD_RAW_I_callback) + return op - def optimize_GETFIELD_RAW_I_callback(self, op): + def postprocess_GETFIELD_RAW_I(self, op): descr = op.getdescr() if descr.is_integer_bounded(): b1 = self.getintbound(op) @@ -520,14 +530,24 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_RAW_I optimize_GETFIELD_GC_F = optimize_GETFIELD_RAW_I + postprocess_GETFIELD_RAW_F = postprocess_GETFIELD_RAW_I + postprocess_GETFIELD_RAW_R = postprocess_GETFIELD_RAW_I + postprocess_GETFIELD_GC_I = postprocess_GETFIELD_RAW_I + postprocess_GETFIELD_GC_R = postprocess_GETFIELD_RAW_I + postprocess_GETFIELD_GC_F = postprocess_GETFIELD_RAW_I + optimize_GETINTERIORFIELD_GC_I = optimize_GETFIELD_RAW_I optimize_GETINTERIORFIELD_GC_R = optimize_GETFIELD_RAW_I optimize_GETINTERIORFIELD_GC_F = optimize_GETFIELD_RAW_I + postprocess_GETINTERIORFIELD_GC_I = postprocess_GETFIELD_RAW_I + postprocess_GETINTERIORFIELD_GC_R = postprocess_GETFIELD_RAW_I + postprocess_GETINTERIORFIELD_GC_F = postprocess_GETFIELD_RAW_I + def optimize_GETARRAYITEM_RAW_I(self, op): - return IntBoundsCallbackArgs(op, self.optimize_GETARRAYITEM_RAW_I_callback) + return op - def optimize_GETARRAYITEM_RAW_I_callback(self, op): + def postprocess_GETARRAYITEM_RAW_I(self, op): descr = op.getdescr() if descr and descr.is_item_integer_bounded(): intbound = self.getintbound(op) @@ -539,10 +559,15 @@ optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_RAW_I optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_RAW_I + postprocess_GETARRAYITEM_RAW_F = postprocess_GETARRAYITEM_RAW_I + postprocess_GETARRAYITEM_GC_I = 
postprocess_GETARRAYITEM_RAW_I + postprocess_GETARRAYITEM_GC_F = postprocess_GETARRAYITEM_RAW_I + postprocess_GETARRAYITEM_GC_R = postprocess_GETARRAYITEM_RAW_I + def optimize_UNICODEGETITEM(self, op): - return IntBoundsCallbackArgs(op, self.optimize_UNICODEGETITEM_callback) + return op - def optimize_UNICODEGETITEM_callback(self, op): + def postprocess_UNICODEGETITEM(self, op): b1 = self.getintbound(op) b1.make_ge(IntLowerBound(0)) v2 = self.getptrinfo(op.getarg(0)) @@ -709,3 +734,4 @@ dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_', default=OptIntBounds.opt_default) dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_') +dispatch_postprocess = make_dispatcher_method(OptIntBounds, 'postprocess_') diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -538,12 +538,12 @@ op.set_forwarded(None) def send_extra_operation(self, op): - callback_args = self.optimizations[0].propagate_forward(op) - if callback_args is None: + newop = self.optimizations[0].propagate_forward(op) + if newop is None: return - self.optimizations[0].last_emitted_operation = callback_args.op - self.first_optimization.propagate_forward(callback_args.op) - callback_args.callback() + self.optimizations[0].last_emitted_operation = newop + self.first_optimization.propagate_forward(newop) + self.optimizations[0].propagate_postprocess(newop) def propagate_forward(self, op): dispatch_opt(self, op) From noreply at buildbot.pypy.org Sun Oct 4 11:38:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 11:38:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Memory saving on 'rd_locs', which turns out to be often exactly equal to Message-ID: <20151004093819.012E51C0369@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79968:dfaa5710b535 Date: 2015-10-04 11:38 +0200 
http://bitbucket.org/pypy/pypy/changeset/dfaa5710b535/ Log: Memory saving on 'rd_locs', which turns out to be often exactly equal to the previously generated rd_locs diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -173,6 +173,8 @@ input_i += 1 return locs + _previous_rd_locs = [] + def store_info_on_descr(self, startspos, guardtok): withfloats = False for box in guardtok.failargs: @@ -184,7 +186,17 @@ fail_descr = cast_instance_to_gcref(guardtok.faildescr) fail_descr = rffi.cast(lltype.Signed, fail_descr) base_ofs = self.cpu.get_baseofs_of_frame_field() - positions = [rffi.cast(rffi.USHORT, 0)] * len(guardtok.fail_locs) + # + # in practice, about 2/3rd of 'positions' lists that we build are + # exactly the same as the previous one, so share the lists to + # conserve memory + if len(self._previous_rd_locs) == len(guardtok.fail_locs): + positions = self._previous_rd_locs # tentatively + shared = True + else: + positions = [rffi.cast(rffi.USHORT, 0)] * len(guardtok.fail_locs) + shared = False + # for i, loc in enumerate(guardtok.fail_locs): if loc is None: position = 0xFFFF @@ -203,7 +215,15 @@ position = len(self.cpu.gen_regs) + loc.value * coeff else: position = self.cpu.all_reg_indexes[loc.value] + + if shared: + if (rffi.cast(lltype.Signed, self._previous_rd_locs[i]) == + rffi.cast(lltype.Signed, position)): + continue # still equal + positions = positions[:] + shared = False positions[i] = rffi.cast(rffi.USHORT, position) + self._previous_rd_locs = positions # write down the positions of locs guardtok.faildescr.rd_locs = positions return fail_descr, target From noreply at buildbot.pypy.org Sun Oct 4 12:35:34 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 12:35:34 +0200 (CEST) Subject: [pypy-commit] pypy default: these days pygame can be installed on pypy Message-ID: 
<20151004103535.03B8B1C12D8@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79969:b8c62e963133 Date: 2015-10-04 12:35 +0200 http://bitbucket.org/pypy/pypy/changeset/b8c62e963133/ Log: these days pygame can be installed on pypy diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, From noreply at buildbot.pypy.org Sun Oct 4 13:42:24 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 13:42:24 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: make the blog post less dry Message-ID: <20151004114224.781301C0369@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: extradoc Changeset: r5569:e203cb369cfd Date: 2015-10-04 13:43 +0200 http://bitbucket.org/pypy/extradoc/changeset/e203cb369cfd/ Log: make the blog post less dry diff --git a/blog/draft/warmup-improvements-2.rst b/blog/draft/warmup-improvements-2.rst --- a/blog/draft/warmup-improvements-2.rst +++ b/blog/draft/warmup-improvements-2.rst @@ -7,34 +7,45 @@ of the next official PyPy release. 
To understand what it does, let's start with a loop for a simple example:: + class A(object): + def __init__(self, x, y): + self.x = x + self.y = y + + def call_method(self, z): + return self.x + self.y + z + def f(): s = 0 for i in range(100000): - s += 1 + a = A(i, 1 + i) + s += a.call_method(i) -which compiles to the following loop:: +At the entrance of the loop, we have the following set of operations: - label(p0, p1, p4, p6, p7, i39, i25, p15, p24, i44, i29, descr=TargetToken(4364727712)) - # check the loop exit - i45 = i44 >= i29 - guard(i45 is false) - # increase the loop counter - i46 = i44 + 1 - # store the index into special W_RangeObject - ((pypy.objspace.std.iterobject.W_AbstractSeqIterObject)p15).inst_index = i46 - # add s += 1 with overflow checking - i47 = int_add_ovf(i39, 1) - guard_no_overflow(descr=) - guard_not_invalidated(descr=) - i49 = getfield_raw_i(4336405536, descr=) - i50 = i49 < 0 - guard(i50 is false) - jump(p0, p1, p4, p6, p7, i47, i44, p15, p24, i46, i29, descr=TargetToken(4364727712)) + guard(i5 == 4) + guard(p3 is null) + p27 = getfield_gc_pure_r(p2, descr=) + p28 = getfield_gc_pure_r(p2, descr=) + guard_class(p17, 4316866008, descr=) + p30 = getfield_gc_r(p17, descr=) + guard_nonnull(p30, descr=) + i31 = getfield_gc_i(p17, descr=) + p32 = getfield_gc_r(p30, descr=) + guard_class(p32, 4317041344, descr=) + p34 = getfield_gc_r(p30, descr=) + i35 = getfield_gc_pure_i(p34, descr=) -Now each ``guard`` needs a bit of data to know how to exit the compiled -assembler back up to the interpreter, and potentially to compile a bridge in the -future. Since over 90% of guards never fail, this is incredibly wasteful - we have a copy -of the resume data for each guard. When two guards are next to each other or the +The above operations gets executed at the entrance, so each time we call ``f()``. They ensure +all the optimizations done below stay valid. Now, as long as nothing +crazy happens, they only ensure that the world around us never changed. 
However, if someone puts new +methods on class ``A``, any of the above guards might fail, despite the fact that it's a very unlikely +case, pypy needs to track how to recover from this situation. Each of those points needs to keep the full +state of the optimizations performed, so we can safely deoptimize them and reenter the interpreter. +This is vastly wasteful since most of those guards never fail, hence some sharing between guards +has been performed. + +We went a step further - when two guards are next to each other or the operations in between them are pure, we can safely redo the operations or to simply put, resume in the previous guard. That means every now and again we execute a few operations extra, but not storing extra info saves quite a bit of time and a bit of memory. From noreply at buildbot.pypy.org Sun Oct 4 13:52:26 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 13:52:26 +0200 (CEST) Subject: [pypy-commit] pypy default: fix rpython and tests Message-ID: <20151004115226.8ED261C122A@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79970:6f4296b0d7fa Date: 2015-10-04 13:52 +0200 http://bitbucket.org/pypy/pypy/changeset/6f4296b0d7fa/ Log: fix rpython and tests diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -673,29 +673,11 @@ class ResumeDescr(AbstractFailDescr): _attrs_ = () -class ResumeGuardDescr(ResumeDescr): - _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', - 'rd_frame_info_list', 'rd_pendingfields', 'status') - - rd_numb = lltype.nullptr(NUMBERING) - rd_count = 0 - rd_consts = None - rd_virtuals = None - rd_frame_info_list = None - rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) +class AbstractResumeGuardDescr(ResumeDescr): + _attrs_ = ('status',) status = r_uint(0) - def copy_all_attributes_from(self, other): - assert isinstance(other, ResumeGuardDescr) - self.rd_count = 
other.rd_count - self.rd_consts = other.rd_consts - self.rd_frame_info_list = other.rd_frame_info_list - self.rd_pendingfields = other.rd_pendingfields - self.rd_virtuals = other.rd_virtuals - self.rd_numb = other.rd_numb - # we don't copy status - ST_BUSY_FLAG = 0x01 # if set, busy tracing from the guard ST_TYPE_MASK = 0x06 # mask for the type (TY_xxx) ST_SHIFT = 3 # in "status >> ST_SHIFT" is stored: @@ -707,15 +689,6 @@ TY_REF = 0x04 TY_FLOAT = 0x06 - def store_final_boxes(self, guard_op, boxes, metainterp_sd): - guard_op.setfailargs(boxes) - self.rd_count = len(boxes) - # - if metainterp_sd.warmrunnerdesc is not None: # for tests - jitcounter = metainterp_sd.warmrunnerdesc.jitcounter - hash = jitcounter.fetch_next_hash() - self.status = hash & self.ST_SHIFT_MASK - def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): if self.must_compile(deadframe, metainterp_sd, jitdriver_sd): self.start_compiling() @@ -726,9 +699,14 @@ self.done_compiling() else: from rpython.jit.metainterp.blackhole import resume_in_blackhole - resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) + if isinstance(self, ResumeGuardCopiedDescr): + resume_in_blackhole(metainterp_sd, jitdriver_sd, self.prev, deadframe) + else: + assert isinstance(self, ResumeGuardDescr) + resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) assert 0, "unreachable" + def _trace_and_compile_from_bridge(self, deadframe, metainterp_sd, jitdriver_sd): # 'jitdriver_sd' corresponds to the outermost one, i.e. 
the one @@ -830,9 +808,53 @@ assert 0, box.type self.status = ty | (r_uint(index) << self.ST_SHIFT) + def store_hash(self, metainterp_sd): + if metainterp_sd.warmrunnerdesc is not None: # for tests + jitcounter = metainterp_sd.warmrunnerdesc.jitcounter + hash = jitcounter.fetch_next_hash() + self.status = hash & self.ST_SHIFT_MASK + +class ResumeGuardCopiedDescr(AbstractResumeGuardDescr): + _attrs_ = ('status', 'prev') + + def copy_all_attributes_from(self, other): + assert isinstance(other, ResumeGuardCopiedDescr) + self.prev = other.prev + +class ResumeGuardDescr(AbstractResumeGuardDescr): + _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', + 'rd_frame_info_list', 'rd_pendingfields', 'status') + + rd_numb = lltype.nullptr(NUMBERING) + rd_count = 0 + rd_consts = None + rd_virtuals = None + rd_frame_info_list = None + rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) + + def copy_all_attributes_from(self, other): + if isinstance(other, ResumeGuardCopiedDescr): + other = other.prev + assert isinstance(other, ResumeGuardDescr) + self.rd_count = other.rd_count + self.rd_consts = other.rd_consts + self.rd_frame_info_list = other.rd_frame_info_list + self.rd_pendingfields = other.rd_pendingfields + self.rd_virtuals = other.rd_virtuals + self.rd_numb = other.rd_numb + # we don't copy status + + def store_final_boxes(self, guard_op, boxes, metainterp_sd): + guard_op.setfailargs(boxes) + self.rd_count = len(boxes) + self.store_hash(metainterp_sd) + class ResumeGuardExcDescr(ResumeGuardDescr): pass +class ResumeGuardCopiedExcDescr(ResumeGuardCopiedDescr): + pass + class ResumeAtPositionDescr(ResumeGuardDescr): pass @@ -853,6 +875,25 @@ ptr = cpu.ts.cast_to_baseclass(gcref) return cast_base_ptr_to_instance(AllVirtuals, ptr) +def invent_fail_descr_for_op(opnum, optimizer, copied_guard=False): + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + assert not copied_guard + resumedescr = ResumeGuardForcedDescr() + 
resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd) + elif opnum in (rop.GUARD_IS_OBJECT, rop.GUARD_SUBCLASS, rop.GUARD_GC_TYPE): + # note - this only happens in tests + resumedescr = ResumeAtPositionDescr() + elif opnum in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): + if copied_guard: + resumedescr = ResumeGuardCopiedExcDescr() + else: + resumedescr = ResumeGuardExcDescr() + else: + if copied_guard: + resumedescr = ResumeGuardCopiedDescr() + else: + resumedescr = ResumeGuardDescr() + return resumedescr class ResumeGuardForcedDescr(ResumeGuardDescr): def _init(self, metainterp_sd, jitdriver_sd): @@ -889,7 +930,7 @@ rstack._stack_criticalcode_start() try: deadframe = cpu.force(token) - # this should set descr to ResumeGuardForceDescr, if it + # this should set descr to ResumeGuardForcedDescr, if it # was not that already faildescr = cpu.get_latest_descr(deadframe) assert isinstance(faildescr, ResumeGuardForcedDescr) @@ -913,19 +954,6 @@ hidden_all_virtuals = obj.hide(metainterp_sd.cpu) metainterp_sd.cpu.set_savedata_ref(deadframe, hidden_all_virtuals) -def invent_fail_descr_for_op(opnum, optimizer): - if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: - resumedescr = ResumeGuardForcedDescr() - resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd) - elif opnum in (rop.GUARD_IS_OBJECT, rop.GUARD_SUBCLASS, rop.GUARD_GC_TYPE): - # note - this only happens in tests - resumedescr = ResumeAtPositionDescr() - elif opnum in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): - resumedescr = ResumeGuardExcDescr() - else: - resumedescr = ResumeGuardDescr() - return resumedescr - class ResumeFromInterpDescr(ResumeDescr): def __init__(self, original_greenkey): self.original_greenkey = original_greenkey diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -636,11 +636,14 
@@ def _copy_resume_data_from(self, guard_op, last_guard_op): if guard_op.getopnum() in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION): assert last_guard_op.getopnum() == rop.GUARD_NOT_FORCED - descr = compile.invent_fail_descr_for_op(guard_op.getopnum(), self) - descr.copy_all_attributes_from(last_guard_op.getdescr()) + descr = compile.invent_fail_descr_for_op(guard_op.getopnum(), self, True) + assert isinstance(descr, compile.ResumeGuardCopiedDescr) + last_descr = last_guard_op.getdescr() + assert isinstance(descr, compile.ResumeGuardDescr) + descr.prev = last_descr guard_op.setdescr(descr) - descr.store_final_boxes(guard_op, last_guard_op.getfailargs(), - self.metainterp_sd) + guard_op.setfailargs(last_guard_op.getfailargs()) + descr.store_hash(self.metainterp_sd) assert isinstance(guard_op, GuardResOp) if guard_op.getopnum() == rop.GUARD_VALUE: guard_op = self._maybe_replace_guard_value(guard_op, descr) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2302,9 +2302,13 @@ self.run_blackhole_interp_to_cancel_tracing(stb) assert False, "should always raise" - def handle_guard_failure(self, key, deadframe): + def handle_guard_failure(self, resumedescr, deadframe): debug_start('jit-tracing') self.staticdata.profiler.start_tracing() + if isinstance(resumedescr, compile.ResumeGuardCopiedDescr): + key = resumedescr.prev + else: + key = resumedescr assert isinstance(key, compile.ResumeGuardDescr) # store the resumekey.wref_original_loop_token() on 'self' to make # sure that it stays alive as long as this MetaInterp @@ -2314,15 +2318,15 @@ self.staticdata.try_to_free_some_loops() self.initialize_state_from_guard_failure(key, deadframe) try: - return self._handle_guard_failure(key, deadframe) + return self._handle_guard_failure(resumedescr, key, deadframe) finally: self.resumekey_original_loop_token = None self.staticdata.profiler.end_tracing() 
debug_stop('jit-tracing') - def _handle_guard_failure(self, key, deadframe): + def _handle_guard_failure(self, resumedescr, key, deadframe): self.current_merge_points = [] - self.resumekey = key + self.resumekey = resumedescr self.seen_loop_header_for_jdindex = -1 if isinstance(key, compile.ResumeAtPositionDescr): self.seen_loop_header_for_jdindex = self.jitdriver_sd.index From noreply at buildbot.pypy.org Sun Oct 4 13:59:56 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 13:59:56 +0200 (CEST) Subject: [pypy-commit] pypy default: oops Message-ID: <20151004115956.9F6851C13D4@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79971:6d64f32918a7 Date: 2015-10-04 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/6d64f32918a7/ Log: oops diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -639,7 +639,7 @@ descr = compile.invent_fail_descr_for_op(guard_op.getopnum(), self, True) assert isinstance(descr, compile.ResumeGuardCopiedDescr) last_descr = last_guard_op.getdescr() - assert isinstance(descr, compile.ResumeGuardDescr) + assert isinstance(last_descr, compile.ResumeGuardDescr) descr.prev = last_descr guard_op.setdescr(descr) guard_op.setfailargs(last_guard_op.getfailargs()) From noreply at buildbot.pypy.org Sun Oct 4 15:41:38 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 15:41:38 +0200 (CEST) Subject: [pypy-commit] pypy default: it really should not make a difference but crashes pypy Message-ID: <20151004134138.AEDA01C0369@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79972:dbfc6a7583c9 Date: 2015-10-04 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/dbfc6a7583c9/ Log: it really should not make a difference but crashes pypy diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2312,7 +2312,7 @@ assert isinstance(key, compile.ResumeGuardDescr) # store the resumekey.wref_original_loop_token() on 'self' to make # sure that it stays alive as long as this MetaInterp - self.resumekey_original_loop_token = key.rd_loop_token.loop_token_wref() + self.resumekey_original_loop_token = resumedescr.rd_loop_token.loop_token_wref() if self.resumekey_original_loop_token is None: raise compile.giveup() # should be rare self.staticdata.try_to_free_some_loops() From noreply at buildbot.pypy.org Sun Oct 4 15:41:40 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 15:41:40 +0200 (CEST) Subject: [pypy-commit] pypy default: obvious fix Message-ID: <20151004134140.E93231C088E@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79973:c7f424ad8ccd Date: 2015-10-04 15:35 +0200 http://bitbucket.org/pypy/pypy/changeset/c7f424ad8ccd/ Log: obvious fix diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2476,7 +2476,8 @@ def prepare_resume_from_failure(self, deadframe, resumedescr): exception = self.cpu.grab_exc_value(deadframe) - if isinstance(resumedescr, compile.ResumeGuardExcDescr): + if (isinstance(resumedescr, compile.ResumeGuardExcDescr) or + isinstance(resumedescr, compile.ResumeGuardCopiedExcDescr)): if exception: self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception)) From noreply at buildbot.pypy.org Sun Oct 4 15:41:43 2015 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 4 Oct 2015 15:41:43 +0200 (CEST) Subject: [pypy-commit] pypy default: a test and a fix Message-ID: <20151004134143.0F9801C122A@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79974:accbdbfd3b8e Date: 2015-10-04 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/accbdbfd3b8e/ Log: a test and a fix diff --git 
a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2331,7 +2331,7 @@ if isinstance(key, compile.ResumeAtPositionDescr): self.seen_loop_header_for_jdindex = self.jitdriver_sd.index try: - self.prepare_resume_from_failure(deadframe, key) + self.prepare_resume_from_failure(deadframe, resumedescr) if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(Counters.ABORT_BRIDGE) self.interpret() diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py --- a/rpython/jit/metainterp/test/test_exception.py +++ b/rpython/jit/metainterp/test/test_exception.py @@ -3,6 +3,7 @@ from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask from rpython.jit.codewriter.policy import StopAtXPolicy +from rpython.rtyper.lltypesystem import lltype, rffi class ExceptionTests: @@ -60,6 +61,33 @@ res = self.meta_interp(f, [21], policy=StopAtXPolicy(check)) assert res == f(21) + def test_bridge_from_guard_exception_may_force(self): + myjitdriver = JitDriver(greens = [], reds = ['n']) + + c_time = rffi.llexternal("time", [lltype.Signed], lltype.Signed) + + def check(n): + if n % 2: + raise ValueError + if n == 100000: + c_time(0) + + def f(n): + while n > 0: + myjitdriver.can_enter_jit(n=n) + myjitdriver.jit_merge_point(n=n) + try: + check(n) + n -= 1 + except ValueError: + n -= 3 + return n + + res = self.meta_interp(f, [20], policy=StopAtXPolicy(check)) + assert res == f(20) + res = self.meta_interp(f, [21], policy=StopAtXPolicy(check)) + assert res == f(21) + def test_bridge_from_guard_no_exception(self): myjitdriver = JitDriver(greens = [], reds = ['n']) def check(n): From noreply at buildbot.pypy.org Sun Oct 4 15:54:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 15:54:17 +0200 (CEST) Subject: [pypy-commit] pypy default: 
Move the non-optimizable part of callbacks outside the JIT Message-ID: <20151004135417.0215D1C088E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79975:9c043c7d1a7b Date: 2015-10-04 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/9c043c7d1a7b/ Log: Move the non-optimizable part of callbacks outside the JIT diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -178,7 +178,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,8 +200,16 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") + at jit.jit_callback("CFFI") +def py_invoke_callback(callback, ll_res, ll_args): + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) - at jit.jit_callback("CFFI") def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care @@ -228,13 +237,7 @@ space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. 
From noreply at buildbot.pypy.org Sun Oct 4 16:25:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 16:25:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Typos and tweaks of comments Message-ID: <20151004142555.4FCD81C088E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79976:00532145d0f4 Date: 2015-10-04 16:26 +0200 http://bitbucket.org/pypy/pypy/changeset/00532145d0f4/ Log: Typos and tweaks of comments diff --git a/rpython/rlib/jit_libffi.py b/rpython/rlib/jit_libffi.py --- a/rpython/rlib/jit_libffi.py +++ b/rpython/rlib/jit_libffi.py @@ -87,14 +87,10 @@ ## test_fficall::test_guard_not_forced_fails for a more detalied explanation ## of the problem. ## -## The solution is to create a new separate operation libffi_save_result whose -## job is to write the result in the exchange_buffer: during normal execution -## this is a no-op because the buffer is already filled by libffi, but during -## jitting the behavior is to actually write into the buffer. -## +## The solution is to create a new separate operation libffi_call. ## The result is that now the jitcode looks like this: ## -## %i0 = direct_call(libffi_call_int, ...) +## %i0 = direct_call(libffi_call, ...) ## -live- ## raw_store(exchange_result, %i0) ## @@ -237,7 +233,7 @@ def jit_ffi_call_impl_any(cif_description, func_addr, exchange_buffer): """ - This is the function which actually calls libffi. All the rest if just + This is the function which actually calls libffi. All the rest is just infrastructure to convince the JIT to pass a typed result box to jit_ffi_save_result """ From noreply at buildbot.pypy.org Sun Oct 4 16:38:52 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sun, 4 Oct 2015 16:38:52 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Move rewrite callbacks to postprocess pass, one test fails. 
Message-ID: <20151004143852.764201C088E@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r79977:c1e114708b34 Date: 2015-10-04 16:39 +0200 http://bitbucket.org/pypy/pypy/changeset/c1e114708b34/ Log: Move rewrite callbacks to postprocess pass, one test fails. diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -38,16 +38,6 @@ return n -class IntBoundsCallbackArgs(object): - def __init__(self, op, callback_func=None): - self.op = op - self.callback_func = callback_func - - def callback(self): - if self.callback_func is not None: - self.callback_func(self.op) - - class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" @@ -58,8 +48,8 @@ def propagate_forward(self, op): return dispatch_opt(self, op) - def propagate_postprocess(self, op): - return dispatch_postprocess(self, op) + def propagate_postprocess(self, op, oldop): + return dispatch_postprocess(self, op, oldop) def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt @@ -75,7 +65,7 @@ def _optimize_guard_true_false_value(self, op): return op - def _postprocess_guard_true_false_value(self, op): + def _postprocess_guard_true_false_value(self, op, oldop): if op.getarg(0).type == 'i': self.propagate_bounds_backward(op.getarg(0)) @@ -98,7 +88,7 @@ return None return op - def postprocess_INT_OR_or_XOR(self, op): + def postprocess_INT_OR_or_XOR(self, op, oldop): v1 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(v1) v2 = self.get_box_replacement(op.getarg(1)) @@ -118,7 +108,7 @@ def optimize_INT_AND(self, op): return op - def postprocess_INT_AND(self, op): + def postprocess_INT_AND(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = 
self.getintbound(op) @@ -137,7 +127,7 @@ def optimize_INT_SUB(self, op): return op - def postprocess_INT_SUB(self, op): + def postprocess_INT_SUB(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.sub_bound(b2) @@ -184,7 +174,7 @@ return op - def postprocess_INT_ADD(self, op): + def postprocess_INT_ADD(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -195,7 +185,7 @@ def optimize_INT_MUL(self, op): return op - def postprocess_INT_MUL(self, op): + def postprocess_INT_MUL(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -206,7 +196,7 @@ def optimize_INT_FLOORDIV(self, op): return op - def postprocess_INT_FLOORDIV(self, op): + def postprocess_INT_FLOORDIV(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -227,7 +217,7 @@ args=[arg1, arg2]) return op - def postprocess_INT_MOD(self, op): + def postprocess_INT_MOD(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) known_nonneg = (b1.known_ge(IntBound(0, 0)) and @@ -248,7 +238,7 @@ def optimize_INT_LSHIFT(self, op): return op - def postprocess_INT_LSHIFT(self, op): + def postprocess_INT_LSHIFT(self, op, oldop): arg0 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg0) arg1 = self.get_box_replacement(op.getarg(1)) @@ -274,7 +264,7 @@ return None return op - def postprocess_INT_RSHIFT(self, op): + def postprocess_INT_RSHIFT(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.rshift_bound(b2) @@ -331,7 +321,7 @@ op = self.replace_op_with(op, rop.INT_ADD) return op - def postprocess_INT_ADD_OVF(self, op): + def postprocess_INT_ADD_OVF(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.add_bound(b2) @@ -351,7 +341,7 @@ op = 
self.replace_op_with(op, rop.INT_SUB) return op - def postprocess_INT_SUB_OVF(self, op): + def postprocess_INT_SUB_OVF(self, op, oldop): arg0 = self.get_box_replacement(op.getarg(0)) arg1 = self.get_box_replacement(op.getarg(1)) b0 = self.getintbound(arg0) @@ -368,7 +358,7 @@ op = self.replace_op_with(op, rop.INT_MUL) return op - def postprocess_INT_MUL_OVF(self, op): + def postprocess_INT_MUL_OVF(self, op, oldop): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.mul_bound(b2) @@ -469,7 +459,7 @@ else: return op - def postprocess_INT_SIGNEXT(self, op): + def postprocess_INT_SIGNEXT(self, op, oldop): numbits = op.getarg(1).getint() * 8 start = -(1 << (numbits - 1)) stop = 1 << (numbits - 1) @@ -480,14 +470,14 @@ def optimize_ARRAYLEN_GC(self, op): return op - def postprocess_ARRAYLEN_GC(self, op): + def postprocess_ARRAYLEN_GC(self, op, oldop): array = self.ensure_ptr_info_arg0(op) self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): return op - def postprocess_STRLEN(self, op): + def postprocess_STRLEN(self, op, oldop): self.make_nonnull_str(op.getarg(0), vstring.mode_string) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) @@ -495,7 +485,7 @@ def optimize_UNICODELEN(self, op): return op - def postprocess_UNICODELEN(self, op): + def postprocess_UNICODELEN(self, op, oldop): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) @@ -503,7 +493,7 @@ def optimize_STRGETITEM(self, op): return op - def postprocess_STRGETITEM(self, op): + def postprocess_STRGETITEM(self, op, oldop): v1 = self.getintbound(op) v2 = self.getptrinfo(op.getarg(0)) intbound = self.getintbound(op.getarg(1)) @@ -517,7 +507,7 @@ def optimize_GETFIELD_RAW_I(self, op): return op - def postprocess_GETFIELD_RAW_I(self, op): + def postprocess_GETFIELD_RAW_I(self, 
op, oldop): descr = op.getdescr() if descr.is_integer_bounded(): b1 = self.getintbound(op) @@ -547,7 +537,7 @@ def optimize_GETARRAYITEM_RAW_I(self, op): return op - def postprocess_GETARRAYITEM_RAW_I(self, op): + def postprocess_GETARRAYITEM_RAW_I(self, op, oldop): descr = op.getdescr() if descr and descr.is_item_integer_bounded(): intbound = self.getintbound(op) @@ -567,7 +557,7 @@ def optimize_UNICODEGETITEM(self, op): return op - def postprocess_UNICODEGETITEM(self, op): + def postprocess_UNICODEGETITEM(self, op, oldop): b1 = self.getintbound(op) b1.make_ge(IntLowerBound(0)) v2 = self.getptrinfo(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -272,7 +272,7 @@ def set_optimizations(self, optimizations): if optimizations: - self.first_optimization = optimizations[1] + self.first_optimization = optimizations[2] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] optimizations[-1].next_optimization = self @@ -538,12 +538,15 @@ op.set_forwarded(None) def send_extra_operation(self, op): - newop = self.optimizations[0].propagate_forward(op) - if newop is None: - return - self.optimizations[0].last_emitted_operation = newop - self.first_optimization.propagate_forward(newop) - self.optimizations[0].propagate_postprocess(newop) + oldop = op + for optimization in self.optimizations[:2]: + op = optimization.propagate_forward(op) + if op is None: + return + optimization.last_emitted_operation = op + self.first_optimization.propagate_forward(op) + for optimization in reversed(self.optimizations[:2]): + optimization.propagate_postprocess(op, oldop) def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- 
a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -24,6 +24,9 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} + def opt_default(self, op): + return op + def setup(self): self.optimizer.optrewrite = self @@ -36,7 +39,10 @@ if self.find_rewritable_bool(op): return - dispatch_opt(self, op) + return dispatch_opt(self, op) + + def propagate_postprocess(self, op, oldop): + return dispatch_postprocess(self, op, oldop) def try_boolinvers(self, op, targs): oldop = self.get_pure_result(targs) @@ -97,7 +103,7 @@ self.make_equal_to(op, op.getarg(1)) return - self.emit_operation(op) + return op def optimize_INT_OR(self, op): b1 = self.getintbound(op.getarg(0)) @@ -107,7 +113,7 @@ elif b2.equal(0): self.make_equal_to(op, op.getarg(0)) else: - self.emit_operation(op) + return op def optimize_INT_SUB(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -118,20 +124,18 @@ self.make_equal_to(op, arg1) elif b1.equal(0): op = self.replace_op_with(op, rop.INT_NEG, args=[arg2]) - self.emit_operation(op) + return op elif arg1.same_box(arg2): self.make_constant_int(op, 0) else: - self.emit_operation(op) - self.optimize_INT_SUB_callback(op) + return op - def optimize_INT_SUB_callback(self, op): + def postprocess_INT_SUB(self, op, oldop): self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): if self.is_raw_ptr(op.getarg(0)) or self.is_raw_ptr(op.getarg(1)): - self.emit_operation(op) - return + return op arg1 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg1) arg2 = self.get_box_replacement(op.getarg(1)) @@ -143,10 +147,9 @@ elif b2.equal(0): self.make_equal_to(op, arg1) else: - self.emit_operation(op) - self.optimize_INT_ADD_callback(op) + return op - def optimize_INT_ADD_callback(self, op): + def postprocess_INT_ADD(self, op, oldop): self.optimizer.pure_reverse(op) def optimize_INT_MUL(self, op): @@ -172,7 +175,7 @@ new_rhs = ConstInt(highest_bit(lh_info.getint())) op = 
self.replace_op_with(op, rop.INT_LSHIFT, args=[rhs, new_rhs]) break - self.emit_operation(op) + return op def optimize_UINT_FLOORDIV(self, op): b2 = self.getintbound(op.getarg(1)) @@ -180,7 +183,7 @@ if b2.is_constant() and b2.getint() == 1: self.make_equal_to(op, op.getarg(0)) else: - self.emit_operation(op) + return op def optimize_INT_LSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) @@ -191,7 +194,7 @@ elif b1.is_constant() and b1.getint() == 0: self.make_constant_int(op, 0) else: - self.emit_operation(op) + return op def optimize_INT_RSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) @@ -202,7 +205,7 @@ elif b1.is_constant() and b1.getint() == 0: self.make_constant_int(op, 0) else: - self.emit_operation(op) + return op def optimize_INT_XOR(self, op): b1 = self.getintbound(op.getarg(0)) @@ -213,7 +216,7 @@ elif b2.equal(0): self.make_equal_to(op, op.getarg(0)) else: - self.emit_operation(op) + return op def optimize_FLOAT_MUL(self, op): arg1 = op.getarg(0) @@ -231,12 +234,10 @@ return elif v1.getfloat() == -1.0: newop = self.replace_op_with(op, rop.FLOAT_NEG, args=[rhs]) - self.emit_operation(newop) - return - self.emit_operation(op) - self.optimize_FLOAT_MUL_callback(op) + return newop + return op - def optimize_FLOAT_MUL_callback(self, op): + def postprocess_FLOAT_MUL(self, op, oldop): self.optimizer.pure_reverse(op) def optimize_FLOAT_TRUEDIV(self, op): @@ -258,16 +259,15 @@ c = ConstFloat(longlong.getfloatstorage(reciprocal)) newop = self.replace_op_with(op, rop.FLOAT_MUL, args=[arg1, c]) - self.emit_operation(newop) + return newop def optimize_FLOAT_NEG(self, op): - self.emit_operation(op) - self.optimize_FLOAT_NEG_callback(op) + return op - def optimize_FLOAT_NEG_callback(self, op): + def postprocess_FLOAT_NEG(self, op, oldop): self.optimizer.pure_reverse(op) - def optimize_guard(self, op, constbox, emit_operation=True): + def optimize_guard(self, op, constbox): box = op.getarg(0) if box.type == 'i': intbound = self.getintbound(box) @@ -288,14 
+288,7 @@ 'was proven to always fail' % r) return - if emit_operation: - self.emit_operation(op) - self.optimize_guard_callback(op, box, constbox) - else: - self.optimize_guard_callback(op, box, constbox) - - def optimize_guard_callback(self, op, box, constbox): - self.make_constant(box, constbox) + return op def optimize_GUARD_ISNULL(self, op): info = self.getptrinfo(op.getarg(0)) @@ -306,10 +299,9 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always ' 'fail' % r) - self.emit_operation(op) - self.optimize_GUARD_ISNULL_callback(op) + return op - def optimize_GUARD_ISNULL_callback(self, op): + def postprocess_GUARD_ISNULL(self, op, oldop): self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_IS_OBJECT(self, op): @@ -326,7 +318,7 @@ return if info.is_precise(): raise InvalidLoop() - self.emit_operation(op) + return op def optimize_GUARD_GC_TYPE(self, op): info = self.getptrinfo(op.getarg(0)) @@ -340,7 +332,7 @@ if info.get_descr().get_type_id() != op.getarg(1).getint(): raise InvalidLoop("wrong GC types passed around!") return - self.emit_operation(op) + return op def _check_subclass(self, vtable1, vtable2): # checks that vtable1 is a subclass of vtable2 @@ -374,7 +366,7 @@ if self._check_subclass(info.get_descr().get_vtable(), op.getarg(1).getint()): return - self.emit_operation(op) + return op def optimize_GUARD_NONNULL(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -385,10 +377,9 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always ' 'fail' % r) - self.emit_operation(op) - self.optimize_GUARD_NONNULL_callback(op) + return op - def optimize_GUARD_NONNULL_callback(self, op): + def postprocess_GUARD_NONNULL(self, op, oldop): self.make_nonnull(op.getarg(0)) self.getptrinfo(op.getarg(0)).mark_last_guard(self.optimizer) @@ -409,7 +400,11 @@ return constbox = op.getarg(1) assert 
isinstance(constbox, Const) - self.optimize_guard(op, constbox) + return self.optimize_guard(op, constbox) + + def postprocess_GUARD_VALUE(self, op, oldop): + box = self.get_box_replacement(op.getarg(0)) + self.make_constant(box, op.getarg(1)) def replace_guard_class_with_guard_value(self, op, info, old_guard_op): if old_guard_op.opnum != rop.GUARD_NONNULL: @@ -437,10 +432,18 @@ return op def optimize_GUARD_TRUE(self, op): - self.optimize_guard(op, CONST_1) + return self.optimize_guard(op, CONST_1) + + def postprocess_GUARD_TRUE(self, op, oldop): + box = self.get_box_replacement(op.getarg(0)) + self.make_constant(box, CONST_1) def optimize_GUARD_FALSE(self, op): - self.optimize_guard(op, CONST_0) + return self.optimize_guard(op, CONST_0) + + def postprocess_GUARD_FALSE(self, op, oldop): + box = self.get_box_replacement(op.getarg(0)) + self.make_constant(box, CONST_0) def optimize_RECORD_EXACT_CLASS(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -483,17 +486,16 @@ # not put in short preambles guard_nonnull and guard_class # on the same box. 
self.optimizer.replace_guard(op, info) - self.emit_operation(op) - self.optimize_GUARD_CLASS_callback_1(op, expectedclassbox) - return - self.emit_operation(op) - self.optimize_GUARD_CLASS_callback_2(op, expectedclassbox) + return op + return op - def optimize_GUARD_CLASS_callback_1(self, op, expectedclassbox): - self.make_constant_class(op.getarg(0), expectedclassbox, False) - - def optimize_GUARD_CLASS_callback_2(self, op, expectedclassbox): - self.make_constant_class(op.getarg(0), expectedclassbox) + def postprocess_GUARD_CLASS(self, op, oldop): + expectedclassbox = op.getarg(1) + info = self.getptrinfo(op.getarg(0)) + old_guard_op = info.get_last_guard(self.optimizer) + update_last_guard = not old_guard_op or isinstance( + old_guard_op.getdescr(), compile.ResumeAtPositionDescr) + self.make_constant_class(op.getarg(0), expectedclassbox, update_last_guard) def optimize_GUARD_NONNULL_CLASS(self, op): info = self.getptrinfo(op.getarg(0)) @@ -501,7 +503,9 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_NONNULL_CLASS (%s) was proven to ' 'always fail' % r) - self.optimize_GUARD_CLASS(op) + return self.optimize_GUARD_CLASS(op) + + postprocess_GUARD_NONNULL_CLASS = postprocess_GUARD_CLASS def optimize_CALL_LOOPINVARIANT_I(self, op): arg = op.getarg(0) @@ -521,16 +525,21 @@ # there is no reason to have a separate operation for this newop = self.replace_op_with(op, OpHelpers.call_for_descr(op.getdescr())) - self.emit_operation(newop) - self.optimize_CALL_LOOPINVARIANT_I_callback(newop, op, key) + return op - def optimize_CALL_LOOPINVARIANT_I_callback(self, newop, op, key): + def postprocess_CALL_LOOPINVARIANT_I(self, op, oldop): + key = make_hashable_int(op.getarg(0).getint()) self.loop_invariant_producer[key] = self.optimizer.getlastop() - self.loop_invariant_results[key] = op + self.loop_invariant_results[key] = oldop + optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_F = 
optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I + postprocess_CALL_LOOPINVARIANT_R = postprocess_CALL_LOOPINVARIANT_I + postprocess_CALL_LOOPINVARIANT_F = postprocess_CALL_LOOPINVARIANT_I + postprocess_CALL_LOOPINVARIANT_N = postprocess_CALL_LOOPINVARIANT_I + def optimize_COND_CALL(self, op): arg = op.getarg(0) b = self.getintbound(arg) @@ -540,7 +549,7 @@ return opnum = OpHelpers.call_for_type(op.type) op = op.copy_and_change(opnum, args=op.getarglist()[1:]) - self.emit_operation(op) + return op def _optimize_nullness(self, op, box, expect_nonnull): info = self.getnullness(box) @@ -549,17 +558,17 @@ elif info == INFO_NULL: self.make_constant_int(op, not expect_nonnull) else: - self.emit_operation(op) + return op def optimize_INT_IS_TRUE(self, op): if (not self.is_raw_ptr(op.getarg(0)) and self.getintbound(op.getarg(0)).is_bool()): self.make_equal_to(op, op.getarg(0)) return - self._optimize_nullness(op, op.getarg(0), True) + return self._optimize_nullness(op, op.getarg(0), True) def optimize_INT_IS_ZERO(self, op): - self._optimize_nullness(op, op.getarg(0), False) + return self._optimize_nullness(op, op.getarg(0), False) def _optimize_oois_ooisnot(self, op, expect_isnot, instance): arg0 = self.get_box_replacement(op.getarg(0)) @@ -575,9 +584,9 @@ elif info1 and info1.is_virtual(): self.make_constant_int(op, expect_isnot) elif info1 and info1.is_null(): - self._optimize_nullness(op, op.getarg(0), expect_isnot) + return self._optimize_nullness(op, op.getarg(0), expect_isnot) elif info0 and info0.is_null(): - self._optimize_nullness(op, op.getarg(1), expect_isnot) + return self._optimize_nullness(op, op.getarg(1), expect_isnot) elif arg0 is arg1: self.make_constant_int(op, not expect_isnot) else: @@ -596,19 +605,19 @@ # class is different self.make_constant_int(op, expect_isnot) return - self.emit_operation(op) + return op def optimize_PTR_EQ(self, op): - self._optimize_oois_ooisnot(op, False, False) + return 
self._optimize_oois_ooisnot(op, False, False) def optimize_PTR_NE(self, op): - self._optimize_oois_ooisnot(op, True, False) + return self._optimize_oois_ooisnot(op, True, False) def optimize_INSTANCE_PTR_EQ(self, op): - self._optimize_oois_ooisnot(op, False, True) + return self._optimize_oois_ooisnot(op, False, True) def optimize_INSTANCE_PTR_NE(self, op): - self._optimize_oois_ooisnot(op, True, True) + return self._optimize_oois_ooisnot(op, True, True) def optimize_CALL_N(self, op): # dispatch based on 'oopspecindex' to a method that handles @@ -617,14 +626,13 @@ effectinfo = op.getdescr().get_extra_info() oopspecindex = effectinfo.oopspecindex if oopspecindex == EffectInfo.OS_ARRAYCOPY: - if self._optimize_CALL_ARRAYCOPY(op): - return - self.emit_operation(op) + return self._optimize_CALL_ARRAYCOPY(op) + return op def _optimize_CALL_ARRAYCOPY(self, op): length = self.get_constant_box(op.getarg(5)) if length and length.getint() == 0: - return True # 0-length arraycopy + return None # 0-length arraycopy source_info = self.getptrinfo(op.getarg(1)) dest_info = self.getptrinfo(op.getarg(2)) @@ -640,7 +648,7 @@ dest_start = dest_start_box.getint() arraydescr = extrainfo.write_descrs_arrays[0] if arraydescr.is_array_of_structs(): - return False # not supported right now + return op # not supported right now # XXX fish fish fish for index in range(length.getint()): @@ -666,9 +674,9 @@ ConstInt(index + dest_start), val], descr=arraydescr) - self.emit_operation(newop) - return True - return False + self.optimizer.send_extra_operation(newop) + return None + return op def optimize_CALL_PURE_I(self, op): # this removes a CALL_PURE with all constant arguments. 
@@ -678,7 +686,8 @@ self.make_constant(op, result) self.last_emitted_operation = REMOVED return - self.emit_operation(op) + return op + optimize_CALL_PURE_R = optimize_CALL_PURE_I optimize_CALL_PURE_F = optimize_CALL_PURE_I optimize_CALL_PURE_N = optimize_CALL_PURE_I @@ -688,7 +697,7 @@ # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; # so we also kill the following GUARD_NO_EXCEPTION return - self.emit_operation(op) + return op def optimize_GUARD_FUTURE_CONDITION(self, op): self.optimizer.notice_guard_future_condition(op) @@ -710,15 +719,15 @@ if val & (val - 1) == 0 and val > 0: # val == 2**shift op = self.replace_op_with(op, rop.INT_RSHIFT, args = [op.getarg(0), ConstInt(highest_bit(val))]) - self.emit_operation(op) + return op def optimize_CAST_PTR_TO_INT(self, op): self.optimizer.pure_reverse(op) - self.emit_operation(op) + return op def optimize_CAST_INT_TO_PTR(self, op): self.optimizer.pure_reverse(op) - self.emit_operation(op) + return op def optimize_SAME_AS_I(self, op): self.make_equal_to(op, op.getarg(0)) @@ -726,5 +735,6 @@ optimize_SAME_AS_F = optimize_SAME_AS_I dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', - default=OptRewrite.emit_operation) + default=OptRewrite.opt_default) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') +dispatch_postprocess = make_dispatcher_method(OptRewrite, 'postprocess_') From noreply at buildbot.pypy.org Sun Oct 4 17:31:11 2015 From: noreply at buildbot.pypy.org (jerith) Date: Sun, 4 Oct 2015 17:31:11 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Move virtualize callbacks to postprocess pass. Message-ID: <20151004153111.A3A241C13D4@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r79978:bffdacba8eaa Date: 2015-10-04 17:22 +0200 http://bitbucket.org/pypy/pypy/changeset/bffdacba8eaa/ Log: Move virtualize callbacks to postprocess pass. 
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -272,7 +272,7 @@ def set_optimizations(self, optimizations): if optimizations: - self.first_optimization = optimizations[2] + self.first_optimization = optimizations[3] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] optimizations[-1].next_optimization = self @@ -539,13 +539,13 @@ def send_extra_operation(self, op): oldop = op - for optimization in self.optimizations[:2]: + for optimization in self.optimizations[:3]: op = optimization.propagate_forward(op) if op is None: return optimization.last_emitted_operation = op self.first_optimization.propagate_forward(op) - for optimization in reversed(self.optimizations[:2]): + for optimization in reversed(self.optimizations[:3]): optimization.propagate_postprocess(op, oldop) def propagate_forward(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -13,6 +13,10 @@ "Virtualize objects until they escape." 
_last_guard_not_forced_2 = None + _finish_guard_op = None + + def opt_default(self, op): + return op def make_virtual(self, known_class, source_op, descr): opinfo = info.InstancePtrInfo(descr, known_class, is_virtual=True) @@ -55,29 +59,27 @@ def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: return - self.emit_operation(op) + return op def optimize_GUARD_NOT_FORCED(self, op): if self.last_emitted_operation is REMOVED: return - self.emit_operation(op) + return op def optimize_GUARD_NOT_FORCED_2(self, op): self._last_guard_not_forced_2 = op def optimize_FINISH(self, op): - if self._last_guard_not_forced_2 is not None: - guard_op = self._last_guard_not_forced_2 - self.emit_operation(op) - self.optimize_FINISH_callback(op, guard_op) - else: - self.emit_operation(op) + self._finish_guard_op = self._last_guard_not_forced_2 + return op - def optimize_FINISH_callback(self, op, guard_op): - guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) - i = len(self.optimizer._newoperations) - 1 - assert i >= 0 - self.optimizer._newoperations.insert(i, guard_op) + def postprocess_FINISH(self, op, oldop): + guard_op = self._finish_guard_op + if guard_op is not None: + guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) + i = len(self.optimizer._newoperations) - 1 + assert i >= 0 + self.optimizer._newoperations.insert(i, guard_op) def optimize_CALL_MAY_FORCE_I(self, op): effectinfo = op.getdescr().get_extra_info() @@ -85,7 +87,7 @@ if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: if self._optimize_JIT_FORCE_VIRTUAL(op): return - self.emit_operation(op) + return op optimize_CALL_MAY_FORCE_R = optimize_CALL_MAY_FORCE_I optimize_CALL_MAY_FORCE_F = optimize_CALL_MAY_FORCE_I optimize_CALL_MAY_FORCE_N = optimize_CALL_MAY_FORCE_I @@ -97,13 +99,15 @@ opinfo = self.getptrinfo(op.getarg(2)) if opinfo and opinfo.is_virtual(): return - self.emit_operation(op) + return op def optimize_VIRTUAL_REF(self, op): # get some constants 
vrefinfo = self.optimizer.metainterp_sd.virtualref_info c_cls = vrefinfo.jit_virtual_ref_const_class vref_descr = vrefinfo.descr + descr_virtual_token = vrefinfo.descr_virtual_token + descr_forced = vrefinfo.descr_forced # # Replace the VIRTUAL_REF operation with a virtual structure of type # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon, @@ -113,16 +117,10 @@ op.set_forwarded(newop) newop.set_forwarded(vrefvalue) token = ResOperation(rop.FORCE_TOKEN, []) - self.emit_operation(token) - self.optimize_VIRTUAL_REF_callback(op, vrefvalue, newop, token) - - def optimize_VIRTUAL_REF_callback(self, op, vrefvalue, newop, token): - vrefinfo = self.optimizer.metainterp_sd.virtualref_info - descr_virtual_token = vrefinfo.descr_virtual_token - descr_forced = vrefinfo.descr_forced vrefvalue.setfield(descr_virtual_token, newop, token) vrefvalue.setfield(descr_forced, newop, self.optimizer.cpu.ts.CONST_NULLREF) + return token def optimize_VIRTUAL_REF_FINISH(self, op): # This operation is used in two cases. 
In normal cases, it @@ -185,7 +183,7 @@ self.make_equal_to(op, fieldop) else: self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -203,7 +201,7 @@ self.get_box_replacement(op.getarg(1))) else: self.make_nonnull(struct) - self.emit_operation(op) + return op def optimize_NEW_WITH_VTABLE(self, op): known_class = ConstInt(op.getdescr().get_vtable()) @@ -217,36 +215,35 @@ if sizebox is not None: self.make_varray(op.getdescr(), sizebox.getint(), op) else: - self.emit_operation(op) + return op def optimize_NEW_ARRAY_CLEAR(self, op): sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: self.make_varray(op.getdescr(), sizebox.getint(), op, clear=True) else: - self.emit_operation(op) + return op def optimize_CALL_N(self, op): effectinfo = op.getdescr().get_extra_info() if effectinfo.oopspecindex == EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR: - self.do_RAW_MALLOC_VARSIZE_CHAR(op) + return self.do_RAW_MALLOC_VARSIZE_CHAR(op) elif effectinfo.oopspecindex == EffectInfo.OS_RAW_FREE: - self.do_RAW_FREE(op) + return self.do_RAW_FREE(op) elif effectinfo.oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUALIZABLE: # we might end up having CALL here instead of COND_CALL info = self.getptrinfo(op.getarg(1)) if info and info.is_virtual(): return else: - self.emit_operation(op) + return op optimize_CALL_R = optimize_CALL_N optimize_CALL_I = optimize_CALL_N def do_RAW_MALLOC_VARSIZE_CHAR(self, op): sizebox = self.get_constant_box(op.getarg(1)) if sizebox is None: - self.emit_operation(op) - return + return op self.make_virtual_raw_memory(sizebox.getint(), op) self.last_emitted_operation = REMOVED @@ -254,7 +251,7 @@ opinfo = self.getrawptrinfo(op.getarg(1)) if opinfo and opinfo.is_virtual(): return - self.emit_operation(op) + return op def optimize_INT_ADD(self, op): opinfo = self.getrawptrinfo(op.getarg(0), create=False) @@ -267,7 +264,7 @@ isinstance(opinfo, 
info.RawSlicePtrInfo)): self.make_virtual_raw_slice(offset, opinfo, op) return - self.emit_operation(op) + return op def optimize_ARRAYLEN_GC(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -275,7 +272,7 @@ self.make_constant_int(op, opinfo.getlength()) else: self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op def optimize_GETARRAYITEM_GC_I(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -289,7 +286,7 @@ self.make_equal_to(op, item) return self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I @@ -309,7 +306,7 @@ self.get_box_replacement(op.getarg(2))) return self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op def _unpack_arrayitem_raw_op(self, op, indexbox): index = indexbox.getint() @@ -334,7 +331,7 @@ self.make_equal_to(op, itemvalue) return self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I def optimize_SETARRAYITEM_RAW(self, op): @@ -350,7 +347,7 @@ except InvalidRawOperation: pass self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op def _unpack_raw_load_store_op(self, op, offsetbox): offset = offsetbox.getint() @@ -372,7 +369,7 @@ else: self.make_equal_to(op, itemop) return - self.emit_operation(op) + return op optimize_RAW_LOAD_F = optimize_RAW_LOAD_I def optimize_RAW_STORE(self, op): @@ -386,7 +383,7 @@ return except InvalidRawOperation: pass - self.emit_operation(op) + return op def optimize_GETINTERIORFIELD_GC_I(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -402,7 +399,7 @@ self.make_equal_to(op, fld) return self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I @@ -416,10 +413,12 @@ self.get_box_replacement(op.getarg(2))) return 
self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return op dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', - default=OptVirtualize.emit_operation) + default=OptVirtualize.opt_default) OptVirtualize.propagate_forward = dispatch_opt +dispatch_postprocess = make_dispatcher_method(OptVirtualize, 'postprocess_') +OptVirtualize.propagate_postprocess = dispatch_postprocess From noreply at buildbot.pypy.org Sun Oct 4 19:03:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 19:03:36 +0200 (CEST) Subject: [pypy-commit] cffi default: Call a better function Message-ID: <20151004170336.985F31C088E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2299:21fef94ca0c8 Date: 2015-10-04 17:23 +0200 http://bitbucket.org/cffi/cffi/changeset/21fef94ca0c8/ Log: Call a better function diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4998,7 +4998,7 @@ PyTuple_SET_ITEM(py_args, i, a); } - py_res = PyEval_CallObject(py_ob, py_args); + py_res = PyObject_Call(py_ob, py_args, NULL); if (py_res == NULL) goto error; if (convert_from_object_fficallback(result, SIGNATURE(1), py_res) < 0) { From noreply at buildbot.pypy.org Sun Oct 4 19:04:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 19:04:05 +0200 (CEST) Subject: [pypy-commit] cffi cmacros: A bunch of test about how it should be possible to support this: Message-ID: <20151004170405.9C9831C088E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cmacros Changeset: r2300:33acd0dc850c Date: 2015-10-04 19:04 +0200 http://bitbucket.org/cffi/cffi/changeset/33acd0dc850c/ Log: A bunch of test about how it should be possible to support this: conditional typedefs that used later in the cdef() diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -392,6 +392,254 @@ assert m.BCD == 43 assert m.CDE == 39 +def 
test_ifdef_partial_unsupported(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + int foo(int + #ifdef ABC + , long + #endif + ); + """) + should_crash + +def test_conditional_typedef_1(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #endif + """) + case = ffi._parser._declarations['typedef foo_t'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == '' + assert case.iffalse is None + +def test_conditional_typedef_2(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #else + typedef long foo_t; + #endif + """) + case = ffi._parser._declarations['typedef foo_t'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == '' + assert str(case.iffalse) == '' + +def test_conditional_typedef_3(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + #else + typedef long foo_t; + #endif + """) + case = ffi._parser._declarations['typedef foo_t'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert case.iftrue is None + assert str(case.iffalse) == '' + +def test_conditional_typedef_4(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifndef ABC + typedef long foo_t; + #endif + """) + case = ffi._parser._declarations['typedef foo_t'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert case.iftrue is None + assert str(case.iffalse) == '' + +def test_conditional_func(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifndef ABC + int foo(int); + #endif + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert case.iftrue is None + assert str(case.iffalse) == '), , False>' + +def test_conditional_typedef_used_by_typedef(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + 
typedef int foo_t; + #else + typedef long foo_t; + #endif + typedef foo_t bar_t[2]; + """) + case = ffi._parser._declarations['typedef bar_t'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == ' x 2>' + assert str(case.iffalse) == ' x 2>' + +def test_conditional_typedef_used_by_func_1(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #else + typedef long foo_t; + #endif + char foo(foo_t); + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == '), , False>' + assert str(case.iffalse) == '), , False>' + +def test_conditional_typedef_used_by_func_2(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #else + typedef long foo_t; + #endif + foo_t foo(char); + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == '), , False>' + assert str(case.iffalse) == '), , False>' + +def test_conditional_typedef_not_used_by_func(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #else + typedef long foo_t; + #endif + char foo(char); + """) + case = ffi._parser._declarations['function foo'] + assert str(case) == '), , False>' + +def test_conditional_nested(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + # if D > E + typedef int foo_t; + # else + typedef unsigned int foo_t; + # endif + #else + typedef long foo_t; + #endif + foo_t foo(char); + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert case.iftrue.condition == '(D > E)' + assert str(case.iftrue.iftrue) == '), , False>' + assert str(case.iftrue.iffalse) == '), , False>' + assert str(case.iffalse) == '), , 
False>' + +def test_conditional_reuse(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #endif + + #ifdef ABC + foo_t foo(char); + #endif + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert str(case.iftrue) == '), , False>' + assert case.iffalse is None + +def test_conditional_reuse_nesting(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + # if D > E + typedef int foo_t; + # else + typedef unsigned int foo_t; + # endif + #endif + + #ifdef ABC + foo_t foo(char); + #endif + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == 'defined(ABC)' + assert case.iftrue.condition == '(D > E)' + assert str(case.iftrue.iftrue) == '), , False>' + assert str(case.iftrue.iffalse) == '), , False>' + assert case.iffalse is None + +def test_conditional_different_condition(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #else + typedef long foo_t; + #endif + + #if D > E + foo_t foo(char); + #endif + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == '(D > E)' + assert case.iftrue.condition == 'defined(ABC)' + assert str(case.iftrue.iftrue) == '), , False>' + assert str(case.iftrue.iffalse) == '), , False>' + assert case.iffalse is None + +def test_conditional_reuse_reversed_nesting(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + # if D > E + typedef int foo_t; + # else + typedef unsigned int foo_t; + # endif + #else + typedef long foo_t; + #endif + + #if D > E + foo_t foo(char); + #endif + """) + case = ffi._parser._declarations['function foo'] + assert isinstance(case, ConditionalCase) + assert case.condition == '(D > E)' + assert case.iftrue.condition == 'defined(ABC)' + assert str(case.iftrue.iftrue) == '), , False>' + 
assert str(case.iftrue.iffalse) == '), , False>' + assert case.iffalse is None + def test_define_not_supported_for_now(): ffi = FFI(backend=FakeBackend()) e = py.test.raises(CDefError, ffi.cdef, '#define FOO "blah"') From noreply at buildbot.pypy.org Sun Oct 4 19:18:36 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 Oct 2015 19:18:36 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: Add incomplete pypyjit test for issue #2148 Message-ID: <20151004171836.75E031C12D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79979:aa3aabb40221 Date: 2015-10-04 02:53 +0100 http://bitbucket.org/pypy/pypy/changeset/aa3aabb40221/ Log: Add incomplete pypyjit test for issue #2148 diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -248,3 +248,19 @@ guard_false(i157, descr=...) jump(..., descr=...) """) + + def test_mixed_div(self): + N = 1500 + def main(): + N = 1500 + import _numpypy.multiarray as np + arr = np.zeros(N) + l = [arr[i]/2. for i in range(N)] + return l + log = self.run(main, []) + assert log.result == [0.] * N + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + f3 = float_truediv(f1, f2) + jump(..., descr=...) 
+ """) From noreply at buildbot.pypy.org Sun Oct 4 19:18:38 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 Oct 2015 19:18:38 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: Move find_shape_and_elems() to ctors.py, since it's only used there Message-ID: <20151004171838.A07BC1C12D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79980:4da2050ae801 Date: 2015-10-04 03:28 +0100 http://bitbucket.org/pypy/pypy/changeset/4da2050ae801/ Log: Move find_shape_and_elems() to ctors.py, since it's only used there diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,12 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter +from . 
import constants as NPY def build_scalar(space, w_dtype, w_state): @@ -82,7 +84,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -143,7 +144,7 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): @@ -165,7 +166,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -173,7 +173,7 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + shape, elems_w = find_shape_and_elems(space, w_object, None) dtype = find_dtype_for_seq(space, elems_w, None) if dtype is None: dtype = descriptor.get_dtype_cache(space).w_float64dtype @@ -188,6 +188,64 @@ loop.assign(space, w_arr, elems_w) return w_arr +def find_shape_and_elems(space, w_iterable, dtype): + isstr = space.isinstance_w(w_iterable, space.w_str) + if not support.issequence_w(space, w_iterable) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): + return [], [w_iterable] + if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + return [], [w_iterable] + return _find_shape_and_elems(space, w_iterable, is_rec_type) + + +def 
_find_shape_and_elems(space, w_iterable, is_rec_type): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) + else: + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True + def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py 
@@ -189,67 +189,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) - for w_elem in batch: - if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - w_array = space.lookup(w_elem, '__array__') - if w_array is not None: - # Make sure we call the array 
implementation of listview, - # since for some ndarray subclasses (matrix, for instance) - # listview does not reduce but rather returns the same class - w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) - new_batch += space.listview(w_elem) - shape.append(size) - batch = new_batch - - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -169,7 +169,7 @@ [1, 1, 1, 105, 105] def test_find_shape(self): - from pypy.module.micronumpy.strides import find_shape_and_elems + from pypy.module.micronumpy.ctors import find_shape_and_elems space = self.space shape, elems = find_shape_and_elems(space, @@ -2485,7 +2485,7 @@ x = array([(u'a', 'b')], dtype=t) x['a'] = u'1' assert str(x) == "[(u'1', 'b')]" - + def test_boolean_indexing(self): import numpy as np @@ -2709,7 +2709,7 @@ "input array from shape (3,1) into shape (3)" a[:, 1] = b[:,0] > 0.5 assert (a == [[0, 1], [0, 1], [0, 1]]).all() - + def test_ufunc(self): from numpy import array @@ -3868,7 +3868,7 @@ assert a[0]['y'] == 2 assert a[1]['y'] == 1 - + a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)]) assert a['b'].shape == (1, 0) b = loads(dumps(a)) From noreply at buildbot.pypy.org Sun Oct 4 19:18:40 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 4 Oct 2015 19:18:40 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: extract is_scalar_like() from find_shape_and_elems() Message-ID: <20151004171840.BD4261C12D8@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79981:8a2df44b76b7 Date: 2015-10-04 18:06 +0100 http://bitbucket.org/pypy/pypy/changeset/8a2df44b76b7/ Log: extract is_scalar_like() from find_shape_and_elems() diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- 
a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -189,17 +189,21 @@ return w_arr def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + if is_scalar_like(space, w_iterable, dtype): return [], [w_iterable] return _find_shape_and_elems(space, w_iterable, is_rec_type) +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False def _find_shape_and_elems(space, w_iterable, is_rec_type): from pypy.objspace.std.bufferobject import W_Buffer From noreply at buildbot.pypy.org Sun Oct 4 19:21:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 4 Oct 2015 19:21:11 +0200 (CEST) Subject: [pypy-commit] cffi cmacros: more tests Message-ID: <20151004172111.726EA1C12D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cmacros Changeset: r2301:39f0d496d0eb Date: 2015-10-04 19:21 +0200 http://bitbucket.org/cffi/cffi/changeset/39f0d496d0eb/ Log: more tests diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -511,13 +511,13 @@ #else typedef long foo_t; #endif - foo_t foo(char); + foo_t *foo(char); """) case = ffi._parser._declarations['function foo'] assert isinstance(case, 
ConditionalCase) assert case.condition == 'defined(ABC)' - assert str(case.iftrue) == '), , False>' - assert str(case.iffalse) == '), , False>' + assert str(case.iftrue) == '), >, False>' + assert str(case.iffalse) == '), >, False>' def test_conditional_typedef_not_used_by_func(): ffi = FFI(backend=FakeBackend()) @@ -532,6 +532,27 @@ case = ffi._parser._declarations['function foo'] assert str(case) == '), , False>' +def test_conditional_typedef_partially_defined_1(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #endif + char foo(char); + """) + case = ffi._parser._declarations['function foo'] + assert str(case) == '), , False>' + +def test_conditional_typedef_partially_defined_2(): + ffi = FFI(backend=FakeBackend()) + ffi.cdef(""" + #ifdef ABC + typedef int foo_t; + #endif + char foo(foo_t *); + """) + should_crash + def test_conditional_nested(): ffi = FFI(backend=FakeBackend()) ffi.cdef(""" From noreply at buildbot.pypy.org Mon Oct 5 01:25:46 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 5 Oct 2015 01:25:46 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: fix Message-ID: <20151004232546.330A91C1464@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79982:8f8f4e669441 Date: 2015-10-04 22:44 +0100 http://bitbucket.org/pypy/pypy/changeset/8f8f4e669441/ Log: fix diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -191,6 +191,7 @@ def find_shape_and_elems(space, w_iterable, dtype): if is_scalar_like(space, w_iterable, dtype): return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() return _find_shape_and_elems(space, w_iterable, is_rec_type) def is_scalar_like(space, w_obj, dtype): From noreply at buildbot.pypy.org Mon Oct 5 01:25:48 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 5 Oct 2015 01:25:48 +0200 (CEST) Subject: [pypy-commit] pypy 
issue-2148: Begin creating fast path for scalars in numpify() Message-ID: <20151004232548.587B01C1464@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79983:8ead82751615 Date: 2015-10-04 23:30 +0100 http://bitbucket.org/pypy/pypy/changeset/8ead82751615/ Log: Begin creating fast path for scalars in numpify() diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -9,6 +9,7 @@ W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter from . import constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -173,8 +174,12 @@ if w_array is not None: return w_array - shape, elems_w = find_shape_and_elems(space, w_object, None) - dtype = find_dtype_for_seq(space, elems_w, None) + if is_scalar_like(space, w_object, dtype=None): + shape, elems_w = [], [w_object] + dtype = scalar2dtype(space, w_object) + else: + shape, elems_w = _find_shape_and_elems(space, w_object) + dtype = find_dtype_for_seq(space, elems_w, None) if dtype is None: dtype = descriptor.get_dtype_cache(space).w_float64dtype elif dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -206,7 +211,7 @@ return True return False -def _find_shape_and_elems(space, w_iterable, is_rec_type): +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): from pypy.objspace.std.bufferobject import W_Buffer shape = [space.len_w(w_iterable)] if space.isinstance_w(w_iterable, space.w_buffer): From noreply at buildbot.pypy.org Mon Oct 5 01:25:50 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 5 Oct 2015 01:25:50 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: fast path for scalars in numpify() Message-ID: <20151004232550.7943B1C1464@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79984:8f424ea4ba98 Date: 2015-10-05 00:23 +0100 
http://bitbucket.org/pypy/pypy/changeset/8f424ea4ba98/ Log: fast path for scalars in numpify() diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -150,11 +150,6 @@ dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -175,23 +170,18 @@ return w_array if is_scalar_like(space, w_object, dtype=None): - shape, elems_w = [], [w_object] dtype = scalar2dtype(space, w_object) - else: - shape, elems_w = _find_shape_and_elems(space, w_object) - dtype = find_dtype_for_seq(space, elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) - else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + shape, elems_w = _find_shape_and_elems(space, w_object) + dtype = find_dtype_for_seq(space, elems_w, None) + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr + def find_shape_and_elems(space, w_iterable, dtype): if is_scalar_like(space, w_iterable, dtype): @@ 
-269,6 +259,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -480,3 +480,9 @@ u = unicode_(u'Aÿ') # raises(UnicodeEncodeError, "str(u)") # XXX assert repr(u) == repr(u'Aÿ') + + def test_binop_with_sequence(self): + import numpy as np + c = np.float64(1.) + [1.] + assert isinstance(c, np.ndarray) + assert (c == [2.]).all() From noreply at buildbot.pypy.org Mon Oct 5 10:26:56 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 5 Oct 2015 10:26:56 +0200 (CEST) Subject: [pypy-commit] pypy default: remove evil and unused method Message-ID: <20151005082656.D06A11C12E8@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79985:0c3303fd17a3 Date: 2015-10-05 10:27 +0200 http://bitbucket.org/pypy/pypy/changeset/0c3303fd17a3/ Log: remove evil and unused method diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -26,19 +26,6 @@ self.prev = prev self.boxes = boxes - def copy(self, memo): - try: - return memo.snapshots[self] - except KeyError: - if self.prev is not None: - prev = self.prev.copy(memo) - else: - prev = None - boxes = [memo.get(box, box) for box in self.boxes] - new_snapshot = Snapshot(prev, boxes) - memo.snapshots[self] = new_snapshot - return new_snapshot - class FrameInfo(object): __slots__ = ('prev', 'jitcode', 'pc') From noreply at buildbot.pypy.org Mon Oct 5 10:31:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 
10:31:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Tentative fix for a bug that shows up rarely on the full pypy: Message-ID: <20151005083155.292E11C12E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79986:8cc8067ed404 Date: 2015-10-05 09:12 +0200 http://bitbucket.org/pypy/pypy/changeset/8cc8067ed404/ Log: Tentative fix for a bug that shows up rarely on the full pypy: the likely cause is that short-preamble operations are supposed to never be forwarded, but they temporarily are. The problem is that the send_extra_operation() in the middle of this piece of code can invoke arbitrary parts of the optimizer. diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -320,6 +320,14 @@ return None # explicit because the return can be non-None return virtual_state + def _map_args(self, mapping, arglist): + result = [] + for box in arglist: + if not isinstance(box, Const): + box = mapping[box] + result.append(box) + return result + def inline_short_preamble(self, jump_args, args_no_virtuals, short, patchguardop, target_token, label_op): short_inputargs = short[0].getarglist() @@ -331,33 +339,39 @@ # THIS WILL MODIFY ALL THE LISTS PROVIDED, POTENTIALLY self.short_preamble_producer.setup(short_inputargs, short_jump_args, short, label_op.getarglist()) - try: + if 1: # (keep indentation) self._check_no_forwarding([short_inputargs, short], False) assert len(short_inputargs) == len(jump_args) + # We need to make a list of fresh new operations corresponding + # to the short preamble operations. We could temporarily forward + # the short operations to the fresh ones, but there are obscure + # issues: send_extra_operation() below might occasionally invoke + # use_box(), which assumes the short operations are not forwarded. + # So we avoid such temporary forwarding and just use a dict here. 
+ mapping = {} for i in range(len(jump_args)): - short_inputargs[i].set_forwarded(None) - self.make_equal_to(short_inputargs[i], jump_args[i]) + mapping[short_inputargs[i]] = jump_args[i] i = 1 while i < len(short) - 1: - op = short[i] - if op.is_guard(): - op = self.replace_op_with(op, op.getopnum(), + sop = short[i] + arglist = self._map_args(mapping, sop.getarglist()) + if sop.is_guard(): + op = sop.copy_and_change(sop.getopnum(), arglist, descr=compile.ResumeAtPositionDescr()) assert isinstance(op, GuardResOp) op.rd_snapshot = patchguardop.rd_snapshot op.rd_frame_info_list = patchguardop.rd_frame_info_list + else: + op = sop.copy_and_change(sop.getopnum(), arglist) + mapping[sop] = op i += 1 self.optimizer.send_extra_operation(op) # force all of them except the virtuals for arg in args_no_virtuals + short_jump_args: self.optimizer.force_box(self.get_box_replacement(arg)) self.optimizer.flush() - return [self.get_box_replacement(box) for box in short_jump_args] - finally: - for op in short_inputargs: - op.set_forwarded(None) - for op in short: - op.set_forwarded(None) + return [self.get_box_replacement(box) + for box in self._map_args(mapping, short_jump_args)] def _expand_info(self, arg, infos): if isinstance(arg, AbstractResOp) and arg.is_same_as(): From noreply at buildbot.pypy.org Mon Oct 5 10:31:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 10:31:57 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20151005083157.71F951C12E8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79987:d43a1b95f25c Date: 2015-10-05 10:32 +0200 http://bitbucket.org/pypy/pypy/changeset/d43a1b95f25c/ Log: merge heads diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -26,19 +26,6 @@ self.prev = prev self.boxes = boxes - def copy(self, memo): - try: - return memo.snapshots[self] - except KeyError: - if 
self.prev is not None: - prev = self.prev.copy(memo) - else: - prev = None - boxes = [memo.get(box, box) for box in self.boxes] - new_snapshot = Snapshot(prev, boxes) - memo.snapshots[self] = new_snapshot - return new_snapshot - class FrameInfo(object): __slots__ = ('prev', 'jitcode', 'pc') From noreply at buildbot.pypy.org Mon Oct 5 10:50:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 10:50:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove this keyword argument, never used. Using it would fail to Message-ID: <20151005085023.0F1091C0352@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79988:bef7f8d920d7 Date: 2015-10-05 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/bef7f8d920d7/ Log: Remove this keyword argument, never used. Using it would fail to annotate: the final ResOperation.get_box_replacement() has a @specialize on this argument. diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -146,8 +146,8 @@ return fw return None - def get_box_replacement(self, op, not_const=False): - return self.optimizer.get_box_replacement(op, not_const=not_const) + def get_box_replacement(self, op): + return self.optimizer.get_box_replacement(op) def getlastop(self): return self.optimizer.getlastop() @@ -336,10 +336,10 @@ if self.get_box_replacement(op).is_constant(): return info.FloatConstInfo(self.get_box_replacement(op)) - def get_box_replacement(self, op, not_const=False): + def get_box_replacement(self, op): if op is None: return op - return op.get_box_replacement(not_const) + return op.get_box_replacement() def force_box(self, op, optforce=None): op = self.get_box_replacement(op) From noreply at buildbot.pypy.org Mon Oct 5 16:14:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 16:14:33 +0200 (CEST) Subject: [pypy-commit] 
pypy default: RPython_StartupCode() never returns non-null Message-ID: <20151005141433.B96EE1C0FDF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79989:ea1c9b81f55c Date: 2015-10-05 15:54 +0200 http://bitbucket.org/pypy/pypy/changeset/ea1c9b81f55c/ Log: RPython_StartupCode() never returns non-null diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -763,8 +763,7 @@ def gen_startupcode(f, database): # generate the start-up code and put it into a function - print >> f, 'char *RPython_StartupCode(void) {' - print >> f, '\tchar *error = NULL;' + print >> f, 'void RPython_StartupCode(void) {' bk = database.translator.annotator.bookkeeper if bk.thread_local_fields: @@ -778,18 +777,12 @@ for dest, value in database.late_initializations: print >> f, "\t%s = %s;" % (dest, value) - firsttime = True for node in database.containerlist: lines = list(node.startupcode()) if lines: - if firsttime: - firsttime = False - else: - print >> f, '\tif (error) return error;' for line in lines: print >> f, '\t'+line - print >> f, '\treturn error;' print >> f, '}' def commondefs(defines): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -61,8 +61,7 @@ } #endif - errmsg = RPython_StartupCode(); - if (errmsg) goto error; + RPython_StartupCode(); list = _RPyListOfString_New(argc); if (RPyExceptionOccurred()) goto memory_out; diff --git a/rpython/translator/c/src/entrypoint.h b/rpython/translator/c/src/entrypoint.h --- a/rpython/translator/c/src/entrypoint.h +++ b/rpython/translator/c/src/entrypoint.h @@ -8,6 +8,6 @@ #define PYPY_MAIN_FUNCTION main #endif -RPY_EXTERN char *RPython_StartupCode(void); +RPY_EXTERN void RPython_StartupCode(void); RPY_EXPORTED int PYPY_MAIN_FUNCTION(int argc, char *argv[]); #endif /* PYPY_STANDALONE */ From noreply at 
buildbot.pypy.org Mon Oct 5 16:14:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 16:14:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Move this assertion (which fails, very rarely) into logic to handle that Message-ID: <20151005141435.EB32A1C11FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79990:72f247efc060 Date: 2015-10-05 16:14 +0200 http://bitbucket.org/pypy/pypy/changeset/72f247efc060/ Log: Move this assertion (which fails, very rarely) into logic to handle that case. diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -592,6 +592,17 @@ def emit_guard_operation(self, op, pendingfields): guard_op = self.replace_op_with(op, op.getopnum()) opnum = guard_op.getopnum() + # If guard_(no)_exception is merged with another previous guard, then + # it *should* be is "some_call;guard_not_forced;guard_(no)_exception". + # The guard_(no)_exception can also occur at different places, + # but these should not be preceeded immediately by another guard. + # Sadly, asserting this seems to fail in rare cases. So instead, + # we simply give up sharing. 
+ if (opnum in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION) and + self._last_guard_op is not None and + self._last_guard_op.getopnum() != rop.GUARD_NOT_FORCED): + self._last_guard_op = None + # if (self._last_guard_op and guard_op.getdescr() is None): self.metainterp_sd.profiler.count_ops(opnum, jitprof.Counters.OPT_GUARDS_SHARED) @@ -634,8 +645,6 @@ def _copy_resume_data_from(self, guard_op, last_guard_op): - if guard_op.getopnum() in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION): - assert last_guard_op.getopnum() == rop.GUARD_NOT_FORCED descr = compile.invent_fail_descr_for_op(guard_op.getopnum(), self, True) assert isinstance(descr, compile.ResumeGuardCopiedDescr) last_descr = last_guard_op.getdescr() From noreply at buildbot.pypy.org Mon Oct 5 16:19:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 16:19:05 +0200 (CEST) Subject: [pypy-commit] pypy default: can't spell Message-ID: <20151005141905.6047E1C1228@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79991:a46f877170ff Date: 2015-10-05 16:19 +0200 http://bitbucket.org/pypy/pypy/changeset/a46f877170ff/ Log: can't spell diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -593,7 +593,7 @@ guard_op = self.replace_op_with(op, op.getopnum()) opnum = guard_op.getopnum() # If guard_(no)_exception is merged with another previous guard, then - # it *should* be is "some_call;guard_not_forced;guard_(no)_exception". + # it *should* be in "some_call;guard_not_forced;guard_(no)_exception". # The guard_(no)_exception can also occur at different places, # but these should not be preceeded immediately by another guard. # Sadly, asserting this seems to fail in rare cases. 
So instead, From noreply at buildbot.pypy.org Mon Oct 5 16:36:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 16:36:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Typo Message-ID: <20151005143653.DB0C21C129A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79992:beae03a0417f Date: 2015-10-05 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/beae03a0417f/ Log: Typo diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -214,7 +214,7 @@ """ Callback specification. ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ From noreply at buildbot.pypy.org Mon Oct 5 18:29:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 18:29:56 +0200 (CEST) Subject: [pypy-commit] cffi default: ARM is documented to have 'unsigned int' as the wchar_t type Message-ID: <20151005162956.D559E1C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2302:db6e4bf0dd54 Date: 2015-10-05 18:30 +0200 http://bitbucket.org/cffi/cffi/changeset/db6e4bf0dd54/ Log: ARM is documented to have 'unsigned int' as the wchar_t type diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -756,8 +756,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, 
signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') diff --git a/testing/cffi1/test_new_ffi_1.py b/testing/cffi1/test_new_ffi_1.py --- a/testing/cffi1/test_new_ffi_1.py +++ b/testing/cffi1/test_new_ffi_1.py @@ -781,8 +781,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') From noreply at buildbot.pypy.org Mon Oct 5 18:31:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 18:31:39 +0200 (CEST) Subject: [pypy-commit] pypy default: import cffi/db6e4bf0dd54 Message-ID: <20151005163139.791DF1C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r79993:50f0276e8071 Date: 2015-10-05 18:31 +0200 http://bitbucket.org/pypy/pypy/changeset/50f0276e8071/ Log: import cffi/db6e4bf0dd54 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -757,8 +757,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -209,6 +209,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -782,8 +782,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -189,6 +189,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 From noreply at buildbot.pypy.org Mon Oct 5 18:43:24 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 5 Oct 2015 18:43:24 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: _calc_dtype() is unroll_safe since it's a search loop 
over a list of bounded length Message-ID: <20151005164324.E91491C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79994:0941119321e8 Date: 2015-10-05 17:43 +0100 http://bitbucket.org/pypy/pypy/changeset/0941119321e8/ Log: _calc_dtype() is unroll_safe since it's a search loop over a list of bounded length diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -479,6 +479,7 @@ dt_in, dt_out = self._calc_dtype(space, dtype, out, casting) return dt_in, dt_out, self.func + @jit.unroll_safe def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'): if arg_dtype.is_object(): return arg_dtype, arg_dtype @@ -672,6 +673,7 @@ "requested type has type code '%s'" % (self.name, dtype.char)) + @jit.unroll_safe def _calc_dtype(self, space, l_dtype, r_dtype, out, casting, w_arg1, w_arg2): if l_dtype.is_object() or r_dtype.is_object(): From noreply at buildbot.pypy.org Mon Oct 5 19:24:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 19:24:54 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: A branch to implement "__stdcall" more completely on Windows. Initial tests Message-ID: <20151005172454.13CF41C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2303:826248f909c5 Date: 2015-10-05 19:25 +0200 http://bitbucket.org/cffi/cffi/changeset/826248f909c5/ Log: A branch to implement "__stdcall" more completely on Windows. 
Initial tests diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -427,3 +427,48 @@ res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0 + + def test_explicit_cdecl_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tp = ffi.typeof(m.QueryPerformanceFrequency) + assert 'stdcall' not in str(tp) and 'cdecl' not in str(tp) + assert tp is ( + ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """, calling_conv="cdecl") + m = ffi.dlopen("Kernel32.dll") + tpc = ffi.typeof(m.QueryPerformanceFrequency) + assert tpc is tp + assert tpc is ( + ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """, calling_conv="stdcall") + m = ffi.dlopen("Kernel32.dll") + tps = ffi.typeof(m.QueryPerformanceFrequency) + assert tps is not tpc + assert '__stdcall' in str(tps) and 'cdecl' not in str(tps) + assert tps is ( + ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) + # + ffi = FFI(backend=self.Backend()) + ffi.cdef("typedef int (*fnc_t)(int);", calling_conv="cdecl") + ffi.cdef("typedef int (*fns_t)(int);", calling_conv="stdcall") + tpc = ffi.typeof("fnc_t") + tps = ffi.typeof("fns_t") + assert str(tpc) == "" + assert str(tps) == "" diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -1221,7 +1221,6 @@ assert lib.sin(1.23) == math.sin(1.23) def 
test_callback_calling_convention(): - py.test.skip("later") if sys.platform != 'win32': py.test.skip("Windows only") ffi = FFI() diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1280,3 +1280,57 @@ """) assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") + +def test_win32_calling_convention_1(): + if sys.platform != 'win32': + py.test.skip("Windows only") + ffi = FFI() + ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") + ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_1', """ + int __cdecl call1(int(*__cdecl cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(*__stdcall cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_2(): + if sys.platform != 'win32': + py.test.skip("Windows only") + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. 
+ ffi = FFI() + ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") + ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_2', """ + int __stdcall call1(int(*__cdecl cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __cdecl call2(int(*__stdcall cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __stdcall cb1(int x) { return x * 2; } + int __cdecl cb2(int x) { return x * 3; } + """) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 From noreply at buildbot.pypy.org Mon Oct 5 19:28:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 19:28:39 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: fix msvc warning Message-ID: <20151005172839.85C1D1C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2304:c0117c1c1b8a Date: 2015-10-05 19:29 +0200 http://bitbucket.org/cffi/cffi/changeset/c0117c1c1b8a/ Log: fix msvc warning diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3644,13 +3644,13 @@ funcptr [ctresult, ellipsis+abi, num_args, ctargs...] */ PyObject *key, *y; - const void **pkey; + void *pkey; key = PyBytes_FromStringAndSize(NULL, keylength * sizeof(void *)); if (key == NULL) goto error; - pkey = (const void **)PyBytes_AS_STRING(key); + pkey = PyBytes_AS_STRING(key); memcpy(pkey, unique_key, keylength * sizeof(void *)); y = PyDict_GetItem(unique_cache, key); From noreply at buildbot.pypy.org Mon Oct 5 20:09:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 20:09:59 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Starting, with exactly two function types: no-abi (i.e. cdecl on windows), or stdcall. 
Message-ID: <20151005180959.DF1121C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2305:91834f9534f8 Date: 2015-10-05 20:05 +0200 http://bitbucket.org/cffi/cffi/changeset/91834f9534f8/ Log: Starting, with exactly two function types: no-abi (i.e. cdecl on windows), or stdcall. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -4661,7 +4661,7 @@ } static int fb_build_name(struct funcbuilder_s *fb, PyObject *fargs, - CTypeDescrObject *fresult, int ellipsis) + CTypeDescrObject *fresult, int ellipsis, int fabi) { Py_ssize_t i, nargs = PyTuple_GET_SIZE(fargs); fb->nargs = nargs; @@ -4672,9 +4672,17 @@ RESULT_TYPE_HEAD (*)(ARG_1_TYPE, ARG_2_TYPE, etc) RESULT_TYPE_TAIL */ fb_cat_name(fb, fresult->ct_name, fresult->ct_name_position); - fb_cat_name(fb, "(*)(", 4); + fb_cat_name(fb, "(", 1); + i = 2; +#if defined(MS_WIN32) && !defined(_WIN64) + if (fabi == FFI_STDCALL) { + fb_cat_name(fb, "__stdcall ", 10); + i += 10; + } +#endif + fb_cat_name(fb, "*)(", 3); if (fb->fct) { - i = fresult->ct_name_position + 2; /* between '(*' and ')(' */ + i = fresult->ct_name_position + i; /* between '(*' and ')(' */ fb->fct->ct_name_position = i; } @@ -4710,7 +4718,7 @@ static CTypeDescrObject *fb_prepare_ctype(struct funcbuilder_s *fb, PyObject *fargs, CTypeDescrObject *fresult, - int ellipsis) + int ellipsis, int fabi) { CTypeDescrObject *fct; @@ -4719,7 +4727,7 @@ fb->fct = NULL; /* compute the total size needed for the name */ - if (fb_build_name(fb, fargs, fresult, ellipsis) < 0) + if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) return NULL; /* allocate the function type */ @@ -4730,7 +4738,7 @@ /* call again fb_build_name() to really build the ct_name */ fb->bufferp = fct->ct_name; - if (fb_build_name(fb, fargs, fresult, ellipsis) < 0) + if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) goto error; assert(fb->bufferp == fct->ct_name + fb->nb_bytes); @@ -4807,7 +4815,7 @@ 
return NULL; } - fct = fb_prepare_ctype(&funcbuilder, fargs, fresult, ellipsis); + fct = fb_prepare_ctype(&funcbuilder, fargs, fresult, ellipsis, fabi); if (fct == NULL) return NULL; diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -91,7 +91,7 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False, packed=False): + def cdef(self, csource, override=False, packed=False, calling_conv=None): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. @@ -104,7 +104,8 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override, packed=packed) + self._parser.parse(csource, override=override, packed=packed, + calling_conv=calling_conv) self._cdefsources.append(csource) if override: for cache in self._function_caches: diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -103,6 +103,7 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False + self._abi = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -162,16 +163,26 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False): + def parse(self, csource, override=False, packed=False, calling_conv=None): + if calling_conv is None or calling_conv == "cdecl": + abi = None + elif calling_conv == "stdcall": + abi = "stdcall" + else: + raise api.CDefError("calling_conv must be 'cdecl' or 'stdcall';" + " got %r" % (calling_conv,)) prev_override = self._override prev_packed = self._packed + prev_abi = self._abi try: self._override = override self._packed = packed + self._abi = abi self._internal_parse(csource) finally: 
self._override = prev_override self._packed = prev_packed + self._abi = prev_abi def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -449,7 +460,7 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + return model.RawFunctionType(tuple(args), result, ellipsis, self._abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,25 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + if self.abi is None: + abi_args = () + elif self.abi == "stdcall": + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + if sys.platform == "win32": + raise NotImplementedError("%r: stdcall with ctypes backend") + else: + from . 
import api + raise api.CDefError("%r: '__stdcall' only for Windows") + import pdb;pdb.set_trace() + else: + raise NotImplementedError("abi=%r" % (self.abi,)) return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -440,9 +440,7 @@ """) m = ffi.dlopen("Kernel32.dll") tp = ffi.typeof(m.QueryPerformanceFrequency) - assert 'stdcall' not in str(tp) and 'cdecl' not in str(tp) - assert tp is ( - ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) + assert str(tp) == "" # ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -451,8 +449,6 @@ m = ffi.dlopen("Kernel32.dll") tpc = ffi.typeof(m.QueryPerformanceFrequency) assert tpc is tp - assert tpc is ( - ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) # ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -461,9 +457,7 @@ m = ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) assert tps is not tpc - assert '__stdcall' in str(tps) and 'cdecl' not in str(tps) - assert tps is ( - ffi.typeof(ffi.addressof(m, 'QueryPerformanceFrequency')).item) + assert str(tps) == "" # ffi = FFI(backend=self.Backend()) ffi.cdef("typedef int (*fnc_t)(int);", calling_conv="cdecl") @@ -472,3 +466,13 @@ tps = ffi.typeof("fns_t") assert str(tpc) == "" assert str(tps) == "" + + def test_stdcall_only_on_windows(self): + if sys.platform == 'win32': + py.test.skip("not-Windows-only test") + ffi = FFI(backend=self.Backend()) + e = py.test.raises(CDefError, ffi.cdef, """ + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """, calling_conv="stdcall") + assert str(e.value) == ( + ": 
'__stdcall' only for Windows") From noreply at buildbot.pypy.org Mon Oct 5 20:10:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 20:10:02 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: non-windows fixes Message-ID: <20151005181002.074F01C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2306:91c068cc1270 Date: 2015-10-05 20:10 +0200 http://bitbucket.org/cffi/cffi/changeset/91c068cc1270/ Log: non-windows fixes diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -167,7 +167,7 @@ if calling_conv is None or calling_conv == "cdecl": abi = None elif calling_conv == "stdcall": - abi = "stdcall" + abi = "__stdcall" else: raise api.CDefError("calling_conv must be 'cdecl' or 'stdcall';" " got %r" % (calling_conv,)) diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -238,15 +238,17 @@ args.append(tp.get_cached_btype(ffi, finishlist)) if self.abi is None: abi_args = () - elif self.abi == "stdcall": + elif self.abi == "__stdcall": try: abi_args = (ffi._backend.FFI_STDCALL,) except AttributeError: if sys.platform == "win32": - raise NotImplementedError("%r: stdcall with ctypes backend") + raise NotImplementedError("%r: stdcall with ctypes backend" + % (self,)) else: from . 
import api - raise api.CDefError("%r: '__stdcall' only for Windows") + raise api.CDefError("%r: '__stdcall' only for Windows" + % (self,)) import pdb;pdb.set_trace() else: raise NotImplementedError("abi=%r" % (self.abi,)) diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -1,5 +1,5 @@ import py -from cffi import FFI +from cffi import FFI, CDefError import math, os, sys import ctypes.util from cffi.backend_ctypes import CTypesBackend @@ -471,8 +471,10 @@ if sys.platform == 'win32': py.test.skip("not-Windows-only test") ffi = FFI(backend=self.Backend()) - e = py.test.raises(CDefError, ffi.cdef, """ - BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + ffi.cdef(""" + int QueryPerformanceFrequency(long long *lpFrequency); """, calling_conv="stdcall") + m = ffi.dlopen(None) + e = py.test.raises(CDefError, getattr, m, 'QueryPerformanceFrequency') assert str(e.value) == ( - ": '__stdcall' only for Windows") + ": '__stdcall' only for Windows") From noreply at buildbot.pypy.org Mon Oct 5 20:11:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 20:11:21 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: forgot pdb Message-ID: <20151005181121.ECF551C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2307:77adc3118fcd Date: 2015-10-05 20:12 +0200 http://bitbucket.org/cffi/cffi/changeset/77adc3118fcd/ Log: forgot pdb diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -249,7 +249,6 @@ from . 
import api raise api.CDefError("%r: '__stdcall' only for Windows" % (self,)) - import pdb;pdb.set_trace() else: raise NotImplementedError("abi=%r" % (self.abi,)) return global_cache(self, ffi, 'new_function_type', From noreply at buildbot.pypy.org Mon Oct 5 20:16:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 5 Oct 2015 20:16:24 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: check that the function types are really incompatible Message-ID: <20151005181624.146491C0162@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2308:b3fc7b996d25 Date: 2015-10-05 20:17 +0200 http://bitbucket.org/cffi/cffi/changeset/b3fc7b996d25/ Log: check that the function types are really incompatible diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -466,6 +466,13 @@ tps = ffi.typeof("fns_t") assert str(tpc) == "" assert str(tps) == "" + # + fnc = ffi.cast("fnc_t", 0) + fns = ffi.cast("fns_t", 0) + ffi.new("fnc_t[]", [fnc]) + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + ffi.new("fns_t[]", [fns]) def test_stdcall_only_on_windows(self): if sys.platform == 'win32': From noreply at buildbot.pypy.org Mon Oct 5 21:05:10 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 5 Oct 2015 21:05:10 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: Turn test_mixed_div() into a real test Message-ID: <20151005190510.03ED11C069F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79995:a28b47d56405 Date: 2015-10-05 20:05 +0100 http://bitbucket.org/pypy/pypy/changeset/a28b47d56405/ Log: Turn test_mixed_div() into a real test diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py 
@@ -261,6 +261,29 @@ assert log.result == [0.] * N loop, = log.loops_by_filename(self.filepath) assert loop.match(""" - f3 = float_truediv(f1, f2) + i92 = int_ge(i91, i37) + guard_false(i92, descr=...) + i93 = int_add(i91, 1) + setfield_gc(p23, i93, descr=) + i94 = int_ge(i91, i56) + guard_false(i94, descr=...) + i96 = int_mul(i91, i58) + i97 = int_add(i51, i96) + f98 = raw_load_f(i63, i97, descr=) + guard_not_invalidated(descr=...) + f100 = float_mul(f98, 0.500000) + i101 = int_add(i79, 1) + i102 = arraylen_gc(p85, descr=) + i103 = int_lt(i102, i101) + cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) + guard_no_exception(descr=...) + p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) + i106 = getfield_raw_i(#, descr=) + setfield_gc(p76, i101, descr=) + i107 = int_lt(i106, 0) + guard_false(i107, descr=...) jump(..., descr=...) """) From noreply at buildbot.pypy.org Tue Oct 6 02:16:01 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 6 Oct 2015 02:16:01 +0200 (CEST) Subject: [pypy-commit] pypy issue-2148: Close branch issue-2148 Message-ID: <20151006001601.F1F971C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: issue-2148 Changeset: r79996:dbe1f4355ebc Date: 2015-10-06 01:16 +0100 http://bitbucket.org/pypy/pypy/changeset/dbe1f4355ebc/ Log: Close branch issue-2148 From noreply at buildbot.pypy.org Tue Oct 6 02:16:11 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 6 Oct 2015 02:16:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in issue-2148 (pull request #336) Message-ID: <20151006001611.2B91F1C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r79997:d7a28e98dab5 Date: 2015-10-06 01:16 +0100 http://bitbucket.org/pypy/pypy/changeset/d7a28e98dab5/ Log: Merged in issue-2148 (pull request #336) Fix performance regression on operations mixing numpy 
scalars and Python floats. Closes issue #2148 diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter +from . import constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -82,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -143,16 +145,11 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -165,7 +162,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return 
w_object # for anything that isn't already an array, try __array__ method first @@ -173,20 +169,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return _find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, 
space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype @@ -201,6 +259,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/strides.py 
b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -189,67 +189,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) - for w_elem in batch: - if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) 
- w_array = space.lookup(w_elem, '__array__') - if w_array is not None: - # Make sure we call the array implementation of listview, - # since for some ndarray subclasses (matrix, for instance) - # listview does not reduce but rather returns the same class - w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) - new_batch += space.listview(w_elem) - shape.append(size) - batch = new_batch - - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -169,7 +169,7 @@ [1, 1, 1, 105, 105] def test_find_shape(self): - from pypy.module.micronumpy.strides import find_shape_and_elems + from pypy.module.micronumpy.ctors import find_shape_and_elems space = self.space shape, elems = find_shape_and_elems(space, @@ -2485,7 +2485,7 @@ x = array([(u'a', 'b')], dtype=t) x['a'] = u'1' assert str(x) == "[(u'1', 'b')]" - + def test_boolean_indexing(self): import numpy as np @@ -2709,7 +2709,7 @@ "input array from shape (3,1) into shape (3)" a[:, 1] = b[:,0] > 0.5 assert (a == [[0, 1], [0, 1], [0, 1]]).all() - + def test_ufunc(self): from numpy import array @@ -3868,7 +3868,7 @@ assert a[0]['y'] == 2 assert a[1]['y'] == 1 - + a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)]) assert a['b'].shape == (1, 0) b = loads(dumps(a)) diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -480,3 +480,9 @@ u = unicode_(u'Aÿ') # raises(UnicodeEncodeError, "str(u)") # XXX assert repr(u) == repr(u'Aÿ') + + def test_binop_with_sequence(self): + import numpy as np + c = np.float64(1.) + [1.] 
+ assert isinstance(c, np.ndarray) + assert (c == [2.]).all() diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -479,6 +479,7 @@ dt_in, dt_out = self._calc_dtype(space, dtype, out, casting) return dt_in, dt_out, self.func + @jit.unroll_safe def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'): if arg_dtype.is_object(): return arg_dtype, arg_dtype @@ -672,6 +673,7 @@ "requested type has type code '%s'" % (self.name, dtype.char)) + @jit.unroll_safe def _calc_dtype(self, space, l_dtype, r_dtype, out, casting, w_arg1, w_arg2): if l_dtype.is_object() or r_dtype.is_object(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -248,3 +248,42 @@ guard_false(i157, descr=...) jump(..., descr=...) """) + + def test_mixed_div(self): + N = 1500 + def main(): + N = 1500 + import _numpypy.multiarray as np + arr = np.zeros(N) + l = [arr[i]/2. for i in range(N)] + return l + log = self.run(main, []) + assert log.result == [0.] * N + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i92 = int_ge(i91, i37) + guard_false(i92, descr=...) + i93 = int_add(i91, 1) + setfield_gc(p23, i93, descr=) + i94 = int_ge(i91, i56) + guard_false(i94, descr=...) + i96 = int_mul(i91, i58) + i97 = int_add(i51, i96) + f98 = raw_load_f(i63, i97, descr=) + guard_not_invalidated(descr=...) + f100 = float_mul(f98, 0.500000) + i101 = int_add(i79, 1) + i102 = arraylen_gc(p85, descr=) + i103 = int_lt(i102, i101) + cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) + guard_no_exception(descr=...) 
+ p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) + i106 = getfield_raw_i(#, descr=) + setfield_gc(p76, i101, descr=) + i107 = int_lt(i106, 0) + guard_false(i107, descr=...) + jump(..., descr=...) + """) From noreply at buildbot.pypy.org Tue Oct 6 02:28:02 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 6 Oct 2015 02:28:02 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20151006002802.D400B1C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r79998:d029d6332d9f Date: 2015-10-06 01:28 +0100 http://bitbucket.org/pypy/pypy/changeset/d029d6332d9f/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -47,4 +47,9 @@ .. branch: share-guard-info Share guard resume data between consecutive guards that have only -pure operations and guards in between. \ No newline at end of file +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. 
From noreply at buildbot.pypy.org Tue Oct 6 07:24:25 2015 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 6 Oct 2015 07:24:25 +0200 (CEST) Subject: [pypy-commit] pypy default: fix the test Message-ID: <20151006052425.9F2EC1C0352@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r79999:43f8aaef0f6e Date: 2015-10-06 07:24 +0200 http://bitbucket.org/pypy/pypy/changeset/43f8aaef0f6e/ Log: fix the test diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -646,10 +646,12 @@ def _copy_resume_data_from(self, guard_op, last_guard_op): descr = compile.invent_fail_descr_for_op(guard_op.getopnum(), self, True) - assert isinstance(descr, compile.ResumeGuardCopiedDescr) last_descr = last_guard_op.getdescr() assert isinstance(last_descr, compile.ResumeGuardDescr) - descr.prev = last_descr + if isinstance(descr, compile.ResumeGuardCopiedDescr): + descr.prev = last_descr + else: + descr.copy_all_attributes_from(last_descr) guard_op.setdescr(descr) guard_op.setfailargs(last_guard_op.getfailargs()) descr.store_hash(self.metainterp_sd) From noreply at buildbot.pypy.org Tue Oct 6 08:40:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 08:40:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Run the tests in sorted order, and print which size each one is testing Message-ID: <20151006064055.15D921C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80000:b853ee35818e Date: 2015-10-06 08:41 +0200 http://bitbucket.org/pypy/pypy/changeset/b853ee35818e/ Log: Run the tests in sorted order, and print which size each one is testing diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py --- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -7,10 +7,11 @@ [2 ** n - 
1 for n in range(26)]) def test_newstr_constant_size(self): - for size in TestAlloc.SIZES: + for size in sorted(TestAlloc.SIZES): yield self.newstr_constant_size, size def newstr_constant_size(self, size): + print 'size =', size src = """if 1: N = %(size)d part_a = 'a' * N From noreply at buildbot.pypy.org Tue Oct 6 09:35:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 09:35:38 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: in-progress, but found a problem Message-ID: <20151006073538.71D271C227D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2309:fc4b1899ac96 Date: 2015-10-06 09:35 +0200 http://bitbucket.org/cffi/cffi/changeset/fc4b1899ac96/ Log: in-progress, but found a problem diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ -544,7 +544,7 @@ case _CFFI_OP_FUNCTION: { PyObject *fargs; - int i, base_index, num_args, ellipsis; + int i, base_index, num_args, ellipsis, abi; y = (PyObject *)realize_c_type(builder, opcodes, _CFFI_GETARG(op)); if (y == NULL) @@ -560,7 +560,22 @@ _CFFI_OP_FUNCTION_END) num_args++; - ellipsis = _CFFI_GETARG(opcodes[base_index + num_args]) & 1; + ellipsis = _CFFI_GETARG(opcodes[base_index + num_args]) & 0x01; + abi = _CFFI_GETARG(opcodes[base_index + num_args]) & 0xFE; + switch (abi) { + case 0: + abi = FFI_DEFAULT_ABI; + break; +#if defined(MS_WIN32) && !defined(_WIN64) + case 2: + abi = FFI_STDCALL; + break; +#endif + default: + PyErr_Format(FFIError, "abi number %d not supported", abi); + Py_DECREF(y); + return NULL; + } fargs = PyTuple_New(num_args); if (fargs == NULL) { @@ -578,8 +593,7 @@ PyTuple_SET_ITEM(fargs, i, z); } - z = new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, - FFI_DEFAULT_ABI); + z = new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, abi); Py_DECREF(fargs); Py_DECREF(y); if (z == NULL) diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -243,12 
+243,13 @@ abi_args = (ffi._backend.FFI_STDCALL,) except AttributeError: if sys.platform == "win32": - raise NotImplementedError("%r: stdcall with ctypes backend" - % (self,)) + raise NotImplementedError("%r: stdcall" % (self,)) else: from . import api - raise api.CDefError("%r: '__stdcall' only for Windows" + raise api.CDefError("%r: '__stdcall': only on Windows" % (self,)) + if self.ellipsis: # win32: __stdcall is ignored when + abi_args = () # applied to variadic functions else: raise NotImplementedError("abi=%r" % (self.abi,)) return global_cache(self, ffi, 'new_function_type', diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git 
a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -1220,24 +1220,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): @@ -2259,3 +2241,128 @@ assert foo_s.fields[0][1].type is ffi.typeof("int") assert foo_s.fields[1][0] == 'b' assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_win32_calling_convention_1(): + if sys.platform != 'win32': + py.test.skip("Windows only") + ffi = FFI() + ffi.cdef("int call1(int(*cb)(int));", calling_conv="cdecl") + ffi.cdef("int call2(int(*cb)(int));", calling_conv="stdcall") + lib = ffi.verify(r""" + int __cdecl call1(int(__cdecl *cb)(int)) { + printf("here1\n"); + printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + printf("here1\n"); + printf("cb = %p, cb2 = 
%p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + printf("result = %d\n", result); + return result; + } + """) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + ... + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' + +def test_win32_calling_convention_2(): + if sys.platform != 'win32': + py.test.skip("Windows only") + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. + ffi = FFI() + ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") + ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_2', """ + int __stdcall call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __cdecl call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __stdcall cb1(int x) { return x * 2; } + int __cdecl cb2(int x) { return x * 3; } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + if 
sys.platform != 'win32': + py.test.skip("Windows only") + ffi = FFI() + ffi.cdef("struct point { int x, y; };") + ffi.cdef("struct point call1(int(*cb)(struct point)); " + "int cb1(struct point);", calling_conv="cdecl") + ffi.cdef("struct point call2(int(*cb)(struct point)); " + "int cb2(struct point);", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + struct point { int x, y; }; + int __stdcall cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __cdecl cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + printf("here1\n"); + printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- 
a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1287,24 +1287,37 @@ ffi = FFI() ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") - lib = verify(ffi, 'test_win32_calling_convention_1', """ - int __cdecl call1(int(*__cdecl cb)(int)) { + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + int __cdecl cb1(int x) { return x * 2; } + int __cdecl call1(int(__cdecl *cb)(int)) { + printf("here1\n"); + printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); int i, result = 0; for (i = 0; i < 1000; i++) result += cb(i); + printf("result = %d\n", result); return result; } - int __stdcall call2(int(*__stdcall cb)(int)) { + int __stdcall cb2(int x) { return x * 3; } + int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; + printf("here1\n"); + printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); for (i = 0; i < 1000; i++) result += cb(-i); + printf("result = %d\n", result); return result; } - int __cdecl cb1(int x) { return x * 2; } - int __stdcall cb2(int x) { return x * 3; } """) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + ptr_call1 = ffi.addressof(lib, 'call1') assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' def test_win32_calling_convention_2(): if sys.platform != 'win32': @@ -1317,13 +1330,13 @@ ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") lib = verify(ffi, 'test_win32_calling_convention_2', """ - int __stdcall call1(int(*__cdecl cb)(int)) { + int __stdcall call1(int(__cdecl *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += 
cb(i); return result; } - int __cdecl call2(int(*__stdcall cb)(int)) { + int __cdecl call2(int(__stdcall *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(-i); @@ -1332,5 +1345,107 @@ int __stdcall cb1(int x) { return x * 2; } int __cdecl cb2(int x) { return x * 3; } """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + if sys.platform != 'win32': + py.test.skip("Windows only") + ffi = FFI() + ffi.cdef("struct point { int x, y; };") + ffi.cdef("struct point call1(int(*cb)(struct point)); " + "int cb1(struct point);", calling_conv="cdecl") + ffi.cdef("struct point call2(int(*cb)(struct point)); " + "int cb2(struct point);", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + struct point { int x, y; }; + int __stdcall cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __cdecl cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + printf("here1\n"); + printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + 
result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + +def test_win32_calling_convention_4(): + if sys.platform != 'win32': + py.test.skip("Windows only") + ffi = FFI() + ffi.cdef("int call1(int(*cb)(int));", calling_conv="cdecl") + ffi.cdef("int call2(int(*cb)(int));", calling_conv="stdcall") + lib = verify(ffi, 'test_win32_calling_convention_4', """ + int __stdcall call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __cdecl call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + """) + @ffi.callback("int(int)", calling_conv="cdecl") + def cb1(x): + return x * 2 + ... 
+ @ffi.callback("int(int)", calling_conv="stdcall") + def cb2(x): + return x * 2 + + + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 From noreply at buildbot.pypy.org Tue Oct 6 10:36:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 10:36:15 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: This doesn't work anyway Message-ID: <20151006083615.C31D71C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2310:081da9ea6b97 Date: 2015-10-06 10:24 +0200 http://bitbucket.org/cffi/cffi/changeset/081da9ea6b97/ Log: This doesn't work anyway diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -6381,11 +6381,7 @@ #if defined(MS_WIN32) && !defined(_WIN64) PyModule_AddIntConstant(m, "FFI_STDCALL", FFI_STDCALL) < 0 || #endif -#ifdef FFI_CDECL - PyModule_AddIntConstant(m, "FFI_CDECL", FFI_CDECL) < 0 || /* win32 */ -#else PyModule_AddIntConstant(m, "FFI_CDECL", FFI_DEFAULT_ABI) < 0 || -#endif #ifdef MS_WIN32 # ifdef _WIN64 From noreply at buildbot.pypy.org Tue Oct 6 10:36:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 10:36:17 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Support directly __stdcall or WINAPI (or __cdecl, ignored) inside Message-ID: <20151006083617.C74ED1C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2311:b1eed93a8c20 Date: 2015-10-06 10:27 +0200 
http://bitbucket.org/cffi/cffi/changeset/b1eed93a8c20/ Log: Support directly __stdcall or WINAPI (or __cdecl, ignored) inside cparser. diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -91,7 +91,7 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False, packed=False, calling_conv=None): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. @@ -104,8 +104,7 @@ raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override, packed=packed, - calling_conv=calling_conv) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. 
+ csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -103,7 +114,6 @@ self._structnode2type = weakref.WeakKeyDictionary() self._override = False self._packed = False - self._abi = None self._int_constants = {} self._recomplete = [] self._uses_new_feature = None @@ -163,26 +173,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False, packed=False, calling_conv=None): - if calling_conv is None or calling_conv == "cdecl": - abi = None - elif calling_conv == "stdcall": - abi = "__stdcall" - else: - raise api.CDefError("calling_conv must be 'cdecl' or 'stdcall';" - " got %r" % (calling_conv,)) + def parse(self, csource, override=False, packed=False): prev_override = self._override prev_packed = self._packed - prev_abi = self._abi try: self._override = override self._packed = packed - self._abi = abi self._internal_parse(csource) finally: self._override = prev_override self._packed = prev_packed - self._abi = prev_abi def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) @@ -460,7 +460,13 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis, self._abi) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. 
+ abi = None + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -236,22 +236,13 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) - if self.abi is None: - abi_args = () - elif self.abi == "__stdcall": - try: - abi_args = (ffi._backend.FFI_STDCALL,) - except AttributeError: - if sys.platform == "win32": - raise NotImplementedError("%r: stdcall" % (self,)) - else: - from . import api - raise api.CDefError("%r: '__stdcall': only on Windows" - % (self,)) - if self.ellipsis: # win32: __stdcall is ignored when - abi_args = () # applied to variadic functions - else: - raise NotImplementedError("abi=%r" % (self.abi,)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', tuple(args), result, self.ellipsis, *abi_args) diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -364,3 +364,17 @@ assert C.TWO == 2 assert C.NIL == 0 assert C.NEG == -1 + +def test_stdcall(): + ffi = FFI() + tp = ffi.typeof("int(*)(int __stdcall x(int)," + " long (__cdecl*y)(void)," + " short(WINAPI *z)(short))") + if sys.platform == 'win32': + stdcall = '__stdcall ' + else: + stdcall = '' + assert str(tp) == ( + "" % (stdcall, stdcall)) From noreply at buildbot.pypy.org Tue Oct 6 10:36:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 10:36:19 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: start to fix tests Message-ID: <20151006083619.D03BF1C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo 
Branch: win32-stdcall Changeset: r2312:6d47c080e372 Date: 2015-10-06 10:37 +0200 http://bitbucket.org/cffi/cffi/changeset/6d47c080e372/ Log: start to fix tests diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -444,24 +444,24 @@ # ffi = FFI(backend=self.Backend()) ffi.cdef(""" - BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); - """, calling_conv="cdecl") + BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) m = ffi.dlopen("Kernel32.dll") tpc = ffi.typeof(m.QueryPerformanceFrequency) assert tpc is tp # ffi = FFI(backend=self.Backend()) ffi.cdef(""" - BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); - """, calling_conv="stdcall") + BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) m = ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) assert tps is not tpc assert str(tps) == "" # ffi = FFI(backend=self.Backend()) - ffi.cdef("typedef int (*fnc_t)(int);", calling_conv="cdecl") - ffi.cdef("typedef int (*fns_t)(int);", calling_conv="stdcall") + ffi.cdef("typedef int (__cdecl *fnc_t)(int);") + ffi.cdef("typedef int (__stdcall *fns_t)(int);") tpc = ffi.typeof("fnc_t") tps = ffi.typeof("fns_t") assert str(tpc) == "" @@ -478,10 +478,8 @@ if sys.platform == 'win32': py.test.skip("not-Windows-only test") ffi = FFI(backend=self.Backend()) - ffi.cdef(""" - int QueryPerformanceFrequency(long long *lpFrequency); - """, calling_conv="stdcall") - m = ffi.dlopen(None) - e = py.test.raises(CDefError, getattr, m, 'QueryPerformanceFrequency') - assert str(e.value) == ( - ": '__stdcall' only for Windows") + ffi.cdef("double __stdcall sin(double x);") # stdcall ignored + m = ffi.dlopen(lib_m) + assert "double(*)(double)" in str(ffi.typeof(m.sin)) + x = m.sin(1.23) + assert x == math.sin(1.23) diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ 
b/testing/cffi0/test_verify.py @@ -2246,9 +2246,16 @@ if sys.platform != 'win32': py.test.skip("Windows only") ffi = FFI() - ffi.cdef("int call1(int(*cb)(int));", calling_conv="cdecl") - ffi.cdef("int call2(int(*cb)(int));", calling_conv="stdcall") + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) lib = ffi.verify(r""" + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + int __cdecl call1(int(__cdecl *cb)(int)) { printf("here1\n"); printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); @@ -2268,13 +2275,8 @@ return result; } """) - assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - ... - print '<<< cb2 =', ffi.addressof(lib, 'cb2') - ptr_call2 = ffi.addressof(lib, 'call2') - assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - print '<<< done' + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 def test_win32_calling_convention_2(): if sys.platform != 'win32': @@ -2284,16 +2286,16 @@ # automatically corrected. But this does not apply to the 'cb' # function pointer argument. 
ffi = FFI() - ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") - ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") + ffi.cdef("int __stdcall call1(int(*cb)(int)); int cb1(int);") + ffi.cdef("int call2(int(__stdcall *cb)(int)); int __stdcall cb2(int);") lib = verify(ffi, 'test_win32_calling_convention_2', """ - int __stdcall call1(int(__cdecl *cb)(int)) { + int __cdecl call1(int(__cdecl *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(i); return result; } - int __cdecl call2(int(__stdcall *cb)(int)) { + int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(-i); @@ -2318,7 +2320,7 @@ py.test.skip("Windows only") ffi = FFI() ffi.cdef("struct point { int x, y; };") - ffi.cdef("struct point call1(int(*cb)(struct point)); " + ffi.cdef("struct point __stdcall call1(int(*__cdecl cb)(struct point)); " "int cb1(struct point);", calling_conv="cdecl") ffi.cdef("struct point call2(int(*cb)(struct point)); " "int cb2(struct point);", calling_conv="stdcall") From noreply at buildbot.pypy.org Tue Oct 6 11:00:00 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 11:00:00 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Fix test Message-ID: <20151006090000.CAACF1C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2313:af9fbe2cfe29 Date: 2015-10-06 10:48 +0200 http://bitbucket.org/cffi/cffi/changeset/af9fbe2cfe29/ Log: Fix test diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2257,9 +2257,9 @@ int __stdcall cb2(int x) { return x * 3; } int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; printf("here1\n"); printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); - int i, result = 0; for (i = 0; i < 1000; i++) result += cb(i); printf("result = %d\n", result); @@ -2286,9 +2286,13 @@ # automatically 
corrected. But this does not apply to the 'cb' # function pointer argument. ffi = FFI() - ffi.cdef("int __stdcall call1(int(*cb)(int)); int cb1(int);") - ffi.cdef("int call2(int(__stdcall *cb)(int)); int __stdcall cb2(int);") - lib = verify(ffi, 'test_win32_calling_convention_2', """ + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" int __cdecl call1(int(__cdecl *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) @@ -2301,33 +2305,31 @@ result += cb(-i); return result; } - int __stdcall cb1(int x) { return x * 2; } - int __cdecl cb2(int x) { return x * 3; } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } """) - ptr_call1 = ffi.addressof(lib, 'call1') - ptr_call2 = ffi.addressof(lib, 'call2') - py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) - py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) - assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 def test_win32_calling_convention_3(): if sys.platform != 'win32': py.test.skip("Windows only") ffi = FFI() - ffi.cdef("struct point { int x, y; };") - ffi.cdef("struct point __stdcall call1(int(*__cdecl cb)(struct point)); " - "int cb1(struct point);", calling_conv="cdecl") - ffi.cdef("struct point call2(int(*cb)(struct point)); " - "int cb2(struct point);", calling_conv="stdcall") - lib = verify(ffi, 
'test_win32_calling_convention_3', r""" + ffi.cdef(""" struct point { int x, y; }; - int __stdcall cb1(struct point pt) { return pt.x + 10 * pt.y; } - int __cdecl cb2(struct point pt) { return pt.x + 100 * pt.y; } + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = ffi.verify(r""" + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } struct point __stdcall call1(int(__cdecl *cb)(struct point)) { int i; struct point result = { 0, 0 }; @@ -2353,18 +2355,9 @@ return result; } """) - ptr_call1 = ffi.addressof(lib, 'call1') - ptr_call2 = ffi.addressof(lib, 'call2') - py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) - py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) - print '<<< cb1 =', ffi.addressof(lib, 'cb1') - pt = lib.call1(ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + pt = lib.call1(lib.cb1) assert (pt.x, pt.y) == (-9*500*999, 9*500*999) - pt = ptr_call1(ffi.addressof(lib, 'cb1')) - assert (pt.x, pt.y) == (-9*500*999, 9*500*999) - pt = lib.call2(ffi.addressof(lib, 'cb2')) + pt = lib.call2(lib.cb2) assert (pt.x, pt.y) == (99*500*999, -99*500*999) - pt = ptr_call2(ffi.addressof(lib, 'cb2')) - assert (pt.x, pt.y) == (99*500*999, -99*500*999) From noreply at buildbot.pypy.org Tue Oct 6 11:00:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 11:00:03 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Another passing test Message-ID: <20151006090003.013E71C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: 
r2314:044552dec3ba Date: 2015-10-06 10:53 +0200 http://bitbucket.org/cffi/cffi/changeset/044552dec3ba/ Log: Another passing test diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2242,9 +2242,40 @@ assert foo_s.fields[1][0] == 'b' assert foo_s.fields[1][1].type is ffi.typeof("void *") +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int call2(int(__stdcall *cb)(int)); + """) + lib = ffi.verify(r""" + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + assert lib.call1(cb1) == 500*999*2 + assert lib.call2(cb2) == -500*999*3 + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + def test_win32_calling_convention_1(): - if sys.platform != 'win32': - py.test.skip("Windows only") ffi = FFI() ffi.cdef(""" int __cdecl call1(int(__cdecl *cb)(int)); @@ -2279,8 +2310,6 @@ assert lib.call2(lib.cb2) == -500*999*3 def test_win32_calling_convention_2(): - if sys.platform != 'win32': - py.test.skip("Windows only") # any mistake in the declaration of plain function (including the # precise argument types and, here, the calling convention) are # automatically corrected. 
But this does not apply to the 'cb' @@ -2314,8 +2343,6 @@ assert lib.call2(lib.cb2) == -500*999*3 def test_win32_calling_convention_3(): - if sys.platform != 'win32': - py.test.skip("Windows only") ffi = FFI() ffi.cdef(""" struct point { int x, y; }; From noreply at buildbot.pypy.org Tue Oct 6 11:00:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 11:00:05 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: more test fixes Message-ID: <20151006090005.182831C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2315:5a6f5727774a Date: 2015-10-06 11:00 +0200 http://bitbucket.org/cffi/cffi/changeset/5a6f5727774a/ Log: more test fixes diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2249,7 +2249,10 @@ int call2(int(__stdcall *cb)(int)); """) lib = ffi.verify(r""" - int __cdecl call1(int(__cdecl *cb)(int)) { + #ifndef WINAPI + # define __stdcall + #endif + int call1(int(*cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(i); @@ -2272,8 +2275,12 @@ return x * 3 assert lib.call1(cb1) == 500*999*2 assert lib.call2(cb2) == -500*999*3 - py.test.raises(TypeError, lib.call1, cb2) - py.test.raises(TypeError, lib.call2, cb1) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) def test_win32_calling_convention_1(): ffi = FFI() @@ -2284,6 +2291,10 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" + #ifndef WINAPI + # define __cdecl + # define __stdcall + #endif int __cdecl cb1(int x) { return x * 2; } int __stdcall cb2(int x) { return x * 3; } @@ -2322,6 +2333,10 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" + #ifndef WINAPI + # define __cdecl + # define __stdcall + #endif int __cdecl call1(int(__cdecl 
*cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) @@ -2337,8 +2352,6 @@ int __cdecl cb1(int x) { return x * 2; } int __stdcall cb2(int x) { return x * 3; } """) - py.test.raises(TypeError, lib.call1, lib.cb2) - py.test.raises(TypeError, lib.call2, lib.cb1) assert lib.call1(lib.cb1) == 500*999*2 assert lib.call2(lib.cb2) == -500*999*3 @@ -2354,6 +2367,10 @@ struct point call2(int(__stdcall *cb)(struct point)); """) lib = ffi.verify(r""" + #ifndef WINAPI + # define __cdecl + # define __stdcall + #endif struct point { int x, y; }; int cb1(struct point pt) { return pt.x + 10 * pt.y; } int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } @@ -2382,8 +2399,9 @@ return result; } """) - py.test.raises(TypeError, lib.call1, lib.cb2) - py.test.raises(TypeError, lib.call2, lib.cb1) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) pt = lib.call1(lib.cb1) assert (pt.x, pt.y) == (-9*500*999, 9*500*999) pt = lib.call2(lib.cb2) From noreply at buildbot.pypy.org Tue Oct 6 12:16:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 12:16:33 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Aargh, WINAPI is not a macro?? Message-ID: <20151006101634.145EE1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2316:8141b7a9346b Date: 2015-10-06 12:14 +0200 http://bitbucket.org/cffi/cffi/changeset/8141b7a9346b/ Log: Aargh, WINAPI is not a macro?? 
diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2246,21 +2246,23 @@ ffi = FFI() ffi.cdef(""" int call1(int(__cdecl *cb)(int)); - int call2(int(__stdcall *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); """) lib = ffi.verify(r""" - #ifndef WINAPI - # define __stdcall + #ifndef MS_WIN32 + # define __stdcall FOOBARBAZZZZZ #endif int call1(int(*cb)(int)) { int i, result = 0; + printf("call1: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(i); printf("result = %d\n", result); return result; } - int __stdcall call2(int(__stdcall *cb)(int)) { + int call2(int(__stdcall *cb)(int)) { int i, result = 0; + printf("call2: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(-i); printf("result = %d\n", result); @@ -2273,14 +2275,24 @@ @ffi.callback("int __stdcall(int)") def cb2(x): return x * 3 - assert lib.call1(cb1) == 500*999*2 - assert lib.call2(cb2) == -500*999*3 + print 'cb1 =', cb1 + res = lib.call1(cb1) + assert res == 500*999*2 + print 'cb2 =', cb2 + print ffi.typeof(lib.call2) + print 'call2 =', lib.call2 + res = lib.call2(cb2) + print '...' 
+ assert res == -500*999*3 + print 'done' if sys.platform == 'win32': assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) py.test.raises(TypeError, lib.call2, cb1) else: assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) def test_win32_calling_convention_1(): ffi = FFI() @@ -2291,7 +2303,7 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" - #ifndef WINAPI + #ifndef MS_WIN32 # define __cdecl # define __stdcall #endif @@ -2333,7 +2345,7 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" - #ifndef WINAPI + #ifndef MS_WIN32 # define __cdecl # define __stdcall #endif @@ -2367,7 +2379,7 @@ struct point call2(int(__stdcall *cb)(struct point)); """) lib = ffi.verify(r""" - #ifndef WINAPI + #ifndef MS_WIN32 # define __cdecl # define __stdcall #endif From noreply at buildbot.pypy.org Tue Oct 6 12:16:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 12:16:36 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: more of the same Message-ID: <20151006101636.3196C1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2317:107d94dda1b1 Date: 2015-10-06 12:17 +0200 http://bitbucket.org/cffi/cffi/changeset/107d94dda1b1/ Log: more of the same diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2249,7 +2249,7 @@ int (*const call2)(int(__stdcall *cb)(int)); """) lib = ffi.verify(r""" - #ifndef MS_WIN32 + #ifndef _MSC_VER # define __stdcall FOOBARBAZZZZZ #endif int call1(int(*cb)(int)) { @@ -2303,7 +2303,7 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" - #ifndef MS_WIN32 + #ifndef _MSC_VER # define __cdecl # define __stdcall #endif @@ -2345,7 +2345,7 @@ int (__stdcall *const cb2)(int); """) lib = ffi.verify(r""" - #ifndef MS_WIN32 + #ifndef _MSC_VER # define __cdecl # define 
__stdcall #endif @@ -2379,7 +2379,7 @@ struct point call2(int(__stdcall *cb)(struct point)); """) lib = ffi.verify(r""" - #ifndef MS_WIN32 + #ifndef _MSC_VER # define __cdecl # define __stdcall #endif From noreply at buildbot.pypy.org Tue Oct 6 12:21:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 12:21:57 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Fix remaining tests Message-ID: <20151006102157.12C7A1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2318:3e58f08489fa Date: 2015-10-06 12:22 +0200 http://bitbucket.org/cffi/cffi/changeset/3e58f08489fa/ Log: Fix remaining tests diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2250,7 +2250,7 @@ """) lib = ffi.verify(r""" #ifndef _MSC_VER - # define __stdcall FOOBARBAZZZZZ + # define __stdcall /* nothing */ #endif int call1(int(*cb)(int)) { int i, result = 0; diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1281,24 +1281,79 @@ assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") -def test_win32_calling_convention_1(): - if sys.platform != 'win32': - py.test.skip("Windows only") +def test_win32_calling_convention_0(): ffi = FFI() - ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") - ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") - lib = verify(ffi, 'test_win32_calling_convention_1', r""" - int __cdecl cb1(int x) { return x * 2; } - int __cdecl call1(int(__cdecl *cb)(int)) { - printf("here1\n"); - printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = verify(ffi, 'test_win32_calling_convention_0', r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + 
int call1(int(*cb)(int)) { int i, result = 0; + printf("call1: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(i); printf("result = %d\n", result); return result; } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + res = lib.call1(cb1) + assert res == 500*999*2 + assert res == ffi.addressof(lib, 'call1')(cb1) + res = lib.call2(cb2) + assert res == -500*999*3 + assert res == ffi.addressof(lib, 'call2')(cb2) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + printf("here1\n"); + printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + printf("result = %d\n", result); + return result; + } int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; printf("here1\n"); @@ -1320,55 +1375,68 @@ print '<<< done' def test_win32_calling_convention_2(): - if sys.platform != 'win32': - py.test.skip("Windows only") # any mistake in the declaration of plain function (including 
the # precise argument types and, here, the calling convention) are # automatically corrected. But this does not apply to the 'cb' # function pointer argument. ffi = FFI() - ffi.cdef("int call1(int(*cb)(int)); int cb1(int);", calling_conv="cdecl") - ffi.cdef("int call2(int(*cb)(int)); int cb2(int);", calling_conv="stdcall") + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) lib = verify(ffi, 'test_win32_calling_convention_2', """ - int __stdcall call1(int(__cdecl *cb)(int)) { + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(i); return result; } - int __cdecl call2(int(__stdcall *cb)(int)) { + int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; for (i = 0; i < 1000; i++) result += cb(-i); return result; } - int __stdcall cb1(int x) { return x * 2; } - int __cdecl cb2(int x) { return x * 3; } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) - py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 assert 
ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 def test_win32_calling_convention_3(): - if sys.platform != 'win32': - py.test.skip("Windows only") ffi = FFI() - ffi.cdef("struct point { int x, y; };") - ffi.cdef("struct point call1(int(*cb)(struct point)); " - "int cb1(struct point);", calling_conv="cdecl") - ffi.cdef("struct point call2(int(*cb)(struct point)); " - "int cb2(struct point);", calling_conv="stdcall") + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) lib = verify(ffi, 'test_win32_calling_convention_3', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif struct point { int x, y; }; - int __stdcall cb1(struct point pt) { return pt.x + 10 * pt.y; } - int __cdecl cb2(struct point pt) { return pt.x + 100 * pt.y; } + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } struct point __stdcall call1(int(__cdecl *cb)(struct point)) { int i; struct point result = { 0, 0 }; @@ -1396,11 +1464,11 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) - py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) - print '<<< cb1 =', ffi.addressof(lib, 'cb1') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) pt = lib.call1(ffi.addressof(lib, 'cb1')) assert (pt.x, pt.y) == (-9*500*999, 9*500*999) pt = 
ptr_call1(ffi.addressof(lib, 'cb1')) @@ -1409,43 +1477,3 @@ assert (pt.x, pt.y) == (99*500*999, -99*500*999) pt = ptr_call2(ffi.addressof(lib, 'cb2')) assert (pt.x, pt.y) == (99*500*999, -99*500*999) - -def test_win32_calling_convention_4(): - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef("int call1(int(*cb)(int));", calling_conv="cdecl") - ffi.cdef("int call2(int(*cb)(int));", calling_conv="stdcall") - lib = verify(ffi, 'test_win32_calling_convention_4', """ - int __stdcall call1(int(__cdecl *cb)(int)) { - int i, result = 0; - for (i = 0; i < 1000; i++) - result += cb(i); - return result; - } - int __cdecl call2(int(__stdcall *cb)(int)) { - int i, result = 0; - for (i = 0; i < 1000; i++) - result += cb(-i); - return result; - } - """) - @ffi.callback("int(int)", calling_conv="cdecl") - def cb1(x): - return x * 2 - ... - @ffi.callback("int(int)", calling_conv="stdcall") - def cb2(x): - return x * 2 - - - ptr_call1 = ffi.addressof(lib, 'call1') - ptr_call2 = ffi.addressof(lib, 'call2') - py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) - py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) - py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) - assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 From noreply at buildbot.pypy.org Tue Oct 6 13:14:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:14:47 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Parse __cdecl and __stdcall in the built-in parser Message-ID: <20151006111447.AEC0C1C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2319:d513c1efe328 Date: 2015-10-06 13:13 +0200 
http://bitbucket.org/cffi/cffi/changeset/d513c1efe328/ Log: Parse __cdecl and __stdcall in the built-in parser diff --git a/c/parse_c_type.c b/c/parse_c_type.c --- a/c/parse_c_type.c +++ b/c/parse_c_type.c @@ -40,6 +40,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -154,6 +157,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -225,7 +230,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". */ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -242,6 +247,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -258,6 +269,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -275,7 +291,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -304,7 +327,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -328,8 +351,7 @@ next_token(tok); } } - 
tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -337,6 +359,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/testing/cffi1/test_parse_c_type.py b/testing/cffi1/test_parse_c_type.py --- a/testing/cffi1/test_parse_c_type.py +++ b/testing/cffi1/test_parse_c_type.py @@ -341,3 +341,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(lib._CFFI_PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(lib._CFFI_PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(lib._CFFI_PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/testing/cffi1/test_realize_c_type.py b/testing/cffi1/test_realize_c_type.py --- a/testing/cffi1/test_realize_c_type.py +++ b/testing/cffi1/test_realize_c_type.py @@ -1,4 +1,4 @@ -import py +import py, sys from cffi import cffi_opcode @@ -46,3 +46,29 @@ def test_all_primitives(): for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) + + +def check_func(input, expected_output=None): + import _cffi_backend + ffi = _cffi_backend.FFI() + ct = ffi.typeof(ffi.callback(input, lambda: None)) + assert isinstance(ct, ffi.CType) + if sys.platform != 'win32': + expected_output = expected_output.replace('__stdcall *', '*') + assert ct.cname == expected_output + +def 
test_funcptr_stdcall(): + check_func("int(int)", "int(*)(int)") + check_func("int foobar(int)", "int(*)(int)") + check_func("int __stdcall(int)", "int(__stdcall *)(int)") + check_func("int __stdcall foobar(int)", "int(__stdcall *)(int)") + check_func("void __cdecl(void)", "void(*)()") + check_func("void __cdecl foobar(void)", "void(*)()") + check_func("void __stdcall(void)", "void(__stdcall *)()") + check_func("void __stdcall foobar(long, short)", + "void(__stdcall *)(long, short)") + check_func("void(void __cdecl(void), void __stdcall(void))", + "void(*)(void(*)(), void(__stdcall *)())") + +def test_variadic_overrides_stdcall(): + check("void (__stdcall*)(int, ...)", "void(*)(int, ...)") diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -1200,25 +1200,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): From noreply at buildbot.pypy.org Tue Oct 6 13:14:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:14:49 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: fix Message-ID: <20151006111449.C61EF1C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2320:8ed6cf0d1c90 Date: 2015-10-06 13:15 +0200 http://bitbucket.org/cffi/cffi/changeset/8ed6cf0d1c90/ Log: fix diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ 
-566,11 +566,13 @@ case 0: abi = FFI_DEFAULT_ABI; break; + case 2: #if defined(MS_WIN32) && !defined(_WIN64) - case 2: abi = FFI_STDCALL; +#else + abi = FFI_DEFAULT_ABI; +#endif break; -#endif default: PyErr_Format(FFIError, "abi number %d not supported", abi); Py_DECREF(y); From noreply at buildbot.pypy.org Tue Oct 6 13:19:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:19:21 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: Fix Message-ID: <20151006111921.6DB9B1C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2321:2def2a1321b0 Date: 2015-10-06 13:20 +0200 http://bitbucket.org/cffi/cffi/changeset/2def2a1321b0/ Log: Fix diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -464,8 +464,9 @@ # to detect __stdcall functions: we textually replace "__stdcall" # with "volatile volatile const" above. abi = None - if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: - abi = '__stdcall' + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): From noreply at buildbot.pypy.org Tue Oct 6 13:50:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:50:23 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: remove the debug printfs Message-ID: <20151006115023.952121C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2322:cca75830f063 Date: 2015-10-06 13:38 +0200 http://bitbucket.org/cffi/cffi/changeset/cca75830f063/ Log: remove the debug printfs diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2254,18 +2254,18 @@ #endif int call1(int(*cb)(int)) { int i, result = 0; - printf("call1: cb = %p\n", 
cb); + //printf("call1: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } int call2(int(__stdcall *cb)(int)) { int i, result = 0; - printf("call2: cb = %p\n", cb); + //printf("call2: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(-i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } """) @@ -2312,20 +2312,20 @@ int __cdecl call1(int(__cdecl *cb)(int)) { int i, result = 0; - printf("here1\n"); - printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); for (i = 0; i < 1000; i++) result += cb(i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; - printf("here1\n"); - printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); for (i = 0; i < 1000; i++) result += cb(-i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } """) @@ -2389,8 +2389,8 @@ struct point __stdcall call1(int(__cdecl *cb)(struct point)) { int i; struct point result = { 0, 0 }; - printf("here1\n"); - printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); for (i = 0; i < 1000; i++) { struct point p = { i, -i }; int r = cb(p); diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1293,18 +1293,18 @@ #endif int call1(int(*cb)(int)) { int i, result = 0; - printf("call1: cb = %p\n", cb); + //printf("call1: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } int call2(int(__stdcall *cb)(int)) { int i, result = 0; - 
printf("call2: cb = %p\n", cb); + //printf("call2: cb = %p\n", cb); for (i = 0; i < 1000; i++) result += cb(-i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } """) @@ -1347,20 +1347,20 @@ int __cdecl call1(int(__cdecl *cb)(int)) { int i, result = 0; - printf("here1\n"); - printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); for (i = 0; i < 1000; i++) result += cb(i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } int __stdcall call2(int(__stdcall *cb)(int)) { int i, result = 0; - printf("here1\n"); - printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); for (i = 0; i < 1000; i++) result += cb(-i); - printf("result = %d\n", result); + //printf("result = %d\n", result); return result; } """) @@ -1440,8 +1440,8 @@ struct point __stdcall call1(int(__cdecl *cb)(struct point)) { int i; struct point result = { 0, 0 }; - printf("here1\n"); - printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); for (i = 0; i < 1000; i++) { struct point p = { i, -i }; int r = cb(p); From noreply at buildbot.pypy.org Tue Oct 6 13:50:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:50:25 +0200 (CEST) Subject: [pypy-commit] cffi default: fix on 32-bit for test_some_float_invalid_2 Message-ID: <20151006115025.A81AC1C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2323:6f38ec69eac6 Date: 2015-10-06 13:51 +0200 http://bitbucket.org/cffi/cffi/changeset/6f38ec69eac6/ Log: fix on 32-bit for test_some_float_invalid_2 diff --git a/cffi/parse_c_type.h b/cffi/parse_c_type.h --- a/cffi/parse_c_type.h +++ b/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned 
char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 From noreply at buildbot.pypy.org Tue Oct 6 13:52:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:52:12 +0200 (CEST) Subject: [pypy-commit] cffi win32-stdcall: ready to merge Message-ID: <20151006115212.1D2A71C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: win32-stdcall Changeset: r2324:de3147427efb Date: 2015-10-06 13:52 +0200 http://bitbucket.org/cffi/cffi/changeset/de3147427efb/ Log: ready to merge From noreply at buildbot.pypy.org Tue Oct 6 13:52:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:52:14 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge win32-stdcall Message-ID: <20151006115214.973C81C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2325:256e52bacdb1 Date: 2015-10-06 13:53 +0200 http://bitbucket.org/cffi/cffi/changeset/256e52bacdb1/ Log: hg merge win32-stdcall Win32: full support for '__stdcall' in function types. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3644,13 +3644,13 @@ funcptr [ctresult, ellipsis+abi, num_args, ctargs...] 
*/ PyObject *key, *y; - const void **pkey; + void *pkey; key = PyBytes_FromStringAndSize(NULL, keylength * sizeof(void *)); if (key == NULL) goto error; - pkey = (const void **)PyBytes_AS_STRING(key); + pkey = PyBytes_AS_STRING(key); memcpy(pkey, unique_key, keylength * sizeof(void *)); y = PyDict_GetItem(unique_cache, key); @@ -4661,7 +4661,7 @@ } static int fb_build_name(struct funcbuilder_s *fb, PyObject *fargs, - CTypeDescrObject *fresult, int ellipsis) + CTypeDescrObject *fresult, int ellipsis, int fabi) { Py_ssize_t i, nargs = PyTuple_GET_SIZE(fargs); fb->nargs = nargs; @@ -4672,9 +4672,17 @@ RESULT_TYPE_HEAD (*)(ARG_1_TYPE, ARG_2_TYPE, etc) RESULT_TYPE_TAIL */ fb_cat_name(fb, fresult->ct_name, fresult->ct_name_position); - fb_cat_name(fb, "(*)(", 4); + fb_cat_name(fb, "(", 1); + i = 2; +#if defined(MS_WIN32) && !defined(_WIN64) + if (fabi == FFI_STDCALL) { + fb_cat_name(fb, "__stdcall ", 10); + i += 10; + } +#endif + fb_cat_name(fb, "*)(", 3); if (fb->fct) { - i = fresult->ct_name_position + 2; /* between '(*' and ')(' */ + i = fresult->ct_name_position + i; /* between '(*' and ')(' */ fb->fct->ct_name_position = i; } @@ -4710,7 +4718,7 @@ static CTypeDescrObject *fb_prepare_ctype(struct funcbuilder_s *fb, PyObject *fargs, CTypeDescrObject *fresult, - int ellipsis) + int ellipsis, int fabi) { CTypeDescrObject *fct; @@ -4719,7 +4727,7 @@ fb->fct = NULL; /* compute the total size needed for the name */ - if (fb_build_name(fb, fargs, fresult, ellipsis) < 0) + if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) return NULL; /* allocate the function type */ @@ -4730,7 +4738,7 @@ /* call again fb_build_name() to really build the ct_name */ fb->bufferp = fct->ct_name; - if (fb_build_name(fb, fargs, fresult, ellipsis) < 0) + if (fb_build_name(fb, fargs, fresult, ellipsis, fabi) < 0) goto error; assert(fb->bufferp == fct->ct_name + fb->nb_bytes); @@ -4807,7 +4815,7 @@ return NULL; } - fct = fb_prepare_ctype(&funcbuilder, fargs, fresult, ellipsis); + fct = 
fb_prepare_ctype(&funcbuilder, fargs, fresult, ellipsis, fabi); if (fct == NULL) return NULL; @@ -6373,11 +6381,7 @@ #if defined(MS_WIN32) && !defined(_WIN64) PyModule_AddIntConstant(m, "FFI_STDCALL", FFI_STDCALL) < 0 || #endif -#ifdef FFI_CDECL - PyModule_AddIntConstant(m, "FFI_CDECL", FFI_CDECL) < 0 || /* win32 */ -#else PyModule_AddIntConstant(m, "FFI_CDECL", FFI_DEFAULT_ABI) < 0 || -#endif #ifdef MS_WIN32 # ifdef _WIN64 diff --git a/c/parse_c_type.c b/c/parse_c_type.c --- a/c/parse_c_type.c +++ b/c/parse_c_type.c @@ -40,6 +40,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -154,6 +157,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -225,7 +230,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". 
*/ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -242,6 +247,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -258,6 +269,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -275,7 +291,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -304,7 +327,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -328,8 +351,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -337,6 +359,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/c/realize_c_type.c b/c/realize_c_type.c --- a/c/realize_c_type.c +++ b/c/realize_c_type.c @@ -544,7 +544,7 @@ case _CFFI_OP_FUNCTION: { PyObject *fargs; - int i, base_index, num_args, ellipsis; + int i, base_index, num_args, ellipsis, abi; y = (PyObject *)realize_c_type(builder, opcodes, _CFFI_GETARG(op)); if (y == NULL) @@ 
-560,7 +560,24 @@ _CFFI_OP_FUNCTION_END) num_args++; - ellipsis = _CFFI_GETARG(opcodes[base_index + num_args]) & 1; + ellipsis = _CFFI_GETARG(opcodes[base_index + num_args]) & 0x01; + abi = _CFFI_GETARG(opcodes[base_index + num_args]) & 0xFE; + switch (abi) { + case 0: + abi = FFI_DEFAULT_ABI; + break; + case 2: +#if defined(MS_WIN32) && !defined(_WIN64) + abi = FFI_STDCALL; +#else + abi = FFI_DEFAULT_ABI; +#endif + break; + default: + PyErr_Format(FFIError, "abi number %d not supported", abi); + Py_DECREF(y); + return NULL; + } fargs = PyTuple_New(num_args); if (fargs == NULL) { @@ -578,8 +595,7 @@ PyTuple_SET_ITEM(fargs, i, z); } - z = new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, - FFI_DEFAULT_ABI); + z = new_function_type(fargs, (CTypeDescrObject *)y, ellipsis, abi); Py_DECREF(fargs); Py_DECREF(y); if (z == NULL) diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. 
+ csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -449,7 +460,14 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def 
as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,18 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise 
NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/cffi/vengine_gen.py b/cffi/vengine_gen.py --- a/cffi/vengine_gen.py +++ b/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -1,5 +1,5 @@ import py -from cffi import FFI +from cffi import FFI, CDefError import math, os, sys import ctypes.util from cffi.backend_ctypes import CTypesBackend @@ -427,3 +427,59 @@ res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0 + + def test_explicit_cdecl_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tp = ffi.typeof(m.QueryPerformanceFrequency) + assert str(tp) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tpc = ffi.typeof(m.QueryPerformanceFrequency) + assert tpc is tp + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tps = ffi.typeof(m.QueryPerformanceFrequency) + assert tps is not tpc + 
assert str(tps) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef("typedef int (__cdecl *fnc_t)(int);") + ffi.cdef("typedef int (__stdcall *fns_t)(int);") + tpc = ffi.typeof("fnc_t") + tps = ffi.typeof("fns_t") + assert str(tpc) == "" + assert str(tps) == "" + # + fnc = ffi.cast("fnc_t", 0) + fns = ffi.cast("fns_t", 0) + ffi.new("fnc_t[]", [fnc]) + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + ffi.new("fns_t[]", [fns]) + + def test_stdcall_only_on_windows(self): + if sys.platform == 'win32': + py.test.skip("not-Windows-only test") + ffi = FFI(backend=self.Backend()) + ffi.cdef("double __stdcall sin(double x);") # stdcall ignored + m = ffi.dlopen(lib_m) + assert "double(*)(double)" in str(ffi.typeof(m.sin)) + x = m.sin(1.23) + assert x == math.sin(1.23) diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -364,3 +364,17 @@ assert C.TWO == 2 assert C.NIL == 0 assert C.NEG == -1 + +def test_stdcall(): + ffi = FFI() + tp = ffi.typeof("int(*)(int __stdcall x(int)," + " long (__cdecl*y)(void)," + " short(WINAPI *z)(short))") + if sys.platform == 'win32': + stdcall = '__stdcall ' + else: + stdcall = '' + assert str(tp) == ( + "" % (stdcall, stdcall)) diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -1220,25 +1220,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def 
test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): @@ -2260,3 +2241,180 @@ assert foo_s.fields[0][1].type is ffi.typeof("int") assert foo_s.fields[1][0] == 'b' assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + print 'cb1 =', cb1 + res = lib.call1(cb1) + assert res == 500*999*2 + print 'cb2 =', cb2 + print ffi.typeof(lib.call2) + print 'call2 =', lib.call2 + res = lib.call2(cb2) + print '...' 
+ assert res == -500*999*3 + print 'done' + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. 
+ ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + pt = 
lib.call1(lib.cb1) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(lib.cb2) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/testing/cffi1/test_parse_c_type.py b/testing/cffi1/test_parse_c_type.py --- a/testing/cffi1/test_parse_c_type.py +++ b/testing/cffi1/test_parse_c_type.py @@ -341,3 +341,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(lib._CFFI_PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(lib._CFFI_PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(lib._CFFI_PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/testing/cffi1/test_realize_c_type.py b/testing/cffi1/test_realize_c_type.py --- a/testing/cffi1/test_realize_c_type.py +++ b/testing/cffi1/test_realize_c_type.py @@ -1,4 +1,4 @@ -import py +import py, sys from cffi import cffi_opcode @@ -46,3 +46,29 @@ def test_all_primitives(): for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) + + +def check_func(input, expected_output=None): + import _cffi_backend + ffi = _cffi_backend.FFI() + ct = ffi.typeof(ffi.callback(input, lambda: None)) + assert isinstance(ct, ffi.CType) + if sys.platform != 'win32': + expected_output = expected_output.replace('__stdcall *', '*') + assert ct.cname == expected_output + +def test_funcptr_stdcall(): + check_func("int(int)", "int(*)(int)") + check_func("int foobar(int)", "int(*)(int)") + check_func("int __stdcall(int)", "int(__stdcall *)(int)") + check_func("int __stdcall foobar(int)", "int(__stdcall *)(int)") + check_func("void __cdecl(void)", 
"void(*)()") + check_func("void __cdecl foobar(void)", "void(*)()") + check_func("void __stdcall(void)", "void(__stdcall *)()") + check_func("void __stdcall foobar(long, short)", + "void(__stdcall *)(long, short)") + check_func("void(void __cdecl(void), void __stdcall(void))", + "void(*)(void(*)(), void(__stdcall *)())") + +def test_variadic_overrides_stdcall(): + check("void (__stdcall*)(int, ...)", "void(*)(int, ...)") diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1280,3 +1280,200 @@ """) assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = verify(ffi, 'test_win32_calling_convention_0', r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + res = lib.call1(cb1) + assert res == 500*999*2 + assert res == ffi.addressof(lib, 'call1')(cb1) + res = lib.call2(cb2) + assert res == -500*999*3 + assert res == ffi.addressof(lib, 'call2')(cb2) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is 
ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + ptr_call1 = ffi.addressof(lib, 'call1') + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. 
+ ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_2', """ + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; 
+ //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py --- a/testing/cffi1/test_verify1.py +++ b/testing/cffi1/test_verify1.py @@ -1200,25 +1200,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): From noreply at 
buildbot.pypy.org Tue Oct 6 13:52:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 13:52:47 +0200 (CEST) Subject: [pypy-commit] cffi default: reminder to document Message-ID: <20151006115247.8648E1C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2326:d81337f89fa5 Date: 2015-10-06 13:53 +0200 http://bitbucket.org/cffi/cffi/changeset/d81337f89fa5/ Log: reminder to document diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -19,6 +19,8 @@ It also fixes corner cases like ``typedef const int T; T a;`` which would previously not consider ``a`` as a constant. +* XXX __stdcall + v1.2.1 ====== From noreply at buildbot.pypy.org Tue Oct 6 14:29:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 14:29:04 +0200 (CEST) Subject: [pypy-commit] pypy default: (untested yet) fix switch_arm_gcc.h, which works a bit by chance Message-ID: <20151006122904.8F5331C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80001:0a490c14d60b Date: 2015-10-06 14:29 +0200 http://bitbucket.org/pypy/pypy/changeset/0a490c14d60b/ Log: (untested yet) fix switch_arm_gcc.h, which works a bit by chance diff --git a/rpython/translator/c/src/stacklet/switch_arm_gcc.h b/rpython/translator/c/src/stacklet/switch_arm_gcc.h --- a/rpython/translator/c/src/stacklet/switch_arm_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_arm_gcc.h @@ -5,16 +5,16 @@ # define call_reg(x) "blx " #x "\n" #endif -static void __attribute__((optimize("O3"))) *slp_switch(void *(*save_state)(void*, void*), +static void *slp_switch(void *(*save_state)(void*, void*), void *(*restore_state)(void*, void*), void *extra) { void *result; __asm__ volatile ( - "mov r3, %[save_state]\n" - /* save values in calee saved registers for later */ - "mov r4, %[restore_state]\n" - "mov r5, %[extra]\n" + "ldr r3, %[save_state]\n" + /* save values in callee saved registers for later */ + 
"ldr r4, %[restore_state]\n" + "ldr r5, %[extra]\n" "mov r0, sp\n" /* arg 1: current (old) stack pointer */ "mov r1, r5\n" /* arg 2: extra */ call_reg(r3) /* call save_state() */ @@ -33,14 +33,16 @@ /* The stack's content is now restored. */ "zero:\n" - "mov %[result], r0\n" + "str r0, %[result]\n" - : [result]"=r"(result) /* output variables */ + : [result]"=m"(result) /* output variables */ /* input variables */ - : [restore_state]"r"(restore_state), - [save_state]"r"(save_state), - [extra]"r"(extra) - : "lr", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r13" + : [restore_state]"m"(restore_state), + [save_state]"m"(save_state), + [extra]"m"(extra) + : "lr", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", + "r10", "r11", "r12", /* r13 is sp, r14 is lr, and r15 is pc */ + "memory", "cc", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" ); return result; } From noreply at buildbot.pypy.org Tue Oct 6 15:19:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 6 Oct 2015 15:19:30 +0200 (CEST) Subject: [pypy-commit] pypy default: fixes Message-ID: <20151006131930.59E541C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80002:43e04ba55c9c Date: 2015-10-06 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/43e04ba55c9c/ Log: fixes diff --git a/rpython/translator/c/src/stacklet/Makefile b/rpython/translator/c/src/stacklet/Makefile --- a/rpython/translator/c/src/stacklet/Makefile +++ b/rpython/translator/c/src/stacklet/Makefile @@ -28,19 +28,19 @@ tests-static-g: stacklet.c stacklet.h tests.c gcc ${INC} -Wall -g -o run_tests_static_g stacklet.c tests.c ${DEBUG} - run_tests_static_g + ./run_tests_static_g tests-static-o: stacklet.c stacklet.h tests.c gcc ${INC} -Wall -g -O2 -o run_tests_static_o stacklet.c tests.c ${DEBUG} - run_tests_static_o + ./run_tests_static_o tests-dynamic-g: stacklet_g.so tests.c gcc ${INC} -Wall -g -o run_tests_dynamic_g stacklet_g.so tests.c ${DEBUG} - LD_LIBRARY_PATH=. 
run_tests_dynamic_g + LD_LIBRARY_PATH=. ./run_tests_dynamic_g tests-dynamic-o: stacklet.so tests.c gcc ${INC} -Wall -g -O2 -o run_tests_dynamic_o stacklet.so tests.c ${DEBUG} - LD_LIBRARY_PATH=. run_tests_dynamic_o + LD_LIBRARY_PATH=. ./run_tests_dynamic_o tests-repeat: tests python runtests.py run_tests_static_g > /dev/null diff --git a/rpython/translator/c/src/stacklet/switch_arm_gcc.h b/rpython/translator/c/src/stacklet/switch_arm_gcc.h --- a/rpython/translator/c/src/stacklet/switch_arm_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_arm_gcc.h @@ -10,11 +10,24 @@ void *extra) { void *result; + /* + seven registers to preserve: r2, r3, r7, r8, r9, r10, r11 + registers marked as clobbered: r0, r1, r4, r5, r6, r12, lr + others: r13 is sp; r14 is lr; r15 is pc + */ + __asm__ volatile ( - "ldr r3, %[save_state]\n" + + /* align the stack and save 7 more registers explicitly */ + "mov r0, sp\n" + "and r1, r0, #-16\n" + "mov sp, r1\n" + "push {r0, r2, r3, r7, r8, r9, r10, r11}\n" /* total 8, still aligned */ + /* save values in callee saved registers for later */ - "ldr r4, %[restore_state]\n" - "ldr r5, %[extra]\n" + "mov r4, %[restore_state]\n" /* can't be r0 or r1: marked clobbered */ + "mov r5, %[extra]\n" /* can't be r0 or r1 or r4: marked clob. */ + "mov r3, %[save_state]\n" /* can't be r0, r1, r4, r5: marked clob. */ "mov r0, sp\n" /* arg 1: current (old) stack pointer */ "mov r1, r5\n" /* arg 2: extra */ call_reg(r3) /* call save_state() */ @@ -28,20 +41,22 @@ /* From now on, the stack pointer is modified, but the content of the stack is not restored yet. It contains only garbage here. */ "mov r1, r5\n" /* arg 2: extra */ - /* arg 1: current (new) stack pointer is already in r0*/ + /* arg 1: current (new) stack pointer is already in r0*/ call_reg(r4) /* call restore_state() */ /* The stack's content is now restored. 
*/ "zero:\n" - "str r0, %[result]\n" - : [result]"=m"(result) /* output variables */ + "pop {r1, r2, r3, r7, r8, r9, r10, r11}\n" + "mov sp, r1\n" + "mov %[result], r0\n" + + : [result]"=r"(result) /* output variables */ /* input variables */ - : [restore_state]"m"(restore_state), - [save_state]"m"(save_state), - [extra]"m"(extra) - : "lr", "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", - "r10", "r11", "r12", /* r13 is sp, r14 is lr, and r15 is pc */ + : [restore_state]"r"(restore_state), + [save_state]"r"(save_state), + [extra]"r"(extra) + : "r0", "r1", "r4", "r5", "r6", "r12", "lr", "memory", "cc", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" ); return result; From noreply at buildbot.pypy.org Tue Oct 6 21:31:28 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 Oct 2015 21:31:28 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: assert order in (CORDER, FORTRANORDER) for implemented arrays, fix failure Message-ID: <20151006193129.0DC341C12D6@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80003:07148cce622e Date: 2015-10-06 22:30 +0300 http://bitbucket.org/pypy/pypy/changeset/07148cce622e/ Log: assert order in (CORDER, FORTRANORDER) for implemented arrays, fix failure diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -383,6 +383,7 @@ t_strides[i] = base base *= shape[i] backstrides = calc_backstrides(t_strides, shape) + order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl @@ -434,6 +435,8 @@ self.shape = shape # already tested for overflow in from_shape_and_storage self.size = support.product(shape) * dtype.elsize + if order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "ConcreteArrayNotOwning but order is not 0,1 
rather %d", order) self.order = order self.dtype = dtype self.strides = strides @@ -567,6 +570,8 @@ self.parent = parent self.storage = parent.storage self.gcstruct = parent.gcstruct + if parent.order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "SliceArray but parent order is not 0,1 rather %d", parent.order) self.order = parent.order self.dtype = dtype try: From noreply at buildbot.pypy.org Tue Oct 6 21:31:33 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 6 Oct 2015 21:31:33 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: merge default into branch Message-ID: <20151006193133.806591C1319@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80004:37e0897bbbbe Date: 2015-10-06 22:31 +0300 http://bitbucket.org/pypy/pypy/changeset/37e0897bbbbe/ Log: merge default into branch diff too long, truncating to 2000 out of 5515 lines diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. 
in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . 
import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + 
tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def 
_get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if 
isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = 
[self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. # This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = 
tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals 
= (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 
+773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,13 @@ .. branch: numpy-ctypes Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. 
issue #2148. diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not 
isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, 
w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -178,7 +178,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,13 +200,21 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") + at jit.jit_callback("CFFI") +def py_invoke_callback(callback, ll_res, ll_args): + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) - at jit.jit_callback("CFFI") def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ @@ -228,13 +237,7 @@ space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. 
diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + 
self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype 
= ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. 
+ unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py 
b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,11 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import (wrap_impl, W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter, order_converter import pypy.module.micronumpy.constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -83,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -139,16 +140,11 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, 
space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -161,7 +157,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -169,20 +164,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return 
_find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if 
(is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype @@ -197,6 +254,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -855,6 +855,8 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), get_dtype_cache(space).w_longdtype) + if ret.get_size() < 1: + return ret if side == NPY.SEARCHLEFT: binsearch = loop.binsearch_left else: @@ -1301,6 +1303,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -189,67 +189,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not 
w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) - for w_elem in batch: - if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - w_array = space.lookup(w_elem, '__array__') - if w_array is not None: - # Make sure we call the array implementation of listview, - # since for some ndarray subclasses (matrix, for instance) - # listview does not reduce but rather returns the same class - w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) - new_batch += space.listview(w_elem) - shape.append(size) - batch = new_batch - - - @jit.unroll_safe def 
shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -40,7 +40,10 @@ def product_check(s): i = 1 for x in s: - i = ovfcheck(i * x) + try: + i = ovfcheck(i * x) + except OverflowError: + raise return i def check_and_adjust_index(space, index, size, axis): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -170,7 +170,7 @@ [1, 1, 1, 105, 105] def test_find_shape(self): - from pypy.module.micronumpy.strides import find_shape_and_elems + from pypy.module.micronumpy.ctors import find_shape_and_elems space = self.space shape, elems = find_shape_and_elems(space, @@ -2478,6 +2478,18 @@ a.fill(12) assert (a == u'1').all() + def test_unicode_record_array(self) : + from numpy import dtype, array + t = dtype([('a', 'S3'), ('b', 'U2')]) + x = array([('a', u'b')], dtype=t) + assert str(x) == "[('a', u'b')]" + + t = dtype([('a', 'U3'), ('b', 'S2')]) + x = array([(u'a', 'b')], dtype=t) + x['a'] = u'1' + assert str(x) == "[(u'1', 'b')]" + + def test_boolean_indexing(self): import numpy as np a = np.zeros((1, 3)) @@ -2700,7 +2712,7 @@ "input array from shape (3,1) into shape (3)" a[:, 1] = b[:,0] > 0.5 assert (a == [[0, 1], [0, 1], [0, 1]]).all() - + def test_ufunc(self): from numpy import array @@ -3856,7 +3868,7 @@ assert a[0]['y'] == 2 assert a[1]['y'] == 1 - + a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)]) assert a['b'].shape == (1, 0) b = loads(dumps(a)) diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class 
AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -480,3 +480,9 @@ u = unicode_(u'Aÿ') # raises(UnicodeEncodeError, "str(u)") # XXX assert repr(u) == repr(u'Aÿ') + + def test_binop_with_sequence(self): + import numpy as np + c = np.float64(1.) + [1.] 
+ assert isinstance(c, np.ndarray) + assert (c == [2.]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2231,9 +2231,9 @@ index = i + offset + 4*k data = rffi.cast(Int32.T, ord(box._value[k])) raw_storage_setitem_unaligned(storage, index, data) - for k in range(size, width // 4): - index = i + offset + 4*k - data = rffi.cast(Int32.T, 0) + # zero out the remaining memory + for index in range(size * 4 + i + offset, width): + data = rffi.cast(Int8.T, 0) raw_storage_setitem_unaligned(storage, index, data) def read(self, arr, i, offset, dtype): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -479,6 +479,7 @@ dt_in, dt_out = self._calc_dtype(space, dtype, out, casting) return dt_in, dt_out, self.func + @jit.unroll_safe def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'): if arg_dtype.is_object(): return arg_dtype, arg_dtype @@ -672,6 +673,7 @@ "requested type has type code '%s'", self.name, dtype.char) + @jit.unroll_safe def _calc_dtype(self, space, l_dtype, r_dtype, out, casting, w_arg1, w_arg2): if l_dtype.is_object() or r_dtype.is_object(): diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,8 +15,12 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', - 'enable_debug': 'interp_resop.enable_debug', - 'disable_debug': 'interp_resop.disable_debug', + # those things are disabled because they have bugs, but if + # they're found to be useful, fix test_ztranslation_jit_stats + # in the backend first. 
get_stats_snapshot still produces + # correct loop_runs if PYPYLOG is correct + #'enable_debug': 'interp_resop.enable_debug', + #'disable_debug': 'interp_resop.disable_debug', 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py --- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -7,10 +7,11 @@ [2 ** n - 1 for 
n in range(26)]) def test_newstr_constant_size(self): - for size in TestAlloc.SIZES: + for size in sorted(TestAlloc.SIZES): yield self.newstr_constant_size, size def newstr_constant_size(self, size): + print 'size =', size src = """if 1: N = %(size)d part_a = 'a' * N diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -248,3 +248,42 @@ guard_false(i157, descr=...) jump(..., descr=...) """) + + def test_mixed_div(self): + N = 1500 + def main(): + N = 1500 + import _numpypy.multiarray as np + arr = np.zeros(N) + l = [arr[i]/2. for i in range(N)] + return l + log = self.run(main, []) + assert log.result == [0.] * N + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i92 = int_ge(i91, i37) + guard_false(i92, descr=...) + i93 = int_add(i91, 1) + setfield_gc(p23, i93, descr=) + i94 = int_ge(i91, i56) + guard_false(i94, descr=...) 
+ i96 = int_mul(i91, i58) + i97 = int_add(i51, i96) + f98 = raw_load_f(i63, i97, descr=) + guard_not_invalidated(descr=...) + f100 = float_mul(f98, 0.500000) + i101 = int_add(i79, 1) + i102 = arraylen_gc(p85, descr=) + i103 = int_lt(i102, i101) + cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) + guard_no_exception(descr=...) + p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) + i106 = getfield_raw_i(#, descr=) + setfield_gc(p76, i101, descr=) + i107 = int_lt(i106, 0) + guard_false(i107, descr=...) + jump(..., descr=...) + """) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -757,8 +757,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert 
lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = _get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert 
bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int *a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -209,6 +209,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 @@ -1636,11 +1639,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1653,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2251,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... 
t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -782,8 +782,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert 
int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, ffiplatform, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... };") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def 
test_restrict_fields(): + if sys.platform == 'win32': + py.test.skip("'__restrict__' probably not recognized") + ffi = FFI() + ffi.cdef("""struct foo_s { void * restrict b; };""") + lib = verify(ffi, 'test_restrict_fields', """ + struct foo_s { void * __restrict__ b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'b' + assert foo_s.fields[0][1].type is ffi.typeof("void *") + +def test_const_array_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[4]; };""") + lib = verify(ffi, 'test_const_array_fields', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_array_fields_varlength(): From noreply at buildbot.pypy.org Wed Oct 7 02:45:34 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 02:45:34 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: hg merge default Message-ID: <20151007004534.768171C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80005:44956214dd48 Date: 2015-09-25 18:57 +0100 http://bitbucket.org/pypy/pypy/changeset/44956214dd48/ Log: hg merge default diff too long, truncating to 2000 out of 56370 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -15,3 +15,4 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 +f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -168,7 +168,6 @@ Michael Twomey Lucian Branescu Mihaila Yichao Yu - Anton Gulenko Gabriel Lavoie Olivier Dormond Jared Grubb @@ -215,6 +214,7 @@ Carl Meyer Karl Ramm Pieter Zieschang + Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -247,6 +247,7 @@ Toni Mattis Lucas Stadler Julian Berman + Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez @@ -352,8 +353,7 @@ Except when otherwise stated (look for LICENSE files or copyright/license information at the beginning of each file) the files in the 'lib-python/2.7' directory are all copyrighted by the Python Software Foundation and licensed -under the Python Software License of which you can find a copy here: -http://www.python.org/doc/Copyright.html +under the terms that you can find here: https://docs.python.org/2/license.html License for 'pypy/module/unicodedata/' ====================================== @@ -430,9 +430,9 @@ gdbm module, provided in the file lib_pypy/gdbm.py, is redistributed under the terms of the GPL license as well. -License for 'pypy/module/_vmprof/src' +License for 'rpython/rlib/rvmprof/src' -------------------------------------- The code is based on gperftools. You may see a copy of the License for it at - https://code.google.com/p/gperftools/source/browse/COPYING + https://github.com/gperftools/gperftools/blob/master/COPYING diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -303,7 +303,7 @@ RegrTest('test_memoryio.py'), RegrTest('test_memoryview.py'), RegrTest('test_md5.py'), - RegrTest('test_mhlib.py'), + RegrTest('test_mhlib.py', usemodules='binascii struct'), RegrTest('test_mimetools.py'), RegrTest('test_mimetypes.py'), RegrTest('test_MimeWriter.py', core=False, usemodules='binascii'), diff --git a/lib_pypy/_curses.py b/lib_pypy/_curses.py --- a/lib_pypy/_curses.py +++ b/lib_pypy/_curses.py @@ -1026,16 +1026,22 @@ def tigetflag(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetflag(capname) def tigetnum(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = capname.encode('ascii') return lib.tigetnum(capname) def tigetstr(capname): _ensure_initialised_setupterm() + if isinstance(capname, unicode): + capname = 
capname.encode('ascii') val = lib.tigetstr(capname) if int(ffi.cast("intptr_t", val)) in (0, -1): return None diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.2.0 +Version: 1.3.0 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.2.0" -__version_info__ = (1, 2, 0) +__version__ = "1.3.0" +__version_info__ = (1, 3, 0) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -46,7 +46,7 @@ # endif #else # include -# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include # endif #endif @@ -214,6 +214,12 @@ (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ _CFFI__UNKNOWN_PRIM) +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? 
_CFFI__UNKNOWN_LONG_DOUBLE : \ + _CFFI__UNKNOWN_FLOAT_PRIM) + #define _cffi_check_int(got, got_nonpos, expected) \ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py --- a/lib_pypy/cffi/cffi_opcode.py +++ b/lib_pypy/cffi/cffi_opcode.py @@ -106,7 +106,9 @@ PRIM_UINTMAX = 47 _NUM_PRIM = 48 -_UNKNOWN_PRIM = -1 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -15,9 +15,11 @@ except ImportError: lock = None -_r_comment = re.compile(r"/\*.*?\*/|//.*?$", re.DOTALL | re.MULTILINE) -_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)\s+(.*?)$", - re.MULTILINE) +_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", + re.DOTALL | re.MULTILINE) +_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" + r"\b((?:[^\n\\]|\\.)*?)$", + re.DOTALL | re.MULTILINE) _r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}") _r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$") _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") @@ -39,6 +41,7 @@ macros = {} for match in _r_define.finditer(csource): macroname, macrovalue = match.groups() + macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) # Replace "[...]" with "[__dotdotdotarray__]" @@ -423,13 +426,10 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - elif (len(params) == 1 and - isinstance(params[0].type, pycparser.c_ast.TypeDecl) and - isinstance(params[0].type.type, pycparser.c_ast.IdentifierType) - and list(params[0].type.type.names) == ['void']): - del params[0] args = [self._as_func_arg(self._get_type(argdeclnode.type)) for argdeclnode in params] + if not ellipsis and 
args == [model.void_type]: + args = [] result = self._get_type(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) @@ -648,10 +648,21 @@ assert typenames[-1] == '__dotdotdot__' if len(typenames) == 1: return model.unknown_type(decl.name) - for t in typenames[:-1]: - if t not in ['int', 'short', 'long', 'signed', 'unsigned', 'char']: - raise api.FFIError(':%d: bad usage of "..."' % decl.coord.line) + + if (typenames[:-1] == ['float'] or + typenames[:-1] == ['double']): + # not for 'long double' so far + result = model.UnknownFloatType(decl.name) + else: + for t in typenames[:-1]: + if t not in ['int', 'short', 'long', 'signed', + 'unsigned', 'char']: + raise api.FFIError(':%d: bad usage of "..."' % + decl.coord.line) + result = model.UnknownIntegerType(decl.name) + if self._uses_new_feature is None: self._uses_new_feature = "'typedef %s... %s'" % ( ' '.join(typenames[:-1]), decl.name) - return model.UnknownIntegerType(decl.name) + + return result diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -158,12 +158,23 @@ self.c_name_with_marker = name + '&' def is_integer_type(self): - return True # for now + return True def build_backend_type(self, ffi, finishlist): raise NotImplementedError("integer type '%s' can only be used after " "compilation" % self.name) +class UnknownFloatType(BasePrimitiveType): + _attrs_ = ('name', ) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("float type '%s' can only be used after " + "compilation" % self.name) + class BaseFunctionType(BaseType): _attrs_ = ('args', 'result', 'ellipsis') diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -79,7 +79,9 @@ #define _CFFI_PRIM_UINTMAX 47 #define _CFFI__NUM_PRIM 48 -#define _CFFI__UNKNOWN_PRIM 
(-1) +#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_FLOAT_PRIM (-2) +#define _CFFI__UNKNOWN_LONG_DOUBLE (-3) struct _cffi_global_s { diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -4,11 +4,6 @@ VERSION = "0x2601" -try: - int_type = (int, long) -except NameError: # Python 3 - int_type = int - class GlobalExpr: def __init__(self, name, address, type_op, size=0, check_value=0): @@ -473,6 +468,10 @@ if tp.is_integer_type() and tp.name != '_Bool': converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name + elif isinstance(tp, model.UnknownFloatType): + # don't check with is_float_type(): it may be a 'long + # double' here, and _cffi_to_c_double would loose precision + converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) else: converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), tp.name.replace(' ', '_')) @@ -527,6 +526,8 @@ if isinstance(tp, model.BasePrimitiveType): if tp.is_integer_type(): return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -1112,6 +1113,12 @@ ' ) <= 0)' % (tp.name, tp.name, tp.name)) self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + def _emit_bytecode_RawFunctionType(self, tp, index): self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) index += 1 diff --git a/lib_pypy/cffi/setuptools_ext.py b/lib_pypy/cffi/setuptools_ext.py --- a/lib_pypy/cffi/setuptools_ext.py +++ b/lib_pypy/cffi/setuptools_ext.py @@ -81,10 +81,16 @@ allsources.extend(kwds.pop('sources', [])) ext = Extension(name=module_name, 
sources=allsources, **kwds) - def make_mod(tmpdir): + def make_mod(tmpdir, pre_run=None): c_file = os.path.join(tmpdir, module_name + source_extension) log.info("generating cffi module %r" % c_file) mkpath(tmpdir) + # a setuptools-only, API-only hook: called with the "ext" and "ffi" + # arguments just before we turn the ffi into C code. To use it, + # subclass the 'distutils.command.build_ext.build_ext' class and + # add a method 'def pre_run(self, ext, ffi)'. + if pre_run is not None: + pre_run(ext, ffi) updated = recompiler.make_c_source(ffi, module_name, source, c_file) if not updated: log.info("already up-to-date") @@ -98,7 +104,8 @@ class build_ext_make_mod(base_class): def run(self): if ext.sources[0] == '$PLACEHOLDER': - ext.sources[0] = make_mod(self.build_temp) + pre_run = getattr(self, 'pre_run', None) + ext.sources[0] = make_mod(self.build_temp, pre_run) base_class.run(self) dist.cmdclass['build_ext'] = build_ext_make_mod # NB. multiple runs here will create multiple 'build_ext_make_mod' diff --git a/lib_pypy/ctypes_support.py b/lib_pypy/ctypes_support.py --- a/lib_pypy/ctypes_support.py +++ b/lib_pypy/ctypes_support.py @@ -28,7 +28,7 @@ def _where_is_errno(): return standard_c_lib.__errno_location() -elif sys.platform in ('darwin', 'freebsd7', 'freebsd8', 'freebsd9'): +elif sys.platform == 'darwin' or sys.platform.startswith('freebsd'): standard_c_lib.__error.restype = ctypes.POINTER(ctypes.c_int) standard_c_lib.__error.argtypes = None def _where_is_errno(): diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info --- a/lib_pypy/greenlet.egg-info +++ b/lib_pypy/greenlet.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: greenlet -Version: 0.4.7 +Version: 0.4.9 Summary: Lightweight in-process concurrent programming Home-page: https://github.com/python-greenlet/greenlet Author: Ralf Schmitt (for CPython), PyPy team diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,7 +1,7 @@ 
import sys import _continuation -__version__ = "0.4.7" +__version__ = "0.4.9" # ____________________________________________________________ # Exceptions diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,7 +39,8 @@ "_csv", "cppyy", "_pypyjson" ]) -if sys.platform.startswith('linux') and os.uname()[4] == 'x86_64': +if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' + and sys.maxint > 2**32): # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() @@ -91,6 +92,8 @@ if sys.platform == "win32": module_suggests["cpyext"].append(("translation.shared", True)) + +# NOTE: this dictionary is not used any more module_import_dependencies = { # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception @@ -107,6 +110,7 @@ } def get_module_validator(modname): + # NOTE: this function is not used any more if modname in module_import_dependencies: modlist = module_import_dependencies[modname] def validator(config): diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -67,7 +67,7 @@ # The short X.Y version. version = '2.6' # The full version, including alpha/beta/rc tags. -release = '2.6.0' +release = '2.6.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -32,6 +32,7 @@ Lukas Diekmann Sven Hager Anders Lehmann + Richard Plangger Aurelien Campeas Remi Meier Niklaus Haldimann @@ -57,7 +58,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -138,7 +138,6 @@ Michael Twomey Lucian Branescu Mihaila Yichao Yu - Anton Gulenko Gabriel Lavoie Olivier Dormond Jared Grubb @@ -185,6 +184,7 @@ Carl Meyer Karl Ramm Pieter Zieschang + Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -217,6 +217,7 @@ Toni Mattis Lucas Stadler Julian Berman + Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez @@ -252,6 +253,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -20,10 +20,6 @@ It initializes the RPython/PyPy GC and does a bunch of necessary startup code. This function cannot fail. -.. function:: void pypy_init_threads(void); - - Initialize threads. Only need to be called if there are any threads involved - .. function:: int pypy_setup_home(char* home, int verbose); This function searches the PyPy standard library starting from the given @@ -38,6 +34,11 @@ Function returns 0 on success or -1 on failure, can be called multiple times until the library is found. +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved. + *Must be called after pypy_setup_home()* + .. function:: int pypy_execute_source(char* source); Execute the Python source code given in the ``source`` argument. In case of diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,8 +5,8 @@ with any external library. 
Right now, there are the following possibilities of providing -third-party modules for the PyPy python interpreter (in order of -usefulness): +third-party modules for the PyPy python interpreter (in order, from most +directly useful to most messy to use with PyPy): * Write them in pure Python and use CFFI_. diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -67,7 +67,7 @@ The other commands of ``setup.py`` are available too, like ``build``. .. _PyPI: https://pypi.python.org/pypi -.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv +.. _`use virtualenv (as documented here)`: install.html#installing-using-virtualenv Module xyz does not work in the sandboxed PyPy? diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -31,15 +31,14 @@ and add the new file to pypy/doc/index-of-whatsnew.rst * go to pypy/tool/release and run ``force-builds.py `` - The following binaries should be built, however, we need more buildbots - - JIT: windows, linux, os/x, armhf, armel - - no JIT: windows, linux, os/x - - sandbox: linux, os/x + The following JIT binaries should be built, however, we need more buildbots + windows, linux-32, linux-64, osx64, armhf-raring, armhf-raspberrian, armel, + freebsd64 * wait for builds to complete, make sure there are no failures * download the builds, repackage binaries. Tag the release version and download and repackage source from bitbucket. You may find it - convenient to use the ``repackage.sh`` script in pypy/tools to do this. + convenient to use the ``repackage.sh`` script in pypy/tool/release to do this. 
Otherwise repackage and upload source "-src.tar.bz2" to bitbucket and to cobra, as some packagers prefer a clearly labeled source package diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst release-2.5.0.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst whatsnew-2.5.0.rst diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst --- a/pypy/doc/jit-hooks.rst +++ b/pypy/doc/jit-hooks.rst @@ -5,19 +5,8 @@ understanding what's pypy's JIT doing while running your program. There are three functions related to that coming from the ``pypyjit`` module: -.. function:: set_optimize_hook(callable) - Set a compiling hook that will be called each time a loop is optimized, - but before assembler compilation. This allows adding additional - optimizations on Python level. - - The callable will be called with the ``pypyjit.JitLoopInfo`` object. - Refer to it's documentation for details. - - Result value will be the resulting list of operations, or None - - -.. function:: set_compile_hook(callable) +.. function:: set_compile_hook(callable, operations=True) Set a compiling hook that will be called each time a loop is compiled. @@ -28,6 +17,9 @@ inside the jit hook is itself jitted, it will get compiled, but the jit hook won't be called for that. + if operations=False, no list of operations will be available. Useful + if the hook is supposed to be very lighweight. + .. 
function:: set_abort_hook(hook) Set a hook (callable) that will be called each time there is tracing @@ -66,3 +58,25 @@ * ``loop_run_times`` - counters for number of times loops are run, only works when ``enable_debug`` is called. + +.. class:: JitLoopInfo + + A class containing information about the compiled loop. Usable attributes: + + * ``operations`` - list of operations, if requested + + * ``jitdriver_name`` - the name of jitdriver associated with this loop + + * ``greenkey`` - a key at which the loop got compiled (e.g. code position, + is_being_profiled, pycode tuple for python jitdriver) + + * ``loop_no`` - loop cardinal number + + * ``bridge_no`` - id of the fail descr + + * ``type`` - "entry bridge", "loop" or "bridge" + + * ``asmaddr`` - an address in raw memory where assembler resides + + * ``asmlen`` - length of raw memory with assembler associated + diff --git a/pypy/doc/release-2.6.1.rst b/pypy/doc/release-2.6.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-2.6.1.rst @@ -0,0 +1,129 @@ +========== +PyPy 2.6.1 +========== + +We're pleased to announce PyPy 2.6.1, an update to PyPy 2.6.0 released June 1. +We have updated stdlib to 2.7.10, `cffi`_ to version 1.3, extended support for +the new vmprof_ statistical profiler for multiple threads, and increased +functionality of numpy. + +You can download the PyPy 2.6.1 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and our volunteers and contributors. + +.. _`cffi`: https://cffi.readthedocs.org + +We would also like to encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ with making +RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. 
_`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights +=========== + +* Bug Fixes + + * Revive non-SSE2 support + + * Fixes for detaching _io.Buffer* + + * On Windows, close (and flush) all open sockets on exiting + + * Drop support for ancient macOS v10.4 and before + + * Clear up contention in the garbage collector between trace-me-later and pinning + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy. 
+ +* New features: + + * cffi was updated to version 1.3 + + * The python stdlib was updated to 2.7.10 from 2.7.9 + + * vmprof now supports multiple threads and OS X + + * The translation process builds cffi import libraries for some stdlib + packages, which should prevent confusion when package.py is not used + + * better support for gdb debugging + + * freebsd should be able to translate PyPy "out of the box" with no patches + +* Numpy: + + * Better support for record dtypes, including the ``align`` keyword + + * Implement casting and create output arrays accordingly (still missing some corner cases) + + * Support creation of unicode ndarrays + + * Better support ndarray.flags + + * Support ``axis`` argument in more functions + + * Refactor array indexing to support ellipses + + * Allow the docstrings of built-in numpy objects to be set at run-time + + * Support the ``buffered`` nditer creation keyword + +* Performance improvements: + + * Delay recursive calls to make them non-recursive + + * Skip loop unrolling if it compiles too much code + + * Tweak the heapcache + + * Add a list strategy for lists that store both floats and 32-bit integers. + The latter are encoded as nonstandard NaNs. Benchmarks show that the speed + of such lists is now very close to the speed of purely-int or purely-float + lists. + + * Simplify implementation of ffi.gc() to avoid most weakrefs + + * Massively improve the performance of map() with more than + one sequence argument + +.. _`vmprof`: https://vmprof.readthedocs.org +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-2.6.1.html + +Please try it out and let us know what you think. We welcome +success stories, `experiments`_, or `benchmarks`_, we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + +.. _`experiments`: https://morepypy.blogspot.com/2015/02/experiments-in-pyrlang-with-rpython.html +.. 
_`benchmarks`: https://mithrandi.net/blog/2015/03/axiom-benchmark-results-on-pypy-2-5-0 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-2.6.1.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-2.6.1.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-2.6.1.rst @@ -1,6 +1,6 @@ -======================= -What's new in PyPy 2.6+ -======================= +======================== +What's new in PyPy 2.6.1 +======================== .. this is a revision shortly after release-2.6.0 .. startrev: 91904d5c5188 @@ -32,7 +32,10 @@ ``lst[0]`` is still *not* the float ``42.0`` but the integer ``42``.) .. branch: cffi-callback-onerror +Part of cffi 1.2. + .. branch: cffi-new-allocator +Part of cffi 1.2. .. branch: unicode-dtype @@ -67,3 +70,7 @@ .. branch: vmprof-review Clean up of vmprof, notably to handle correctly multiple threads + +.. branch: no_boehm_dl + +Remove extra link library from Boehm GC diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,68 +2,40 @@ What's new in PyPy 2.6+ ======================= -.. this is a revision shortly after release-2.6.0 -.. startrev: 91904d5c5188 +.. this is a revision shortly after release-2.6.1 +.. startrev: 07769be4057b -.. branch: use_min_scalar -Correctly resolve the output dtype of ufunc(array, scalar) calls. +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. -.. branch: stdlib-2.7.10 +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. Should improve warmup time +by 20% or so. -Update stdlib to version 2.7.10 +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends -.. branch: issue2062 +.. 
branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports -.. branch: disable-unroll-for-short-loops -The JIT no longer performs loop unrolling if the loop compiles to too much code. +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. -.. branch: run-create_cffi_imports +.. branch: remember-tracing-counts +Reenable jithooks -Build cffi import libraries as part of translation by monkey-patching an -additional task into translation +.. branch: detect_egd2 -.. branch: int-float-list-strategy - -Use a compact strategy for Python lists that mix integers and floats, -at least if the integers fit inside 32 bits. These lists are now -stored as an array of floats, like lists that contain only floats; the -difference is that integers are stored as tagged NaNs. (This should -have no visible effect! After ``lst = [42, 42.5]``, the value of -``lst[0]`` is still *not* the float ``42.0`` but the integer ``42``.) - -.. branch: cffi-callback-onerror -.. branch: cffi-new-allocator - -.. branch: unicode-dtype - -Partial implementation of unicode dtype and unicode scalars. - -.. branch: dtypes-compatability - -Improve compatibility with numpy dtypes; handle offsets to create unions, -fix str() and repr(), allow specifying itemsize, metadata and titles, add flags, -allow subclassing dtype - -.. branch: indexing - -Refactor array indexing to support ellipses. - -.. branch: numpy-docstrings - -Allow the docstrings of built-in numpy objects to be set at run-time. - -.. 
branch: nditer-revisited - -Implement nditer 'buffered' flag and fix some edge cases - -.. branch: ufunc-reduce - -Allow multiple axes in ufunc.reduce() - -.. branch: fix-tinylang-goals - -Update tinylang goals to match current rpython - -.. branch: vmprof-review - -Clean up of vmprof, notably to handle correctly multiple threads +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py --- a/pypy/interpreter/miscutils.py +++ b/pypy/interpreter/miscutils.py @@ -9,6 +9,7 @@ implementation for this feature, and patches 'space.threadlocals' when 'thread' is initialized. """ + _immutable_fields_ = ['_value?'] _value = None def get_ec(self): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -111,7 +111,6 @@ 'interp_magic.mapdict_cache_counter') PYC_MAGIC = get_pyc_magic(self.space) self.extra_interpdef('PYC_MAGIC', 'space.wrap(%d)' % PYC_MAGIC) - # try: from rpython.jit.backend import detect_cpu model = detect_cpu.autodetect() @@ -121,7 +120,7 @@ raise else: pass # ok fine to ignore in this case - # + if self.space.config.translation.jit: features = detect_cpu.getcpufeatures(model) self.extra_interpdef('jit_backend_features', diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -2,7 +2,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rlib import rdynload -VERSION = "1.2.0" +VERSION = "1.3.0" class Module(MixedModule): diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -19,13 +19,27 @@ # 
____________________________________________________________ +class Closure(object): + """This small class is here to have a __del__ outside any cycle.""" + + ll_error = lltype.nullptr(rffi.CCHARP.TO) # set manually + + def __init__(self, ptr): + self.ptr = ptr + + def __del__(self): + clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self.ptr)) + if self.ll_error: + lltype.free(self.ll_error, flavor='raw') + + class W_CDataCallback(W_CData): #_immutable_fields_ = ... - ll_error = lltype.nullptr(rffi.CCHARP.TO) w_onerror = None def __init__(self, space, ctype, w_callable, w_error, w_onerror): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) + self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) # if not space.is_true(space.callable(w_callable)): @@ -44,10 +58,11 @@ if size > 0: if fresult.is_primitive_integer and size < SIZE_OF_FFI_ARG: size = SIZE_OF_FFI_ARG - self.ll_error = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw', - zero=True) + self._closure.ll_error = lltype.malloc(rffi.CCHARP.TO, size, + flavor='raw', zero=True) if not space.is_none(w_error): - convert_from_object_fficallback(fresult, self.ll_error, w_error) + convert_from_object_fficallback(fresult, self._closure.ll_error, + w_error) # self.unique_id = compute_unique_id(self) global_callback_mapping.set(self.unique_id, self) @@ -74,12 +89,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) - #@rgc.must_be_light_finalizer - def __del__(self): - clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self._ptr)) - if self.ll_error: - lltype.free(self.ll_error, flavor='raw') - def _repr_extra(self): space = self.space return 'calling ' + space.str_w(space.repr(self.w_callable)) @@ -114,8 +123,8 @@ def write_error_return_value(self, ll_res): fresult = self.getfunctype().ctitem if fresult.size > 0: - misc._raw_memcopy(self.ll_error, ll_res, fresult.size) - keepalive_until_here(self) # to keep self.ll_error alive + 
misc._raw_memcopy(self._closure.ll_error, ll_res, fresult.size) + keepalive_until_here(self) # to keep self._closure.ll_error alive global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) diff --git a/pypy/module/_cffi_backend/cffi_opcode.py b/pypy/module/_cffi_backend/cffi_opcode.py --- a/pypy/module/_cffi_backend/cffi_opcode.py +++ b/pypy/module/_cffi_backend/cffi_opcode.py @@ -9,16 +9,16 @@ assert isinstance(self.arg, str) return '(_cffi_opcode_t)(%s)' % (self.arg,) classname = CLASS_NAME[self.op] - return '_CFFI_OP(_CFFI_OP_%s, %d)' % (classname, self.arg) + return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg) def as_python_bytes(self): - if self.op is None: - if self.arg.isdigit(): - value = int(self.arg) # non-negative: '-' not in self.arg - if value >= 2**31: - raise OverflowError("cannot emit %r: limited to 2**31-1" - % (self.arg,)) - return format_four_bytes(value) + if self.op is None and self.arg.isdigit(): + value = int(self.arg) # non-negative: '-' not in self.arg + if value >= 2**31: + raise OverflowError("cannot emit %r: limited to 2**31-1" + % (self.arg,)) + return format_four_bytes(value) + if isinstance(self.arg, str): from .ffiplatform import VerificationError raise VerificationError("cannot emit to Python: %r" % (self.arg,)) return format_four_bytes((self.arg << 8) | self.op) @@ -106,7 +106,9 @@ PRIM_UINTMAX = 47 _NUM_PRIM = 48 -_UNKNOWN_PRIM = -1 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -81,6 +81,13 @@ if num == cffi_opcode._UNKNOWN_PRIM: raise oefmt(ffi.w_FFIError, "primitive integer type with an " "unexpected size (or not an integer type at all)") + elif num == cffi_opcode._UNKNOWN_FLOAT_PRIM: + raise oefmt(ffi.w_FFIError, "primitive 
floating-point type with an " + "unexpected size (or not a float type at all)") + elif num == cffi_opcode._UNKNOWN_LONG_DOUBLE: + raise oefmt(ffi.w_FFIError, "primitive floating-point type is " + "'long double', not supported for now with " + "the syntax 'typedef double... xxx;'") else: raise oefmt(space.w_NotImplementedError, "prim=%d", num) realize_cache = space.fromcache(RealizeCache) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,6 +1,9 @@ # ____________________________________________________________ import sys +assert __version__ == "1.3.0", ("This test_c.py file is for testing a version" + " of cffi that differs from the one that we" + " get from 'import _cffi_backend'") if sys.version_info < (3,): type_or_class = "type" mandatory_b_prefix = '' @@ -3424,7 +3427,3 @@ "be 'foo *', but the types are different (check " "that you are not e.g. 
mixing up different ffi " "instances)") - -def test_version(): - # this test is here mostly for PyPy - assert __version__ == "1.2.0" diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,8 +1,12 @@ +import sys + class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', '_rawffi', 'itertools', - 'signal', 'select', 'fcntl', + 'signal', 'select', 'binascii')) + if sys.platform != 'win32': + spaceconfig['usemodules'] += ('fcntl',) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py --- a/pypy/module/_multiprocessing/test/test_semaphore.py +++ b/pypy/module/_multiprocessing/test/test_semaphore.py @@ -1,12 +1,19 @@ +import sys + from pypy.module._multiprocessing.interp_semaphore import ( RECURSIVE_MUTEX, SEMAPHORE) class AppTestSemaphore: spaceconfig = dict(usemodules=('_multiprocessing', 'thread', - 'signal', 'select', 'fcntl', + 'signal', 'select', 'binascii', 'struct')) + if sys.platform == 'win32': + spaceconfig['usemodules'] += ('_rawffi',) + else: + spaceconfig['usemodules'] += ('fcntl',) + def setup_class(cls): cls.w_SEMAPHORE = cls.space.wrap(SEMAPHORE) cls.w_RECURSIVE = cls.space.wrap(RECURSIVE_MUTEX) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -241,20 +241,26 @@ res = libssl_RAND_status() return space.wrap(res) - @unwrap_spec(path=str) - def RAND_egd(space, path): - """RAND_egd(path) -> bytes + if HAVE_OPENSSL_RAND_EGD: + @unwrap_spec(path=str) + def RAND_egd(space, path): + """RAND_egd(path) -> bytes - Queries the entropy gather daemon (EGD) on socket path. Returns number - of bytes read. 
Raises socket.sslerror if connection to EGD fails or - if it does provide enough data to seed PRNG.""" - with rffi.scoped_str2charp(path) as socket_path: - bytes = libssl_RAND_egd(socket_path) - if bytes == -1: - raise ssl_error(space, - "EGD connection failed or EGD did not return " - "enough data to seed the PRNG") - return space.wrap(bytes) + Queries the entropy gather daemon (EGD) on socket path. Returns number + of bytes read. Raises socket.sslerror if connection to EGD fails or + if it does provide enough data to seed PRNG.""" + with rffi.scoped_str2charp(path) as socket_path: + bytes = libssl_RAND_egd(socket_path) + if bytes == -1: + raise ssl_error(space, + "EGD connection failed or EGD did not return " + "enough data to seed the PRNG") + return space.wrap(bytes) + else: + # Dummy func for platforms missing RAND_egd(). Most likely LibreSSL. + @unwrap_spec(path=str) + def RAND_egd(space, path): + raise ssl_error(space, "RAND_egd unavailable") class _SSLSocket(W_Root): diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py --- a/pypy/module/_ssl/test/test_ssl.py +++ b/pypy/module/_ssl/test/test_ssl.py @@ -36,7 +36,8 @@ assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple) assert len(_ssl.OPENSSL_VERSION_INFO) == 5 assert isinstance(_ssl.OPENSSL_VERSION, str) - assert 'openssl' in _ssl.OPENSSL_VERSION.lower() + lower_version = _ssl.OPENSSL_VERSION.lower() + assert 'openssl' in lower_version or "libressl" in lower_version assert isinstance(_ssl.ALERT_DESCRIPTION_ACCESS_DENIED, int) @@ -69,8 +70,9 @@ def test_sslwrap(self): import _ssl, _socket, sys, gc - if sys.platform == 'darwin' or 'freebsd' in sys.platform: - skip("hangs indefinitely on OSX & FreeBSD (also on CPython)") + if sys.platform == 'darwin' or 'freebsd' in sys.platform or \ + 'openbsd' in sys.platform: + skip("hangs indefinitely on OSX & BSD (also on CPython)") s = _socket.socket() if sys.version_info < (2, 7, 9): ss = _ssl.sslwrap(s, 0) diff --git 
a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -21,11 +21,12 @@ i = 0 count = 0 i += 5 * WORD # header - assert s[i] == '\x04' - i += 1 # marker - assert s[i] == '\x04' - i += 1 # length - i += len('pypy') + assert s[i ] == '\x05' # MARKER_HEADER + assert s[i + 1] == '\x00' # 0 + assert s[i + 2] == '\x01' # VERSION_THREAD_ID + assert s[i + 3] == chr(4) # len('pypy') + assert s[i + 4: i + 8] == 'pypy' + i += 8 while i < len(s): if s[i] == '\x03': break diff --git a/pypy/module/_vmprof/test/test_direct.py b/pypy/module/_vmprof/test/test_direct.py --- a/pypy/module/_vmprof/test/test_direct.py +++ b/pypy/module/_vmprof/test/test_direct.py @@ -42,7 +42,7 @@ } -""" + open(str(srcdir.join("rvmprof_get_custom_offset.h"))).read()) +""" + open(str(srcdir.join("vmprof_get_custom_offset.h"))).read()) class TestDirect(object): def test_infrastructure(self): diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -379,6 +379,8 @@ PyObject *ht_name, *ht_slots; } PyHeapTypeObject; +#define PyObject_Bytes PyObject_Str + /* Flag bits for printing: */ #define Py_PRINT_RAW 1 /* No string quotes etc. 
*/ diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from rpython.rlib import jit class W_Count(W_Root): @@ -322,6 +323,11 @@ """) +islice_ignore_items_driver = jit.JitDriver(name='islice_ignore_items', + greens=['tp'], + reds=['num', 'w_islice', + 'w_iterator']) + class W_ISlice(W_Root): def __init__(self, space, w_iterable, w_startstop, args_w): self.iterable = space.iter(w_iterable) @@ -407,11 +413,18 @@ raise def _ignore_items(self, num): - if self.iterable is None: + w_iterator = self.iterable + if w_iterator is None: raise OperationError(self.space.w_StopIteration, self.space.w_None) + + tp = self.space.type(w_iterator) while True: + islice_ignore_items_driver.jit_merge_point(tp=tp, + num=num, + w_islice=self, + w_iterator=w_iterator) try: - self.space.next(self.iterable) + self.space.next(w_iterator) except OperationError as e: if e.match(self.space, self.space.w_StopIteration): self.iterable = None diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py --- a/pypy/module/itertools/test/test_itertools.py +++ b/pypy/module/itertools/test/test_itertools.py @@ -1085,3 +1085,18 @@ assert list(itertools.islice(c2, 3)) == expected c3 = pickle.loads(pickle.dumps(c)) assert list(itertools.islice(c3, 3)) == expected + + def test_islice_attack(self): + import itertools + class Iterator(object): + first = True + def __iter__(self): + return self + def next(self): + if self.first: + self.first = False + list(islice) + return 52 + myiter = Iterator() + islice = itertools.islice(myiter, 5, 8) + raises(StopIteration, islice.next) diff --git a/pypy/module/micronumpy/__init__.py 
b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -9,6 +9,7 @@ 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', 'flatiter': 'flatiter.W_FlatIterator', + 'flagsobj': 'flagsobj.W_FlagsObject', '_reconstruct' : 'ndarray._reconstruct', 'scalar' : 'ctors.build_scalar', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,6 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from rpython.tool.pairtype import extendabletype +from rpython.rlib.rarithmetic import ovfcheck from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -44,9 +45,9 @@ raise oefmt(space.w_ValueError, "sequence too large; must be smaller than %d", NPY.MAXDIMS) try: - support.product(shape) * dtype.elsize + ovfcheck(support.product_check(shape) * dtype.elsize) except OverflowError as e: - raise oefmt(space.w_ValueError, "array is too big") + raise oefmt(space.w_ValueError, "array is too big.") strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides, zero=zero) @@ -68,9 +69,9 @@ raise oefmt(space.w_ValueError, "sequence too large; must be smaller than %d", NPY.MAXDIMS) try: - totalsize = support.product(shape) * isize + totalsize = ovfcheck(support.product_check(shape) * isize) except OverflowError as e: - raise oefmt(space.w_ValueError, "array is too big") + raise oefmt(space.w_ValueError, "array is too big.") if storage_bytes > 0 : if totalsize > storage_bytes: raise OperationError(space.w_TypeError, space.wrap( diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -147,7 +147,7 @@ def get_flags(self): return 
(NPY.ARRAY_C_CONTIGUOUS | NPY.ARRAY_F_CONTIGUOUS | - NPY.ARRAY_WRITEABLE | NPY.ARRAY_OWNDATA) + NPY.ARRAY_ALIGNED | NPY.ARRAY_OWNDATA) def item(self, space): return self.get_dtype(space).itemtype.to_builtin_type(space, self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit, rgc +from rpython.rlib.rarithmetic import ovfcheck from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ @@ -409,6 +410,7 @@ make_sure_not_resized(strides) make_sure_not_resized(backstrides) self.shape = shape + # already tested for overflow in from_shape_and_storage self.size = support.product(shape) * dtype.elsize self.order = order self.dtype = dtype @@ -428,9 +430,9 @@ raise oefmt(space.w_ValueError, "sequence too large; must be smaller than %d", NPY.MAXDIMS) try: - support.product(new_shape) * self.dtype.elsize + ovfcheck(support.product_check(new_shape) * self.dtype.elsize) except OverflowError as e: - raise oefmt(space.w_ValueError, "array is too big") + raise oefmt(space.w_ValueError, "array is too big.") strides, backstrides = calc_strides(new_shape, self.dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, self, @@ -457,8 +459,11 @@ storage=lltype.nullptr(RAW_STORAGE), zero=True): gcstruct = V_OBJECTSTORE flags = NPY.ARRAY_ALIGNED | NPY.ARRAY_WRITEABLE - length = support.product(shape) - self.size = length * dtype.elsize + try: + length = support.product_check(shape) + self.size = ovfcheck(length * dtype.elsize) + except OverflowError: + raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.") if storage == lltype.nullptr(RAW_STORAGE): if dtype.num == NPY.OBJECT: storage = dtype.itemtype.malloc(length * dtype.elsize, 
zero=True) @@ -542,7 +547,10 @@ self.gcstruct = parent.gcstruct self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.elsize + try: + self.size = ovfcheck(support.product_check(shape) * self.dtype.elsize) + except OverflowError: + raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.") self.start = start self.orig_arr = orig_arr flags = parent.flags & NPY.ARRAY_ALIGNED @@ -564,9 +572,9 @@ raise oefmt(space.w_ValueError, "sequence too large; must be smaller than %d", NPY.MAXDIMS) try: - support.product(new_shape) * self.dtype.elsize + ovfcheck(support.product_check(new_shape) * self.dtype.elsize) except OverflowError as e: - raise oefmt(space.w_ValueError, "array is too big") + raise oefmt(space.w_ValueError, "array is too big.") if len(self.get_shape()) < 2 or self.size == 0: # TODO: this code could be refactored into calc_strides # but then calc_strides would have to accept a stepping factor diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -153,7 +153,7 @@ dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - if support.product(shape) == 1: + if support.product(shape) == 1: # safe from overflow since from_shape checks w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) else: loop.assign(space, w_arr, elems_w) @@ -213,10 +213,9 @@ raise OperationError(space.w_ValueError, space.wrap( "negative dimensions are not allowed")) try: - support.product(shape) + support.product_check(shape) except OverflowError: - raise OperationError(space.w_ValueError, space.wrap( - "array is too big.")) + raise oefmt(space.w_ValueError, "array is too big.") return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) def empty(space, w_shape, w_dtype=None, w_order=None): diff --git a/pypy/module/micronumpy/flagsobj.py 
b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -57,6 +57,9 @@ self.flags & NPY.ARRAY_F_CONTIGUOUS or self.flags & NPY.ARRAY_C_CONTIGUOUS )) + def descr_get_num(self, space): + return space.wrap(self.flags) + def descr_getitem(self, space, w_item): key = space.str_w(w_item) if key == "C" or key == "CONTIGUOUS" or key == "C_CONTIGUOUS": @@ -122,4 +125,5 @@ aligned = GetSetProperty(W_FlagsObject.descr_get_aligned), fnc = GetSetProperty(W_FlagsObject.descr_get_fnc), forc = GetSetProperty(W_FlagsObject.descr_get_forc), + num = GetSetProperty(W_FlagsObject.descr_get_num), ) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -6,6 +6,7 @@ from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ @@ -611,6 +612,7 @@ "__array__(dtype) not implemented")) if type(self) is W_NDimArray: return self + # sz cannot overflow since self is valid sz = support.product(self.get_shape()) * self.get_dtype().elsize return W_NDimArray.from_shape_and_storage( space, self.get_shape(), self.implementation.storage, @@ -1405,9 +1407,9 @@ return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) try: - totalsize = support.product(shape) * dtype.base.elsize + totalsize = ovfcheck(support.product_check(shape) * dtype.base.elsize) except OverflowError as e: - raise oefmt(space.w_ValueError, "array is too big") + raise oefmt(space.w_ValueError, "array is too big.") impl = ConcreteArray(shape, dtype.base, order, strides, backstrides) w_ret = 
space.allocate_instance(W_NDimArray, w_subtype) W_NDimArray.__init__(w_ret, impl) diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -32,10 +32,16 @@ def product(s): i = 1 for x in s: + i *= x + return i + + at jit.unroll_safe +def product_check(s): + i = 1 + for x in s: i = ovfcheck(i * x) return i - def check_and_adjust_index(space, index, size, axis): if index < -size or index >= size: if axis >= 0: diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -30,6 +30,7 @@ assert a.flags.forc == True assert a.flags['FNC'] == False assert a.flags['FORC'] == True + assert a.flags.num == 1287 raises(KeyError, "a.flags['blah']") raises(KeyError, "a.flags['C_CONTIGUOUS'] = False") raises((TypeError, AttributeError), "a.flags.c_contiguous = False") @@ -38,6 +39,7 @@ import numpy as np a = np.int32(2) assert a.flags.c_contiguous == True + assert a.flags.num == 263 def test_compare(self): import numpy as np diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -270,7 +270,7 @@ exc = raises(ValueError, ndarray, [1,2,256]*10000) assert exc.value[0] == 'sequence too large; must be smaller than 32' exc = raises(ValueError, ndarray, [1,2,256]*10) - assert exc.value[0] == 'array is too big' + assert exc.value[0] == 'array is too big.' 
def test_ndmin(self): from numpy import array diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -1006,7 +1006,6 @@ assert isinstance(curarg, W_NDimArray) if len(arg_shapes[i]) != curarg.ndims(): # reshape - sz = product(curarg.get_shape()) * curarg.get_dtype().elsize with curarg.implementation as storage: inargs[i] = W_NDimArray.from_shape_and_storage( diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -8,8 +8,11 @@ 'set_param': 'interp_jit.set_param', 'residual_call': 'interp_jit.residual_call', 'not_from_assembler': 'interp_jit.W_NotFromAssembler', + 'get_jitcell_at_key': 'interp_jit.get_jitcell_at_key', + 'dont_trace_here': 'interp_jit.dont_trace_here', + 'trace_next_iteration': 'interp_jit.trace_next_iteration', + 'trace_next_iteration_hash': 'interp_jit.trace_next_iteration_hash', 'set_compile_hook': 'interp_resop.set_compile_hook', - 'set_optimize_hook': 'interp_resop.set_optimize_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', 'enable_debug': 'interp_resop.enable_debug', @@ -17,7 +20,6 @@ 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', - 'Box': 'interp_resop.WrappedBox', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', } diff --git a/pypy/module/pypyjit/hooks.py b/pypy/module/pypyjit/hooks.py --- a/pypy/module/pypyjit/hooks.py +++ b/pypy/module/pypyjit/hooks.py @@ -35,10 +35,10 @@ self._compile_hook(debug_info, is_bridge=True) def before_compile(self, debug_info): - self._optimize_hook(debug_info, is_bridge=False) + pass def before_compile_bridge(self, debug_info): - self._optimize_hook(debug_info, is_bridge=True) + pass def _compile_hook(self, debug_info, is_bridge): space = 
self.space @@ -46,7 +46,8 @@ if cache.in_recursion: return if space.is_true(cache.w_compile_hook): - w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge) + w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge, + cache.compile_hook_with_ops) cache.in_recursion = True try: try: @@ -57,33 +58,4 @@ finally: cache.in_recursion = False - def _optimize_hook(self, debug_info, is_bridge=False): - space = self.space - cache = space.fromcache(Cache) - if cache.in_recursion: - return - if space.is_true(cache.w_optimize_hook): - w_debug_info = W_JitLoopInfo(space, debug_info, is_bridge) - cache.in_recursion = True - try: - try: - w_res = space.call_function(cache.w_optimize_hook, - space.wrap(w_debug_info)) - if space.is_w(w_res, space.w_None): - return - l = [] - for w_item in space.listview(w_res): - item = space.interp_w(WrappedOp, w_item) - l.append(jit_hooks._cast_to_resop(item.op)) - del debug_info.operations[:] # modifying operations above is - # probably not a great idea since types may not work - # and we'll end up with half-working list and - # a segfault/fatal RPython error - for elem in l: - debug_info.operations.append(elem) - except OperationError, e: - e.write_unraisable(space, "jit hook ", cache.w_compile_hook) - finally: - cache.in_recursion = False - pypy_hooks = PyPyJitIface() diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py --- a/pypy/module/pypyjit/interp_jit.py +++ b/pypy/module/pypyjit/interp_jit.py @@ -5,11 +5,14 @@ from rpython.rlib.rarithmetic import r_uint, intmask from rpython.rlib.jit import JitDriver, hint, we_are_jitted, dont_look_inside -from rpython.rlib import jit -from rpython.rlib.jit import current_trace_length, unroll_parameters +from rpython.rlib import jit, jit_hooks +from rpython.rlib.jit import current_trace_length, unroll_parameters,\ + JitHookInterface +from rpython.rtyper.annlowlevel import cast_instance_to_gcref import pypy.interpreter.pyopcode # for side-effects from 
pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pycode import CO_GENERATOR, PyCode +from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pyopcode import ExitFrame, Yield from pypy.interpreter.baseobjspace import W_Root @@ -188,3 +191,100 @@ __call__ = interp2app(W_NotFromAssembler.descr_call), ) W_NotFromAssembler.typedef.acceptable_as_base_class = False + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def get_jitcell_at_key(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) + return space.wrap(bool(jit_hooks.get_jitcell_at_key( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode))) + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def dont_trace_here(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) + jit_hooks.dont_trace_here( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode) + return space.w_None + + at unwrap_spec(next_instr=int, is_being_profiled=bool, w_pycode=PyCode) + at dont_look_inside +def trace_next_iteration(space, next_instr, is_being_profiled, w_pycode): + ll_pycode = cast_instance_to_gcref(w_pycode) + jit_hooks.trace_next_iteration( + 'pypyjit', r_uint(next_instr), int(is_being_profiled), ll_pycode) + return space.w_None + + at unwrap_spec(hash=r_uint) + at dont_look_inside +def trace_next_iteration_hash(space, hash): + jit_hooks.trace_next_iteration_hash('pypyjit', hash) + return space.w_None + +# class Cache(object): +# in_recursion = False + +# def __init__(self, space): +# self.w_compile_bridge = space.w_None +# self.w_compile_loop = space.w_None + +# def set_compile_bridge(space, w_hook): +# cache = space.fromcache(Cache) +# assert w_hook is not None +# cache.w_compile_bridge = w_hook + +# def set_compile_loop(space, w_hook): +# from 
rpython.rlib.nonconst import NonConstant + +# cache = space.fromcache(Cache) +# assert w_hook is not None +# cache.w_compile_loop = w_hook +# cache.in_recursion = NonConstant(False) + +# class PyPyJitHookInterface(JitHookInterface): +# def after_compile(self, debug_info): +# space = self.space +# cache = space.fromcache(Cache) +# if cache.in_recursion: +# return +# l_w = [] +# if not space.is_true(cache.w_compile_loop): +# return +# for i, op in enumerate(debug_info.operations): +# if op.is_guard(): +# w_t = space.newtuple([space.wrap(i), space.wrap(op.getopnum()), space.wrap(op.getdescr().get_jitcounter_hash())]) +# l_w.append(w_t) +# try: +# cache.in_recursion = True +# try: +# space.call_function(cache.w_compile_loop, space.newlist(l_w)) +# except OperationError, e: +# e.write_unraisable(space, "jit hook ", cache.w_compile_bridge) +# finally: +# cache.in_recursion = False + +# def after_compile_bridge(self, debug_info): +# space = self.space +# cache = space.fromcache(Cache) +# if cache.in_recursion: +# return +# if not space.is_true(cache.w_compile_bridge): +# return +# w_hash = space.wrap(debug_info.fail_descr.get_jitcounter_hash()) +# try: +# cache.in_recursion = True +# try: +# space.call_function(cache.w_compile_bridge, w_hash) +# except OperationError, e: +# e.write_unraisable(space, "jit hook ", cache.w_compile_bridge) +# finally: +# cache.in_recursion = False + +# def before_compile(self, debug_info): +# pass + +# def before_compile_bridge(self, debug_info): +# pass + +# pypy_hooks = PyPyJitHookInterface() + diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance, hlstr from rpython.rtyper.rclass import OBJECT -from rpython.jit.metainterp.resoperation import rop +#from rpython.jit.metainterp.resoperation import rop 
from rpython.rlib.nonconst import NonConstant from rpython.rlib import jit_hooks from rpython.rlib.jit import Counters @@ -22,7 +22,6 @@ def __init__(self, space): self.w_compile_hook = space.w_None self.w_abort_hook = space.w_None - self.w_optimize_hook = space.w_None def getno(self): self.no += 1 @@ -43,8 +42,9 @@ else: return space.wrap(greenkey_repr) -def set_compile_hook(space, w_hook): - """ set_compile_hook(hook) + at unwrap_spec(operations=bool) +def set_compile_hook(space, w_hook, operations=True): + """ set_compile_hook(hook, operations=True) Set a compiling hook that will be called each time a loop is compiled. @@ -58,25 +58,9 @@ cache = space.fromcache(Cache) assert w_hook is not None cache.w_compile_hook = w_hook + cache.compile_hook_with_ops = operations cache.in_recursion = NonConstant(False) -def set_optimize_hook(space, w_hook): - """ set_optimize_hook(hook) - - Set a compiling hook that will be called each time a loop is optimized, - but before assembler compilation. This allows adding additional - optimizations on Python level. - - The hook will be called with the pypyjit.JitLoopInfo object. Refer to it's - docstring for details. 
- - Result value will be the resulting list of operations, or None - """ - cache = space.fromcache(Cache) - cache.w_optimize_hook = w_hook - cache.in_recursion = NonConstant(False) - - def set_abort_hook(space, w_hook): """ set_abort_hook(hook) @@ -96,6 +80,9 @@ cache.in_recursion = NonConstant(False) def wrap_oplist(space, logops, operations, ops_offset=None): + # this function is called from the JIT + from rpython.jit.metainterp.resoperation import rop + l_w = [] jitdrivers_sd = logops.metainterp_sd.jitdrivers_sd for op in operations: @@ -103,117 +90,58 @@ ofs = -1 else: ofs = ops_offset.get(op, 0) - if op.opnum == rop.DEBUG_MERGE_POINT: + num = op.getopnum() + name = op.getopname() + if num == rop.DEBUG_MERGE_POINT: jd_sd = jitdrivers_sd[op.getarg(0).getint()] greenkey = op.getarglist()[3:] repr = jd_sd.warmstate.get_location_str(greenkey) w_greenkey = wrap_greenkey(space, jd_sd.jitdriver, greenkey, repr) - l_w.append(DebugMergePoint(space, jit_hooks._cast_to_gcref(op), + l_w.append(DebugMergePoint(space, name, logops.repr_of_resop(op), jd_sd.jitdriver.name, op.getarg(1).getint(), op.getarg(2).getint(), w_greenkey)) else: - l_w.append(WrappedOp(jit_hooks._cast_to_gcref(op), ofs, - logops.repr_of_resop(op))) + l_w.append(WrappedOp(name, ofs, logops.repr_of_resop(op))) return l_w + at unwrap_spec(offset=int, repr=str, name=str) +def descr_new_resop(space, w_tp, name, offset=-1, repr=''): + return WrappedOp(name, offset, repr) -class WrappedBox(W_Root): - """ A class representing a single box - """ - def __init__(self, llbox): - self.llbox = llbox - - def descr_getint(self, space): - if not jit_hooks.box_isint(self.llbox): - raise OperationError(space.w_NotImplementedError, - space.wrap("Box has no int value")) - return space.wrap(jit_hooks.box_getint(self.llbox)) - - at unwrap_spec(no=int) -def descr_new_box(space, w_tp, no): - return WrappedBox(jit_hooks.boxint_new(no)) - -WrappedBox.typedef = TypeDef( - 'Box', - __new__ = interp2app(descr_new_box), - getint = 
interp2app(WrappedBox.descr_getint), -) - - at unwrap_spec(num=int, offset=int, repr=str, w_res=W_Root) -def descr_new_resop(space, w_tp, num, w_args, w_res, offset=-1, - repr=''): - args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in - space.listview(w_args)] - if space.is_none(w_res): - llres = jit_hooks.emptyval() - else: - if not isinstance(w_res, WrappedBox): - raise OperationError(space.w_TypeError, space.wrap( - "expected box type, got %s" % space.type(w_res))) - llres = w_res.llbox - return WrappedOp(jit_hooks.resop_new(num, args, llres), offset, repr) - - at unwrap_spec(repr=str, jd_name=str, call_depth=int, call_id=int) -def descr_new_dmp(space, w_tp, w_args, repr, jd_name, call_depth, call_id, + at unwrap_spec(repr=str, name=str, jd_name=str, call_depth=int, call_id=int) +def descr_new_dmp(space, w_tp, name, repr, jd_name, call_depth, call_id, w_greenkey): - args = [space.interp_w(WrappedBox, w_arg).llbox for w_arg in - space.listview(w_args)] - num = rop.DEBUG_MERGE_POINT - return DebugMergePoint(space, - jit_hooks.resop_new(num, args, jit_hooks.emptyval()), + return DebugMergePoint(space, name, repr, jd_name, call_depth, call_id, w_greenkey) class WrappedOp(W_Root): """ A class representing a single ResOperation, wrapped nicely """ - def __init__(self, op, offset, repr_of_resop): - self.op = op + def __init__(self, name, offset, repr_of_resop): self.offset = offset + self.name = name self.repr_of_resop = repr_of_resop def descr_repr(self, space): return space.wrap(self.repr_of_resop) - def descr_num(self, space): - return space.wrap(jit_hooks.resop_getopnum(self.op)) - def descr_name(self, space): - return space.wrap(hlstr(jit_hooks.resop_getopname(self.op))) - - @unwrap_spec(no=int) - def descr_getarg(self, space, no): - try: - box = jit_hooks.resop_getarg(self.op, no) - except IndexError: - raise OperationError(space.w_IndexError, - space.wrap("Index out of range")) - return WrappedBox(box) - - @unwrap_spec(no=int, w_box=WrappedBox) - def 
descr_setarg(self, space, no, w_box): - jit_hooks.resop_setarg(self.op, no, w_box.llbox) - - def descr_getresult(self, space): - return WrappedBox(jit_hooks.resop_getresult(self.op)) - - def descr_setresult(self, space, w_box): - box = space.interp_w(WrappedBox, w_box) - jit_hooks.resop_setresult(self.op, box.llbox) + return space.wrap(self.name) class DebugMergePoint(WrappedOp): """ A class representing Debug Merge Point - the entry point to a jitted loop. """ - def __init__(self, space, op, repr_of_resop, jd_name, call_depth, call_id, - w_greenkey): + def __init__(self, space, name, repr_of_resop, jd_name, call_depth, + call_id, w_greenkey): - WrappedOp.__init__(self, op, -1, repr_of_resop) + WrappedOp.__init__(self, name, -1, repr_of_resop) self.jd_name = jd_name self.call_depth = call_depth self.call_id = call_id @@ -237,12 +165,7 @@ __doc__ = WrappedOp.__doc__, __new__ = interp2app(descr_new_resop), __repr__ = interp2app(WrappedOp.descr_repr), - num = GetSetProperty(WrappedOp.descr_num), name = GetSetProperty(WrappedOp.descr_name), - getarg = interp2app(WrappedOp.descr_getarg), - setarg = interp2app(WrappedOp.descr_setarg), - result = GetSetProperty(WrappedOp.descr_getresult, - WrappedOp.descr_setresult), offset = interp_attrproperty("offset", cls=WrappedOp), ) WrappedOp.typedef.acceptable_as_base_class = False @@ -278,14 +201,18 @@ From noreply at buildbot.pypy.org Wed Oct 7 02:45:36 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 02:45:36 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: create FunctionRepr Message-ID: <20151007004536.9D5EA1C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80006:64d0e4527392 Date: 2015-10-01 04:47 +0100 http://bitbucket.org/pypy/pypy/changeset/64d0e4527392/ Log: create FunctionRepr diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -35,14 +35,17 @@ def rtyper_makerepr(self, rtyper): kind = 
self.getKind() if issubclass(kind, FunctionDesc): - sample = self.any_description() - callfamily = sample.querycallfamily() - if callfamily and callfamily.total_calltable_size > 0: - getRepr = FunctionsPBCRepr - if small_cand(rtyper, self): - getRepr = SmallFunctionSetPBCRepr + if len(self.descriptions) == 1 and not self.can_be_None: + getRepr = FunctionRepr else: - getRepr = getFrozenPBCRepr + sample = self.any_description() + callfamily = sample.querycallfamily() + if callfamily and callfamily.total_calltable_size > 0: + getRepr = FunctionsPBCRepr + if small_cand(rtyper, self): + getRepr = SmallFunctionSetPBCRepr + else: + getRepr = getFrozenPBCRepr elif issubclass(kind, ClassDesc): # user classes getRepr = ClassesPBCRepr @@ -357,6 +360,9 @@ else: return hop.llops.convertvar(v, rresult, hop.r_result) +class FunctionRepr(FunctionsPBCRepr): + pass + class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): # this check makes sense because both source and dest repr are FunctionsPBCRepr From noreply at buildbot.pypy.org Wed Oct 7 02:45:38 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 02:45:38 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: Move some methods to FunctionRepr and simplify FunctionsPBCRepr Message-ID: <20151007004538.B57921C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80007:09379b1fc493 Date: 2015-10-06 21:12 +0100 http://bitbucket.org/pypy/pypy/changeset/09379b1fc493/ Log: Move some methods to FunctionRepr and simplify FunctionsPBCRepr diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -175,27 +175,23 @@ class FunctionsPBCRepr(CanBeNull, Repr): - """Representation selected for a PBC of function(s).""" + """Representation selected for a PBC of functions.""" def __init__(self, rtyper, s_pbc): self.rtyper = rtyper self.s_pbc = s_pbc self.callfamily = 
s_pbc.any_description().getcallfamily() - if len(s_pbc.descriptions) == 1 and not s_pbc.can_be_None: - # a single function - self.lowleveltype = Void + llct = get_concrete_calltable(self.rtyper, self.callfamily) + self.concretetable = llct.table + self.uniquerows = llct.uniquerows + if len(llct.uniquerows) == 1: + row = llct.uniquerows[0] + self.lowleveltype = row.fntype else: - llct = get_concrete_calltable(self.rtyper, self.callfamily) - self.concretetable = llct.table - self.uniquerows = llct.uniquerows - if len(llct.uniquerows) == 1: - row = llct.uniquerows[0] - self.lowleveltype = row.fntype - else: - # several functions, each with several specialized variants. - # each function becomes a pointer to a Struct containing - # pointers to its variants. - self.lowleveltype = self.setup_specfunc() + # several functions, each with several specialized variants. + # each function becomes a pointer to a Struct containing + # pointers to its variants. + self.lowleveltype = self.setup_specfunc() self.funccache = {} def setup_specfunc(self): @@ -227,34 +223,31 @@ return self.funccache[funcdesc] except KeyError: pass - if self.lowleveltype is Void: - result = None + llfns = {} + found_anything = False + for row in self.uniquerows: + if funcdesc in row: + llfn = row[funcdesc] + found_anything = True + else: + # missing entry -- need a 'null' of the type that matches + # this row + llfn = self.rtyper.type_system.null_callable(row.fntype) + llfns[row.attrname] = llfn + if len(self.uniquerows) == 1: + if found_anything: + result = llfn # from the loop above + else: + # extremely rare case, shown only sometimes by + # test_bug_callfamily: don't emit NULL, because that + # would be interpreted as equal to None... It should + # never be called anyway. 
+ result = rffi.cast(self.lowleveltype, ~len(self.funccache)) else: - llfns = {} - found_anything = False - for row in self.uniquerows: - if funcdesc in row: - llfn = row[funcdesc] - found_anything = True - else: - # missing entry -- need a 'null' of the type that matches - # this row - llfn = self.rtyper.type_system.null_callable(row.fntype) - llfns[row.attrname] = llfn - if len(self.uniquerows) == 1: - if found_anything: - result = llfn # from the loop above - else: - # extremely rare case, shown only sometimes by - # test_bug_callfamily: don't emit NULL, because that - # would be interpreted as equal to None... It should - # never be called anyway. - result = rffi.cast(self.lowleveltype, ~len(self.funccache)) - else: - # build a Struct with all the values collected in 'llfns' - result = self.create_specfunc() - for attrname, llfn in llfns.items(): - setattr(result, attrname, llfn) + # build a Struct with all the values collected in 'llfns' + result = self.create_specfunc() + for attrname, llfn in llfns.items(): + setattr(result, attrname, llfn) self.funccache[funcdesc] = result return result @@ -263,8 +256,6 @@ value = value.im_func # unbound method -> bare function elif isinstance(value, staticmethod): value = value.__get__(42) # hackish, get the function wrapped by staticmethod - if self.lowleveltype is Void: - return None if value is None: null = self.rtyper.type_system.null_callable(self.lowleveltype) return null @@ -277,15 +268,7 @@ 'index' and 'shape' tells which of its items we are interested in. 
""" assert v.concretetype == self.lowleveltype - if self.lowleveltype is Void: - assert len(self.s_pbc.descriptions) == 1 - # lowleveltype wouldn't be Void otherwise - funcdesc, = self.s_pbc.descriptions - row_of_one_graph = self.callfamily.calltables[shape][index] - graph = row_of_one_graph[funcdesc] - llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) - elif len(self.uniquerows) == 1: + if len(self.uniquerows) == 1: return v else: # 'v' is a Struct pointer, read the corresponding field @@ -293,34 +276,6 @@ cname = inputconst(Void, row.attrname) return self.get_specfunc_row(llop, v, cname, row.fntype) - def get_unique_llfn(self): - # try to build a unique low-level function. Avoid to use - # whenever possible! Doesn't work with specialization, multiple - # different call sites, etc. - if self.lowleveltype is not Void: - raise TyperError("cannot pass multiple functions here") - assert len(self.s_pbc.descriptions) == 1 - # lowleveltype wouldn't be Void otherwise - funcdesc, = self.s_pbc.descriptions - tables = [] # find the simple call in the calltable - for shape, table in self.callfamily.calltables.items(): - if not shape[1] and not shape[2]: - tables.append(table) - if len(tables) != 1: - raise TyperError("cannot pass a function with various call shapes") - table, = tables - graphs = [] - for row in table: - if funcdesc in row: - graphs.append(row[funcdesc]) - if not graphs: - raise TyperError("cannot pass here a function that is not called") - graph = graphs[0] - if graphs != [graph] * len(graphs): - raise TyperError("cannot pass a specialized function here") - llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) - def get_concrete_llfn(self, s_pbc, args_s, op): bk = self.rtyper.annotator.bookkeeper funcdesc, = s_pbc.descriptions @@ -361,7 +316,55 @@ return hop.llops.convertvar(v, rresult, hop.r_result) class FunctionRepr(FunctionsPBCRepr): - pass + """Repr for a constant function""" + def __init__(self, 
rtyper, s_pbc): + self.rtyper = rtyper + self.s_pbc = s_pbc + self.callfamily = s_pbc.any_description().getcallfamily() + self.lowleveltype = Void + + def convert_desc(self, funcdesc): + return None + + def convert_const(self, value): + return None + + def convert_to_concrete_llfn(self, v, shape, index, llop): + """Convert the variable 'v' to a variable referring to a concrete + low-level function. In case the call table contains multiple rows, + 'index' and 'shape' tells which of its items we are interested in. + """ + assert v.concretetype == self.lowleveltype + funcdesc, = self.s_pbc.descriptions + row_of_one_graph = self.callfamily.calltables[shape][index] + graph = row_of_one_graph[funcdesc] + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + + def get_unique_llfn(self): + # try to build a unique low-level function. Avoid to use + # whenever possible! Doesn't work with specialization, multiple + # different call sites, etc. + funcdesc, = self.s_pbc.descriptions + tables = [] # find the simple call in the calltable + for shape, table in self.callfamily.calltables.items(): + if not shape[1] and not shape[2]: + tables.append(table) + if len(tables) != 1: + raise TyperError("cannot pass a function with various call shapes") + table, = tables + graphs = [] + for row in table: + if funcdesc in row: + graphs.append(row[funcdesc]) + if not graphs: + raise TyperError("cannot pass here a function that is not called") + graph = graphs[0] + if graphs != [graph] * len(graphs): + raise TyperError("cannot pass a specialized function here") + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): From noreply at buildbot.pypy.org Wed Oct 7 02:45:42 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 02:45:42 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: hg merge default Message-ID: 
<20151007004542.70DE81C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80008:3b3699266740 Date: 2015-10-07 01:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3b3699266740/ Log: hg merge default diff too long, truncating to 2000 out of 6297 lines diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in 
ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, 
model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, 
const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in 
typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise 
api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py 
@@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): 
_attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and 
( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 +773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. 
- for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) - prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,17 @@ .. branch: shadowstack-no-move-2 Issue #2141: fix a crash on Windows and OS/X and ARM when running at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. + +.. 
branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists + seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. 
- seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, 
W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + 
if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ @unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/ccallback.py 
b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -178,7 +178,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,13 +200,21 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") + at jit.jit_callback("CFFI") +def py_invoke_callback(callback, ll_res, ll_args): + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) - at jit.jit_callback("CFFI") def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ @@ -228,13 +237,7 @@ space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. 
diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -125,12 +126,25 @@ cdata[0] = value +# XXX explicitly use an integer type instead of lltype.UniChar here, +# because for now the latter is defined as unsigned by RPython (even +# though it may be signed when 'wchar_t' is written to C). 
+WCHAR_INT = {(2, False): rffi.USHORT, + (4, False): rffi.UINT, + (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), + rfficache.signof_c_type('wchar_t')] +WCHAR_INTP = rffi.CArrayPtr(WCHAR_INT) + class W_CTypePrimitiveUniChar(W_CTypePrimitiveCharOrUniChar): _attrs_ = [] + if rffi.r_wchar_t.SIGN: + def write_raw_integer_data(self, w_cdata, value): + w_cdata.write_raw_signed_data(value) + def cast_to_int(self, cdata): - unichardata = rffi.cast(rffi.CWCHARP, cdata) - return self.space.wrap(ord(unichardata[0])) + unichardata = rffi.cast(WCHAR_INTP, cdata) + return self.space.wrap(unichardata[0]) def convert_to_object(self, cdata): unichardata = rffi.cast(rffi.CWCHARP, cdata) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, 
length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) 
- try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is 
still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = 
rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -34,6 +34,7 @@ i += 1 _, size = struct.unpack("ll", s[i:i + 2 * WORD]) i += 2 * WORD + size * struct.calcsize("P") + i += WORD # thread id elif s[i] == '\x02': i += 1 _, size = struct.unpack("ll", s[i:i + 2 * WORD]) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -844,6 +844,18 @@ b.byteswap() assert a != b + def test_unicode_ord_positive(self): + import sys + if sys.maxunicode == 0xffff: + skip("test for 32-bit unicodes") + a = self.array('u', '\xff\xff\xff\xff') + assert len(a) == 1 + assert repr(a[0]) == "u'\Uffffffff'" + if sys.maxint == 2147483647: + assert ord(a[0]) == -1 + else: + assert ord(a[0]) == 4294967295 + def test_weakref(self): import weakref a = self.array('c', 'Hi!') diff 
--git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -117,12 +117,14 @@ return W_NDimArray(impl) @staticmethod - def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, w_arr, dtype=None): from pypy.module.micronumpy import concrete - + w_base = w_arr + if w_arr.implementation.base() is not None: + w_base = w_arr.implementation.base() impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, - orig_arr, dtype) - return wrap_impl(space, space.type(orig_arr), orig_arr, impl) + w_base, dtype) + return wrap_impl(space, space.type(w_arr), w_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,6 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit, rgc from rpython.rlib.rarithmetic import ovfcheck +from rpython.rlib.listsort import make_timsort_class from rpython.rlib.buffer import Buffer from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ @@ -17,6 +18,19 @@ is_f_contiguous) from rpython.rlib.objectmodel import keepalive_until_here +TimSort = make_timsort_class() +class StrideSort(TimSort): + ''' + argsort (return the indices to sort) a list of strides + ''' + def __init__(self, rangelist, strides): + self.strides = strides + TimSort.__init__(self, rangelist) + + def lt(self, a, b): + return self.strides[a] < self.strides[b] + + class BaseConcreteArray(object): _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', 'strides[*]', 'backstrides[*]', 'order', 'gcstruct', @@ -354,12 +368,15 @@ elif order != self.order: t_strides, backstrides = 
calc_strides(shape, dtype, order) else: - mins = strides[0] + indx_array = range(len(strides)) + list_sorter = StrideSort(indx_array, strides) + list_sorter.sort() t_elsize = dtype.elsize - for s in strides: - if s < mins: - mins = s - t_strides = [s * t_elsize / mins for s in strides] + t_strides = strides[:] + base = dtype.elsize + for i in indx_array: + t_strides[i] = base + base *= shape[i] backstrides = calc_backstrides(t_strides, shape) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter +from . 
import constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -82,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -133,7 +135,9 @@ return w_arr else: imp = w_object.implementation - w_base = imp.base() or w_object + w_base = w_object + if imp.base() is not None: + w_base = imp.base() with imp as storage: sz = support.product(w_object.get_shape()) * dtype.elsize return W_NDimArray.from_shape_and_storage(space, @@ -141,16 +145,11 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -163,7 +162,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -171,20 +169,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = 
scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return _find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for 
w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype @@ -199,6 +259,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -747,8 +747,12 @@ return out def descr_get_ctypes(self, space): - raise OperationError(space.w_NotImplementedError, space.wrap( - "ctypes not implemented yet")) + w_result 
= space.appexec([self], """(arr): + from numpy.core import _internal + p_data = arr.__array_interface__['data'][0] + return _internal._ctypes(arr, p_data) + """) + return w_result def buffer_w(self, space, flags): return self.implementation.get_buffer(space, True) @@ -858,6 +862,8 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), get_dtype_cache(space).w_longdtype) + if ret.get_size() < 1: + return ret if side == NPY.SEARCHLEFT: binsearch = loop.binsearch_left else: @@ -1304,6 +1310,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -189,67 +189,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, 
is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) - for w_elem in batch: - if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - w_array = space.lookup(w_elem, '__array__') - if w_array is not None: - # Make sure we call the array implementation of listview, - # since for some ndarray subclasses (matrix, for instance) - # listview does not reduce but rather returns the same class - w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) - new_batch += space.listview(w_elem) - shape.append(size) - batch = new_batch - - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -39,7 +39,10 @@ def product_check(s): i = 1 for x in s: - i = ovfcheck(i * x) + try: + i = ovfcheck(i * x) + except OverflowError: + raise return i def check_and_adjust_index(space, index, size, axis): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -169,7 +169,7 @@ 
[1, 1, 1, 105, 105] def test_find_shape(self): - from pypy.module.micronumpy.strides import find_shape_and_elems + from pypy.module.micronumpy.ctors import find_shape_and_elems space = self.space shape, elems = find_shape_and_elems(space, @@ -2218,7 +2218,7 @@ assert _weakref.ref(a) def test_astype(self): - from numpy import array, arange + from numpy import array, arange, empty b = array(1).astype(float) assert b == 1 assert b.dtype == float @@ -2273,14 +2273,36 @@ b = a.astype('f4', order='C', copy=False) assert a is b + a = empty([3, 3, 3, 3], 'uint8') + a[:] = 0 + b = a[2] + c = b[:, :2, :] + d = c.swapaxes(1, -1) + e = d.astype('complex128') + assert e.shape == (3, 3, 2) + assert e.strides == (96, 16, 48) + assert (e.real == d).all() + def test_base(self): - from numpy import array + from numpy import array, empty assert array(1).base is None assert array([1, 2]).base is None a = array([1, 2, 3, 4]) b = a[::2] assert b.base is a + a = empty([3, 3, 3, 3], 'uint8') + a[:] = 0 + b = a[2] + assert b.base.base is None + c = b[:, :2, :] + d = c.swapaxes(1, -1) + assert c.base.base is None + assert d.base.base is None + assert d.shape == (3, 3, 2) + assert d.__array_interface__['data'][0] == \ + a.__array_interface__['data'][0] + a.strides[0] * 2 + def test_byteswap(self): from numpy import array @@ -2453,6 +2475,18 @@ a.fill(12) assert (a == u'1').all() + def test_unicode_record_array(self) : + from numpy import dtype, array + t = dtype([('a', 'S3'), ('b', 'U2')]) + x = array([('a', u'b')], dtype=t) + assert str(x) == "[('a', u'b')]" + + t = dtype([('a', 'U3'), ('b', 'S2')]) + x = array([(u'a', 'b')], dtype=t) + x['a'] = u'1' + assert str(x) == "[(u'1', 'b')]" + + def test_boolean_indexing(self): import numpy as np a = np.zeros((1, 3)) @@ -2497,10 +2531,10 @@ assert b.shape == b[...].shape assert (b == b[...]).all() - a = np.arange(6).reshape(2, 3) + a = np.arange(6) if '__pypy__' in sys.builtin_module_names: raises(ValueError, "a[..., ...]") - b = a [..., 0] + b = 
a.reshape(2, 3)[..., 0] assert (b == [0, 3]).all() assert b.base is a @@ -2675,7 +2709,7 @@ "input array from shape (3,1) into shape (3)" a[:, 1] = b[:,0] > 0.5 assert (a == [[0, 1], [0, 1], [0, 1]]).all() - + def test_ufunc(self): from numpy import array @@ -3834,7 +3868,7 @@ assert a[0]['y'] == 2 assert a[1]['y'] == 1 - + a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)]) assert a['b'].shape == (1, 0) b = loads(dumps(a)) diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -480,3 +480,9 @@ u = unicode_(u'Aÿ') # raises(UnicodeEncodeError, "str(u)") # XXX assert repr(u) == repr(u'Aÿ') + + def test_binop_with_sequence(self): + import numpy as np + c = np.float64(1.) + [1.] 
+ assert isinstance(c, np.ndarray) + assert (c == [2.]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2231,9 +2231,9 @@ index = i + offset + 4*k data = rffi.cast(Int32.T, ord(box._value[k])) raw_storage_setitem_unaligned(storage, index, data) - for k in range(size, width // 4): - index = i + offset + 4*k - data = rffi.cast(Int32.T, 0) + # zero out the remaining memory + for index in range(size * 4 + i + offset, width): + data = rffi.cast(Int8.T, 0) raw_storage_setitem_unaligned(storage, index, data) def read(self, arr, i, offset, dtype): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -479,6 +479,7 @@ dt_in, dt_out = self._calc_dtype(space, dtype, out, casting) return dt_in, dt_out, self.func + @jit.unroll_safe def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'): if arg_dtype.is_object(): return arg_dtype, arg_dtype @@ -672,6 +673,7 @@ "requested type has type code '%s'" % (self.name, dtype.char)) + @jit.unroll_safe def _calc_dtype(self, space, l_dtype, r_dtype, out, casting, w_arg1, w_arg2): if l_dtype.is_object() or r_dtype.is_object(): diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,8 +15,12 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', - 'enable_debug': 'interp_resop.enable_debug', - 'disable_debug': 'interp_resop.disable_debug', + # those things are disabled because they have bugs, but if + # they're found to be useful, fix test_ztranslation_jit_stats + # in the backend first. 
get_stats_snapshot still produces + # correct loop_runs if PYPYLOG is correct + #'enable_debug': 'interp_resop.enable_debug', + #'disable_debug': 'interp_resop.disable_debug', 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py --- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -7,10 +7,11 @@ [2 ** n - 1 for 
n in range(26)]) def test_newstr_constant_size(self): - for size in TestAlloc.SIZES: + for size in sorted(TestAlloc.SIZES): yield self.newstr_constant_size, size def newstr_constant_size(self, size): + print 'size =', size src = """if 1: N = %(size)d part_a = 'a' * N diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -248,3 +248,42 @@ guard_false(i157, descr=...) jump(..., descr=...) """) + + def test_mixed_div(self): + N = 1500 + def main(): + N = 1500 + import _numpypy.multiarray as np + arr = np.zeros(N) + l = [arr[i]/2. for i in range(N)] + return l + log = self.run(main, []) + assert log.result == [0.] * N + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i92 = int_ge(i91, i37) + guard_false(i92, descr=...) + i93 = int_add(i91, 1) + setfield_gc(p23, i93, descr=) + i94 = int_ge(i91, i56) + guard_false(i94, descr=...) 
+ i96 = int_mul(i91, i58) From noreply at buildbot.pypy.org Wed Oct 7 08:54:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 08:54:11 +0200 (CEST) Subject: [pypy-commit] cffi default: Document __stdcall. Message-ID: <20151007065411.CA04B1C11B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2327:35fb1f6a0eac Date: 2015-10-07 08:54 +0200 http://bitbucket.org/cffi/cffi/changeset/35fb1f6a0eac/ Log: Document __stdcall. diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -494,11 +494,6 @@ return 0 lib.python_callback = python_callback -Windows: you can't yet specify the calling convention of callbacks. -(For regular calls, the correct calling convention should be -automatically inferred by the C backend.) Use an indirection, like -in the example just above. - Be careful when writing the Python callback function: if it returns an object of the wrong type, or more generally raises an exception, then the exception cannot be propagated. Instead, it is printed to stderr @@ -547,6 +542,47 @@ that frame by reading ``traceback.tb_frame.f_locals['argname']``. +Windows: calling conventions +---------------------------- + +On Win32, functions can have two main calling conventions: either +"cdecl" (the default), or "stdcall" (also known as "WINAPI"). There +are also other, rare calling conventions; these are not supported. + +When you issue calls from Python to C, the implementation is such that +it works with any of these two main calling conventions; you don't +have to specify it. However, if you manipulate variables of type +"function pointer" or declare callbacks, then the calling convention +must be correct. 
This is done by writing ``__cdecl`` or ``__stdcall`` +in the type, like in C:: + + @ffi.callback("int __stdcall(int, int)") + def AddNumbers(x, y): + return x + y + +or:: + + ffi.cdef(""" + struct foo_s { + int (__stdcall *MyFuncPtr)(int, int); + }; + """) + +``__cdecl`` is supported but is always the default so it can be left +out. In the ``cdef()``, you can also use ``WINAPI`` as equivalent to +``__stdcall``. As mentioned above, it is not needed (but doesn't +hurt) to say ``WINAPI`` or ``__stdcall`` when declaring a plain +function in the ``cdef()``. + +These calling convention specifiers are accepted but ignored on any +platform other than 32-bit Windows. + +*New in version 1.3:* the calling convention specifiers are not +recognized in previous versions. In API mode, you could work around +it by using an indirection, like in the example in the section about +Callbacks_. There was no way to use stdcall callbacks in ABI mode. + + FFI Interface ------------- diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -19,7 +19,12 @@ It also fixes corner cases like ``typedef const int T; T a;`` which would previously not consider ``a`` as a constant. -* XXX __stdcall +* Win32: support for ``__stdcall``. For callbacks and function + pointers; regular C functions don't need to have their `calling + convention`_ declared. + + +.. 
_`calling convention`: using.html#windows-calling-conventions v1.2.1 From noreply at buildbot.pypy.org Wed Oct 7 09:15:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 09:15:34 +0200 (CEST) Subject: [pypy-commit] cffi default: add a direct test Message-ID: <20151007071534.DE6351C069F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2328:7a99e5f6dada Date: 2015-10-07 09:16 +0200 http://bitbucket.org/cffi/cffi/changeset/7a99e5f6dada/ Log: add a direct test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -2327,9 +2327,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) - def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3438,3 +3435,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" From noreply at buildbot.pypy.org Wed Oct 7 09:43:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 09:43:11 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: A branch to implement win32-stdcall from cffi Message-ID: <20151007074311.7A0281C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80009:43e556124e1a Date: 2015-10-07 09:00 +0200 http://bitbucket.org/pypy/pypy/changeset/43e556124e1a/ Log: A branch to implement win32-stdcall from cffi From noreply at buildbot.pypy.org Wed Oct 7 09:43:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 09:43:13 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: import 
cffi/7a99e5f6dada Message-ID: <20151007074313.97C631C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80010:cb35f33376b6 Date: 2015-10-07 09:19 +0200 http://bitbucket.org/pypy/pypy/changeset/cb35f33376b6/ Log: import cffi/7a99e5f6dada diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -449,7 +460,14 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. 
+ abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,18 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, 
self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): 
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2316,9 +2316,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) - def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3427,3 +3424,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. 
mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -1,6 +1,6 @@ # Generated by pypy/tool/import_cffi.py import py -from cffi import FFI +from cffi import FFI, CDefError import math, os, sys import ctypes.util from cffi.backend_ctypes import CTypesBackend @@ -428,3 +428,59 @@ res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0 + + def test_explicit_cdecl_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tp = ffi.typeof(m.QueryPerformanceFrequency) + assert str(tp) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tpc = ffi.typeof(m.QueryPerformanceFrequency) + assert tpc is tp + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tps = ffi.typeof(m.QueryPerformanceFrequency) + assert tps is not tpc + assert str(tps) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef("typedef int (__cdecl *fnc_t)(int);") + ffi.cdef("typedef int (__stdcall *fns_t)(int);") + 
tpc = ffi.typeof("fnc_t") + tps = ffi.typeof("fns_t") + assert str(tpc) == "" + assert str(tps) == "" + # + fnc = ffi.cast("fnc_t", 0) + fns = ffi.cast("fns_t", 0) + ffi.new("fnc_t[]", [fnc]) + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + ffi.new("fns_t[]", [fns]) + + def test_stdcall_only_on_windows(self): + if sys.platform == 'win32': + py.test.skip("not-Windows-only test") + ffi = FFI(backend=self.Backend()) + ffi.cdef("double __stdcall sin(double x);") # stdcall ignored + m = ffi.dlopen(lib_m) + assert "double(*)(double)" in str(ffi.typeof(m.sin)) + x = m.sin(1.23) + assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -365,3 +365,17 @@ assert C.TWO == 2 assert C.NIL == 0 assert C.NEG == -1 + +def test_stdcall(): + ffi = FFI() + tp = ffi.typeof("int(*)(int __stdcall x(int)," + " long (__cdecl*y)(void)," + " short(WINAPI *z)(short))") + if sys.platform == 'win32': + stdcall = '__stdcall ' + else: + stdcall = '' + assert str(tp) == ( + "" % (stdcall, stdcall)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1221,25 +1221,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall 
cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): @@ -2261,3 +2242,180 @@ assert foo_s.fields[0][1].type is ffi.typeof("int") assert foo_s.fields[1][0] == 'b' assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + print 'cb1 =', cb1 + res = lib.call1(cb1) + assert res == 500*999*2 + print 'cb2 =', cb2 + print ffi.typeof(lib.call2) + print 'call2 =', lib.call2 + res = lib.call2(cb2) + print '...' 
+ assert res == -500*999*3 + print 'done' + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. 
+ ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + pt = 
lib.call1(lib.cb1) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(lib.cb2) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py @@ -342,3 +342,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(lib._CFFI_PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(lib._CFFI_PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(lib._CFFI_PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import py, sys from cffi import cffi_opcode @@ -47,3 +47,29 @@ def test_all_primitives(): for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) + + +def check_func(input, expected_output=None): + import _cffi_backend + ffi = _cffi_backend.FFI() + ct = ffi.typeof(ffi.callback(input, lambda: None)) + assert isinstance(ct, ffi.CType) + if sys.platform != 'win32': + expected_output = expected_output.replace('__stdcall *', '*') + assert ct.cname == expected_output + +def 
test_funcptr_stdcall(): + check_func("int(int)", "int(*)(int)") + check_func("int foobar(int)", "int(*)(int)") + check_func("int __stdcall(int)", "int(__stdcall *)(int)") + check_func("int __stdcall foobar(int)", "int(__stdcall *)(int)") + check_func("void __cdecl(void)", "void(*)()") + check_func("void __cdecl foobar(void)", "void(*)()") + check_func("void __stdcall(void)", "void(__stdcall *)()") + check_func("void __stdcall foobar(long, short)", + "void(__stdcall *)(long, short)") + check_func("void(void __cdecl(void), void __stdcall(void))", + "void(*)(void(*)(), void(__stdcall *)())") + +def test_variadic_overrides_stdcall(): + check("void (__stdcall*)(int, ...)", "void(*)(int, ...)") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1281,3 +1281,200 @@ """) assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = verify(ffi, 'test_win32_calling_convention_0', r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + res = lib.call1(cb1) + assert res == 500*999*2 + assert res == ffi.addressof(lib, 'call1')(cb1) + res = 
lib.call2(cb2) + assert res == -500*999*3 + assert res == ffi.addressof(lib, 'call2')(cb2) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + ptr_call1 = ffi.addressof(lib, 'call1') + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically 
corrected. But this does not apply to the 'cb' + # function pointer argument. + ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_2', """ + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall 
call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1201,25 +1201,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int 
call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): From noreply at buildbot.pypy.org Wed Oct 7 09:43:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 09:43:15 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: Pass around the abi of functions, and use it to write "__stdcall" in the repr Message-ID: <20151007074315.B78221C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80011:670a38662272 Date: 2015-10-07 09:43 +0200 http://bitbucket.org/pypy/pypy/changeset/670a38662272/ Log: Pass around the abi of functions, and use it to write "__stdcall" in the repr diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -44,8 +51,8 @@ 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +60,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py 
--- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,21 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, abi): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +46,7 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. 
- builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +78,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +86,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +102,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +113,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +188,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +262,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = 
llmemory.raw_malloc_usage(size) @@ -421,7 +424,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,10 +4,11 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit, rweakref +from rpython.rlib import jit, rweakref, clibffi from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform +from pypy.module import _cffi_backend from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr, ctypearray, ctypestruct, ctypevoid, ctypeenum) @@ -592,8 +593,9 @@ # ____________________________________________________________ - at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int) -def new_function_type(space, w_fargs, w_fresult, ellipsis=0): + at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int) +def new_function_type(space, w_fargs, w_fresult, ellipsis=0, + abi=_cffi_backend.FFI_DEFAULT_ABI): fargs = [] for w_farg in space.fixedview(w_fargs): if not isinstance(w_farg, ctypeobj.W_CType): @@ -602,28 +604,28 @@ if isinstance(w_farg, ctypearray.W_CTypeArray): w_farg = w_farg.ctptr fargs.append(w_farg) - return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) + return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi) -def _func_key_hash(unique_cache, fargs, fresult, ellipsis): +def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi): x = compute_identity_hash(fresult) for w_arg in fargs: y = compute_identity_hash(w_arg) x = intmask((1000003 * x) ^ y) - x ^= ellipsis + x ^= (ellipsis - abi) if 
unique_cache.for_testing: # constant-folded to False in translation; x &= 3 # but for test, keep only 2 bits of hash return x # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis, abi): try: - return _get_function_type(space, fargs, fresult, ellipsis) + return _get_function_type(space, fargs, fresult, ellipsis, abi) except KeyError: - return _build_function_type(space, fargs, fresult, ellipsis) + return _build_function_type(space, fargs, fresult, ellipsis, abi) @jit.elidable -def _get_function_type(space, fargs, fresult, ellipsis): +def _get_function_type(space, fargs, fresult, ellipsis, abi): # This function is elidable because if called again with exactly the # same arguments (and if it didn't raise KeyError), it would give # the same result, at least as long as this result is still live. @@ -633,18 +635,19 @@ # one such dict, but in case of hash collision, there might be # more. 
unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: ctype = weakdict.get(func_hash) if (ctype is not None and ctype.ctitem is fresult and ctype.fargs == fargs and - ctype.ellipsis == ellipsis): + ctype.ellipsis == ellipsis and + ctype.abi == abi): return ctype raise KeyError @jit.dont_look_inside -def _build_function_type(space, fargs, fresult, ellipsis): +def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # if ((fresult.size < 0 and @@ -658,9 +661,9 @@ raise oefmt(space.w_TypeError, "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: if weakdict.get(func_hash) is None: weakdict.set(func_hash, fct) From noreply at buildbot.pypy.org Wed Oct 7 10:00:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 10:00:31 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: Port the rest of the win32-stdcall branch Message-ID: <20151007080031.0AA531C0797@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80012:348cafc5db08 Date: 2015-10-07 09:59 +0200 http://bitbucket.org/pypy/pypy/changeset/348cafc5db08/ Log: Port the rest of the win32-stdcall branch diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import 
oefmt from pypy.interpreter.baseobjspace import W_Root +from pypy.module import _cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import parse_c_type @@ -164,16 +165,28 @@ OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END: num_args += 1 - ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0 + # + ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0 + abi = (getarg(opcodes[base_index + num_args]) & 0xFE) + if abi == 0: + abi = _cffi_backend.FFI_DEFAULT_ABI + elif abi == 2: + if _cffi_backend.has_stdcall: + abi = _cffi_backend.FFI_STDCALL + else: + abi = _cffi_backend.FFI_DEFAULT_ABI + else: + raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) + # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] - return fargs, fret, ellipsis + return fargs, fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): @@ -190,7 +203,7 @@ # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. 
if self.nostruct_ctype is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected @@ -207,7 +220,7 @@ locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: @@ -218,7 +231,7 @@ locs[0] == 'R') def unexpected_fn_type(self, ffi): - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c --- a/pypy/module/_cffi_backend/src/parse_c_type.c +++ b/pypy/module/_cffi_backend/src/parse_c_type.c @@ -51,6 +51,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -165,6 +168,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -236,7 +241,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". 
*/ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -253,6 +258,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -269,6 +280,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -286,7 +302,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -315,7 +338,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -339,8 +362,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -348,6 +370,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py --- a/pypy/module/_cffi_backend/test/test_parse_c_type.py +++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py @@ -338,3 +338,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def 
test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(cffi_opcode.PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) From noreply at buildbot.pypy.org Wed Oct 7 10:39:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 10:39:02 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: Win32 uses 8-bytes alignment Message-ID: <20151007083902.366241C11B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80013:0d7a6efb28c7 Date: 2015-10-07 10:39 +0200 http://bitbucket.org/pypy/pypy/changeset/0d7a6efb28c7/ Log: Win32 uses 8-bytes alignment diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -143,7 +143,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants From noreply at buildbot.pypy.org Wed Oct 7 12:26:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 12:26:04 +0200 (CEST) Subject: [pypy-commit] pypy cffi-stdcall: ready to merge Message-ID: <20151007102604.46E741C069F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-stdcall Changeset: r80014:9a795bd39d7f Date: 2015-10-07 
12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/9a795bd39d7f/ Log: ready to merge From noreply at buildbot.pypy.org Wed Oct 7 12:26:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 12:26:06 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge cffi-stdcall Message-ID: <20151007102606.97F0D1C069F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80015:773e6136f72e Date: 2015-10-07 12:25 +0200 http://bitbucket.org/pypy/pypy/changeset/773e6136f72e/ Log: hg merge cffi-stdcall Win32 support for the "__stdcall" calling convention diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". 
This construction should @@ -449,7 +460,14 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,18 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, 
finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: 
self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -44,8 +51,8 @@ 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +60,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 
'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,21 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, abi): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +46,7 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. 
- builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +78,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +86,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +102,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +113,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +188,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +262,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = 
llmemory.raw_malloc_usage(size) @@ -421,7 +424,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -143,7 +143,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,10 +4,11 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit, rweakref +from rpython.rlib import jit, rweakref, clibffi from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform +from pypy.module import _cffi_backend from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr, ctypearray, ctypestruct, ctypevoid, ctypeenum) @@ -592,8 +593,9 @@ # ____________________________________________________________ - at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int) -def new_function_type(space, w_fargs, w_fresult, ellipsis=0): + at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int) +def new_function_type(space, w_fargs, w_fresult, ellipsis=0, + abi=_cffi_backend.FFI_DEFAULT_ABI): fargs = [] for w_farg in space.fixedview(w_fargs): if not isinstance(w_farg, ctypeobj.W_CType): @@ -602,28 +604,28 @@ if 
isinstance(w_farg, ctypearray.W_CTypeArray): w_farg = w_farg.ctptr fargs.append(w_farg) - return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) + return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi) -def _func_key_hash(unique_cache, fargs, fresult, ellipsis): +def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi): x = compute_identity_hash(fresult) for w_arg in fargs: y = compute_identity_hash(w_arg) x = intmask((1000003 * x) ^ y) - x ^= ellipsis + x ^= (ellipsis - abi) if unique_cache.for_testing: # constant-folded to False in translation; x &= 3 # but for test, keep only 2 bits of hash return x # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis, abi): try: - return _get_function_type(space, fargs, fresult, ellipsis) + return _get_function_type(space, fargs, fresult, ellipsis, abi) except KeyError: - return _build_function_type(space, fargs, fresult, ellipsis) + return _build_function_type(space, fargs, fresult, ellipsis, abi) @jit.elidable -def _get_function_type(space, fargs, fresult, ellipsis): +def _get_function_type(space, fargs, fresult, ellipsis, abi): # This function is elidable because if called again with exactly the # same arguments (and if it didn't raise KeyError), it would give # the same result, at least as long as this result is still live. @@ -633,18 +635,19 @@ # one such dict, but in case of hash collision, there might be # more. 
unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: ctype = weakdict.get(func_hash) if (ctype is not None and ctype.ctitem is fresult and ctype.fargs == fargs and - ctype.ellipsis == ellipsis): + ctype.ellipsis == ellipsis and + ctype.abi == abi): return ctype raise KeyError @jit.dont_look_inside -def _build_function_type(space, fargs, fresult, ellipsis): +def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # if ((fresult.size < 0 and @@ -658,9 +661,9 @@ raise oefmt(space.w_TypeError, "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: if weakdict.get(func_hash) is None: weakdict.set(func_hash, fct) diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root +from pypy.module import _cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import parse_c_type @@ -164,16 +165,28 @@ OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END: num_args += 1 - ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0 + # + ellipsis = 
(getarg(opcodes[base_index + num_args]) & 0x01) != 0 + abi = (getarg(opcodes[base_index + num_args]) & 0xFE) + if abi == 0: + abi = _cffi_backend.FFI_DEFAULT_ABI + elif abi == 2: + if _cffi_backend.has_stdcall: + abi = _cffi_backend.FFI_STDCALL + else: + abi = _cffi_backend.FFI_DEFAULT_ABI + else: + raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) + # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] - return fargs, fret, ellipsis + return fargs, fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): @@ -190,7 +203,7 @@ # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. if self.nostruct_ctype is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected @@ -207,7 +220,7 @@ locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: @@ -218,7 +231,7 @@ locs[0] == 'R') def unexpected_fn_type(self, ffi): - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c --- a/pypy/module/_cffi_backend/src/parse_c_type.c +++ b/pypy/module/_cffi_backend/src/parse_c_type.c @@ -51,6 +51,9 @@ 
TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -165,6 +168,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -236,7 +241,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". */ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -253,6 +258,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -269,6 +280,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -286,7 +302,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -315,7 +338,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -339,8 +362,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -348,6 +370,9 @@ next_token(tok); } + if (abi != 0) + return 
parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2316,9 +2316,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) - def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3427,3 +3424,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py --- a/pypy/module/_cffi_backend/test/test_parse_c_type.py +++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py @@ -338,3 +338,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(cffi_opcode.PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + 
parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -1,6 +1,6 @@ # Generated by pypy/tool/import_cffi.py import py -from cffi import FFI +from cffi import FFI, CDefError import math, os, sys import ctypes.util from cffi.backend_ctypes import CTypesBackend @@ -428,3 +428,59 @@ res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0 + + def test_explicit_cdecl_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tp = ffi.typeof(m.QueryPerformanceFrequency) + assert str(tp) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tpc = ffi.typeof(m.QueryPerformanceFrequency) + assert tpc is tp + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tps = ffi.typeof(m.QueryPerformanceFrequency) + assert tps is not tpc + assert str(tps) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef("typedef int (__cdecl *fnc_t)(int);") + ffi.cdef("typedef int (__stdcall *fns_t)(int);") + tpc = ffi.typeof("fnc_t") + tps = ffi.typeof("fns_t") + assert str(tpc) == "" + assert str(tps) == "" + # + fnc = ffi.cast("fnc_t", 0) + fns = ffi.cast("fns_t", 0) + ffi.new("fnc_t[]", [fnc]) + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, 
"fns_t[]", [fnc]) + ffi.new("fns_t[]", [fns]) + + def test_stdcall_only_on_windows(self): + if sys.platform == 'win32': + py.test.skip("not-Windows-only test") + ffi = FFI(backend=self.Backend()) + ffi.cdef("double __stdcall sin(double x);") # stdcall ignored + m = ffi.dlopen(lib_m) + assert "double(*)(double)" in str(ffi.typeof(m.sin)) + x = m.sin(1.23) + assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -365,3 +365,17 @@ assert C.TWO == 2 assert C.NIL == 0 assert C.NEG == -1 + +def test_stdcall(): + ffi = FFI() + tp = ffi.typeof("int(*)(int __stdcall x(int)," + " long (__cdecl*y)(void)," + " short(WINAPI *z)(short))") + if sys.platform == 'win32': + stdcall = '__stdcall ' + else: + stdcall = '' + assert str(tp) == ( + "" % (stdcall, stdcall)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1221,25 +1221,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): @@ -2261,3 +2242,180 @@ assert foo_s.fields[0][1].type is ffi.typeof("int") assert foo_s.fields[1][0] == 'b' assert 
foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + print 'cb1 =', cb1 + res = lib.call1(cb1) + assert res == 500*999*2 + print 'cb2 =', cb2 + print ffi.typeof(lib.call2) + print 'call2 =', lib.call2 + res = lib.call2(cb2) + print '...' 
+ assert res == -500*999*3 + print 'done' + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. 
+ ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + pt = 
lib.call1(lib.cb1) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(lib.cb2) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py @@ -342,3 +342,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(lib._CFFI_PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(lib._CFFI_PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(lib._CFFI_PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py @@ -1,5 +1,5 @@ # Generated by pypy/tool/import_cffi.py -import py +import py, sys from cffi import cffi_opcode @@ -47,3 +47,29 @@ def test_all_primitives(): for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) + + +def check_func(input, expected_output=None): + import _cffi_backend + ffi = _cffi_backend.FFI() + ct = ffi.typeof(ffi.callback(input, lambda: None)) + assert isinstance(ct, ffi.CType) + if sys.platform != 'win32': + expected_output = expected_output.replace('__stdcall *', '*') + assert ct.cname == expected_output + +def 
test_funcptr_stdcall(): + check_func("int(int)", "int(*)(int)") + check_func("int foobar(int)", "int(*)(int)") + check_func("int __stdcall(int)", "int(__stdcall *)(int)") + check_func("int __stdcall foobar(int)", "int(__stdcall *)(int)") + check_func("void __cdecl(void)", "void(*)()") + check_func("void __cdecl foobar(void)", "void(*)()") + check_func("void __stdcall(void)", "void(__stdcall *)()") + check_func("void __stdcall foobar(long, short)", + "void(__stdcall *)(long, short)") + check_func("void(void __cdecl(void), void __stdcall(void))", + "void(*)(void(*)(), void(__stdcall *)())") + +def test_variadic_overrides_stdcall(): + check("void (__stdcall*)(int, ...)", "void(*)(int, ...)") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1281,3 +1281,200 @@ """) assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = verify(ffi, 'test_win32_calling_convention_0', r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + res = lib.call1(cb1) + assert res == 500*999*2 + assert res == ffi.addressof(lib, 'call1')(cb1) + res = 
lib.call2(cb2) + assert res == -500*999*3 + assert res == ffi.addressof(lib, 'call2')(cb2) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + ptr_call1 = ffi.addressof(lib, 'call1') + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically 
corrected. But this does not apply to the 'cb' + # function pointer argument. + ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_2', """ + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall 
call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1201,25 +1201,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int 
call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): From noreply at buildbot.pypy.org Wed Oct 7 12:26:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 12:26:08 +0200 (CEST) Subject: [pypy-commit] pypy default: document branch Message-ID: <20151007102608.AB7F31C069F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80016:4fa762be0a42 Date: 2015-10-07 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/4fa762be0a42/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -53,3 +53,6 @@ Fix performance regression on operations mixing numpy scalars and Python floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. From noreply at buildbot.pypy.org Wed Oct 7 13:25:10 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 13:25:10 +0200 (CEST) Subject: [pypy-commit] pypy py3.3-bootstrap: A check (it fails when running almost any test): during space.startup(), Message-ID: <20151007112510.06F831C11FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3-bootstrap Changeset: r80017:88a921f5ae6b Date: 2015-10-07 13:25 +0200 http://bitbucket.org/pypy/pypy/changeset/88a921f5ae6b/ Log: A check (it fails when running almost any test): during space.startup(), we must not call "import encodings". This is too early; the stdlib path will only be set up afterwards. 
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -420,6 +420,7 @@ raise if isinstance(w_mod, Module) and not w_mod.startup_called: w_mod.init(self) + self._basic_startup_done = True def finish(self): self.wait_for_thread_shutdown() diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -163,6 +163,11 @@ state = space.fromcache(CodecState) if state.codec_need_encodings: # registers new codecs. + # First, a check that we're not called very early---if we are, + # then after translation the code in app_main doesn't have a + # chance to set up the path to the stdlib. + if not we_are_translated(): + assert space._basic_startup_done # This import uses the "builtin" import method, and is needed # to bootstrap the full importlib module. w_import = space.getattr(space.builtin, space.wrap("__import__")) From noreply at buildbot.pypy.org Wed Oct 7 13:31:11 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Oct 2015 13:31:11 +0200 (CEST) Subject: [pypy-commit] pypy compress-numbering: pack jitcode and pc into one slot Message-ID: <20151007113111.BBE141C120E@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: compress-numbering Changeset: r80018:51545f543b79 Date: 2015-10-07 13:29 +0200 http://bitbucket.org/pypy/pypy/changeset/51545f543b79/ Log: pack jitcode and pc into one slot diff --git a/rpython/jit/codewriter/codewriter.py b/rpython/jit/codewriter/codewriter.py --- a/rpython/jit/codewriter/codewriter.py +++ b/rpython/jit/codewriter/codewriter.py @@ -28,7 +28,7 @@ self.transform_graph_to_jitcode(graph, jitcode, True) return jitcode - def transform_graph_to_jitcode(self, graph, jitcode, verbose): + def transform_graph_to_jitcode(self, graph, jitcode, verbose, index): """Transform a graph into a JitCode containing the same bytecode in a 
different format. """ @@ -58,6 +58,7 @@ # constants are cast to their normalized type (Signed, GCREF or # Float). self.assembler.assemble(ssarepr, jitcode) + jitcode.index = index # # print the resulting assembler if self.debug: @@ -67,13 +68,16 @@ log.info("making JitCodes...") self.callcontrol.grab_initial_jitcodes() count = 0 + all_jitcodes = [] for graph, jitcode in self.callcontrol.enum_pending_graphs(): - self.transform_graph_to_jitcode(graph, jitcode, verbose) + self.transform_graph_to_jitcode(graph, jitcode, verbose, len(all_jitcodes)) + all_jitcodes.append(jitcode) count += 1 if not count % 500: log.info("Produced %d jitcodes" % count) self.assembler.finished(self.callcontrol.callinfocollection) log.info("there are %d JitCode instances." % count) + return all_jitcodes def setup_vrefinfo(self, vrefinfo): # must be called at most once diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1611,6 +1611,7 @@ #debug_start('jit-blackhole') blackholeinterp = blackhole_from_resumedata( metainterp_sd.blackholeinterpbuilder, + metainterp_sd.jitcodes, jitdriver_sd, resumedescr, deadframe, diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -26,13 +26,21 @@ self.prev = prev self.boxes = boxes +def combine_uint(index1, index2): + assert 0 <= index1 < 65536 + assert 0 <= index2 < 65536 + return index1 << 16 | index2 # it's ok to return signed here, + # we need only 32bit, but 64 is ok for now + +def unpack_uint(packed): + return packed >> 16, packed & 0xffff + class FrameInfo(object): - __slots__ = ('prev', 'jitcode', 'pc') + __slots__ = ('prev', 'packed_jitcode_pc') def __init__(self, prev, jitcode, pc): self.prev = prev - self.jitcode = jitcode - self.pc = pc + self.packed_jitcode_pc = combine_uint(jitcode.index, pc) def 
_ensure_parent_resumedata(framestack, n): target = framestack[n] @@ -40,7 +48,8 @@ return back = framestack[n - 1] if target.parent_resumedata_frame_info_list is not None: - assert target.parent_resumedata_frame_info_list.pc == back.pc + _, pc = unpack_uint(target.parent_resumedata_frame_info_list.packed_jitcode_pc) + assert pc == back.pc return _ensure_parent_resumedata(framestack, n - 1) target.parent_resumedata_frame_info_list = FrameInfo( @@ -970,8 +979,10 @@ virtualizable_boxes, virtualref_boxes = boxes frameinfo = storage.rd_frame_info_list while True: - f = metainterp.newframe(frameinfo.jitcode) - f.setup_resume_at_op(frameinfo.pc) + jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) + jitcode = metainterp.staticdata.jitcodes[jitcode_pos] + f = metainterp.newframe(jitcode) + f.setup_resume_at_op(pc) resumereader.consume_boxes(f.get_current_position_info(), f.registers_i, f.registers_r, f.registers_f) frameinfo = frameinfo.prev @@ -1225,7 +1236,8 @@ # ---------- when resuming for blackholing, get direct values ---------- -def blackhole_from_resumedata(blackholeinterpbuilder, jitdriver_sd, storage, +def blackhole_from_resumedata(blackholeinterpbuilder, jitcodes, + jitdriver_sd, storage, deadframe, all_virtuals=None): # The initialization is stack-critical code: it must not be interrupted by # StackOverflow, otherwise the jit_virtual_refs are left in a dangling state. 
@@ -1259,7 +1271,9 @@ curbh = firstbh frameinfo = storage.rd_frame_info_list while True: - curbh.setposition(frameinfo.jitcode, frameinfo.pc) + jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) + jitcode = jitcodes[jitcode_pos] + curbh.setposition(jitcode, pc) resumereader.consume_one_section(curbh) curbh = curbh.nextblackholeinterp frameinfo = frameinfo.prev diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -243,7 +243,8 @@ verbose = False # not self.cpu.translate_support_code self.rewrite_access_helpers() self.create_jit_entry_points() - self.codewriter.make_jitcodes(verbose=verbose) + jitcodes = self.codewriter.make_jitcodes(verbose=verbose) + self.metainterp_sd.jitcodes = jitcodes self.rewrite_can_enter_jits() self.rewrite_set_param_and_get_stats() self.rewrite_force_virtual(vrefinfo) From noreply at buildbot.pypy.org Wed Oct 7 14:15:49 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Oct 2015 14:15:49 +0200 (CEST) Subject: [pypy-commit] pypy compress-numbering: store also packed on numbering; Message-ID: <20151007121549.AF3281C11FF@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: compress-numbering Changeset: r80019:b37bf50befec Date: 2015-10-07 14:07 +0200 http://bitbucket.org/pypy/pypy/changeset/b37bf50befec/ Log: store also packed on numbering; diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -33,7 +33,7 @@ # we need only 32bit, but 64 is ok for now def unpack_uint(packed): - return packed >> 16, packed & 0xffff + return (packed >> 16) & 0xffff, packed & 0xffff class FrameInfo(object): __slots__ = ('prev', 'packed_jitcode_pc') @@ -94,6 +94,7 @@ NUMBERINGP = lltype.Ptr(lltype.GcForwardReference()) NUMBERING = lltype.GcStruct('Numbering', ('prev', NUMBERINGP), + ('packed_jitcode_pc', lltype.Signed), ('nums', 
lltype.Array(rffi.SHORT))) NUMBERINGP.TO.become(NUMBERING) @@ -195,18 +196,25 @@ # env numbering - def number(self, optimizer, snapshot): + def number(self, optimizer, snapshot, frameinfo, first_iteration=False): if snapshot is None: return lltype.nullptr(NUMBERING), {}, 0 if snapshot in self.numberings: numb, liveboxes, v = self.numberings[snapshot] return numb, liveboxes.copy(), v - numb1, liveboxes, v = self.number(optimizer, snapshot.prev) + if first_iteration: + numb1, liveboxes, v = self.number(optimizer, snapshot.prev, frameinfo) + else: + numb1, liveboxes, v = self.number(optimizer, snapshot.prev, frameinfo.prev) n = len(liveboxes) - v boxes = snapshot.boxes length = len(boxes) numb = lltype.malloc(NUMBERING, length) + if first_iteration: + numb.packed_jitcode_pc = -1 + else: + numb.packed_jitcode_pc = frameinfo.packed_jitcode_pc for i in range(length): box = boxes[i] box = optimizer.get_box_replacement(box) @@ -377,7 +385,8 @@ assert not storage.rd_numb snapshot = self.snapshot_storage.rd_snapshot assert snapshot is not None # is that true? 
- numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot) + numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot, + self.snapshot_storage.rd_frame_info_list, first_iteration=True) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb From noreply at buildbot.pypy.org Wed Oct 7 14:15:51 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 7 Oct 2015 14:15:51 +0200 (CEST) Subject: [pypy-commit] pypy compress-numbering: don't store rd_frame_info_list at all Message-ID: <20151007121551.D18B61C11FF@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: compress-numbering Changeset: r80020:713e1ba22921 Date: 2015-10-07 14:15 +0200 http://bitbucket.org/pypy/pypy/changeset/713e1ba22921/ Log: don't store rd_frame_info_list at all diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -823,13 +823,12 @@ class ResumeGuardDescr(AbstractResumeGuardDescr): _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', - 'rd_frame_info_list', 'rd_pendingfields', 'status') + 'rd_pendingfields', 'status') rd_numb = lltype.nullptr(NUMBERING) rd_count = 0 rd_consts = None rd_virtuals = None - rd_frame_info_list = None rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) def copy_all_attributes_from(self, other): @@ -838,7 +837,6 @@ assert isinstance(other, ResumeGuardDescr) self.rd_count = other.rd_count self.rd_consts = other.rd_consts - self.rd_frame_info_list = other.rd_frame_info_list self.rd_pendingfields = other.rd_pendingfields self.rd_virtuals = other.rd_virtuals self.rd_numb = other.rd_numb diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -391,8 +391,7 @@ self.liveboxes = {} storage.rd_numb = numb self.snapshot_storage.rd_snapshot = None - storage.rd_frame_info_list = 
self.snapshot_storage.rd_frame_info_list - + # collect liveboxes and virtuals n = len(liveboxes_from_env) - v liveboxes = [None] * n @@ -986,7 +985,7 @@ boxes = resumereader.consume_vref_and_vable_boxes(virtualizable_info, greenfield_info) virtualizable_boxes, virtualref_boxes = boxes - frameinfo = storage.rd_frame_info_list + frameinfo = storage.rd_numb.prev while True: jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) jitcode = metainterp.staticdata.jitcodes[jitcode_pos] @@ -995,7 +994,7 @@ resumereader.consume_boxes(f.get_current_position_info(), f.registers_i, f.registers_r, f.registers_f) frameinfo = frameinfo.prev - if frameinfo is None: + if not frameinfo: break metainterp.framestack.reverse() return resumereader.liveboxes, virtualizable_boxes, virtualref_boxes @@ -1266,27 +1265,27 @@ # the bottom one, i.e. the last one in the chain, in order to make # the comment in BlackholeInterpreter.setposition() valid. nextbh = None - frameinfo = storage.rd_frame_info_list + numbering = storage.rd_numb.prev while True: curbh = blackholeinterpbuilder.acquire_interp() curbh.nextblackholeinterp = nextbh nextbh = curbh - frameinfo = frameinfo.prev - if frameinfo is None: + numbering = numbering.prev + if not numbering: break firstbh = nextbh # # Now fill the blackhole interpreters with resume data. 
curbh = firstbh - frameinfo = storage.rd_frame_info_list + numbering = storage.rd_numb.prev while True: - jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) + jitcode_pos, pc = unpack_uint(numbering.packed_jitcode_pc) jitcode = jitcodes[jitcode_pos] curbh.setposition(jitcode, pc) resumereader.consume_one_section(curbh) curbh = curbh.nextblackholeinterp - frameinfo = frameinfo.prev - if frameinfo is None: + numbering = numbering.prev + if not numbering: break return firstbh From noreply at buildbot.pypy.org Wed Oct 7 16:57:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 16:57:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix? for cppyy... Message-ID: <20151007145746.542361C0797@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80021:d37906ecef73 Date: 2015-10-07 16:57 +0200 http://bitbucket.org/pypy/pypy/changeset/d37906ecef73/ Log: Translation fix? for cppyy... diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -30,7 +30,8 @@ cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis, abi): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) From noreply at buildbot.pypy.org Wed Oct 7 17:30:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 17:30:41 +0200 (CEST) Subject: [pypy-commit] pypy default: Add an assert in case we forget to say jit_hooks.stats_xxx(None) Message-ID: <20151007153041.316691C0EFC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80022:8c3dea33025b Date: 2015-10-07 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/8c3dea33025b/ Log: Add an assert in case we forget to say 
jit_hooks.stats_xxx(None) diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -780,6 +780,8 @@ if func.func_name.startswith('stats_'): # get special treatment since we rewrite it to a call that accepts # jit driver + assert len(op.args) >= 3, ("%r must have a first argument " + "(which is None)" % (func,)) func = func_with_new_name(func, func.func_name + '_compiled') def new_func(ignored, *args): From noreply at buildbot.pypy.org Wed Oct 7 18:08:49 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 18:08:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Expose to app-level the stats (two numbers) maintained by AsmMemoryManager Message-ID: <20151007160849.F2FB71C11FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80023:4f21ee6823f2 Date: 2015-10-07 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/4f21ee6823f2/ Log: Expose to app-level the stats (two numbers) maintained by AsmMemoryManager diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,6 +15,7 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', + 'get_stats_asmmemmgr': 'interp_resop.get_stats_asmmemmgr', # those things are disabled because they have bugs, but if # they're found to be useful, fix test_ztranslation_jit_stats # in the backend first. 
get_stats_snapshot still produces diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -333,6 +333,13 @@ return space.wrap(W_JitInfoSnapshot(space, w_times, w_counters, w_counter_times)) +def get_stats_asmmemmgr(space): + """Returns the raw memory currently used by the JIT backend, + as a pair (total_memory_allocated, memory_in_use).""" + m1 = jit_hooks.stats_asmmemmgr_allocated(None) + m2 = jit_hooks.stats_asmmemmgr_used(None) + return space.newtuple([space.wrap(m1), space.wrap(m2)]) + def enable_debug(space): """ Set the jit debugging - completely necessary for some stats to work, most notably assembler counters. diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -25,6 +25,10 @@ self.free_blocks_end = {} # map {stop: start} self.blocks_by_size = [[] for i in range(self.num_indices)] + def get_stats(self): + """Returns stats for rlib.jit.jit_hooks.stats_asmmemmgr_*().""" + return (self.total_memory_allocated, self.total_mallocs) + def malloc(self, minsize, maxsize): """Allocate executable memory, between minsize and maxsize bytes, and return a pair (start, stop). 
Does not perform any rounding diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -129,6 +129,14 @@ def stats_get_loop_run_times(warmrunnerdesc): return warmrunnerdesc.metainterp_sd.cpu.get_all_loop_runs() + at register_helper(annmodel.SomeInteger(unsigned=True)) +def stats_asmmemmgr_allocated(warmrunnerdesc): + return warmrunnerdesc.metainterp_sd.cpu.asmmemmgr.get_stats()[0] + + at register_helper(annmodel.SomeInteger(unsigned=True)) +def stats_asmmemmgr_used(warmrunnerdesc): + return warmrunnerdesc.metainterp_sd.cpu.asmmemmgr.get_stats()[1] + # ---------------------- jitcell interface ---------------------- def _new_hook(name, resulttype): From noreply at buildbot.pypy.org Wed Oct 7 18:17:53 2015 From: noreply at buildbot.pypy.org (sbauman) Date: Wed, 7 Oct 2015 18:17:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Factor in field immutability when invalidating heap information Message-ID: <20151007161753.D0E161C069F@cobra.cs.uni-duesseldorf.de> Author: Spenser Bauman Branch: Changeset: r80024:a5858e533974 Date: 2015-10-07 12:11 -0400 http://bitbucket.org/pypy/pypy/changeset/a5858e533974/ Log: Factor in field immutability when invalidating heap information diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -414,6 +414,8 @@ for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: + if fielddescr.is_always_pure(): + continue try: del self.cached_dict_reads[fielddescr] except KeyError: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py 
@@ -3520,6 +3520,27 @@ """ self.optimize_loop(ops, expected) + def test_residual_call_does_not_invalidate_immutable_caches(self): + ops = """ + [p1] + i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i2 = call_i(i1, descr=writevalue3descr) + i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + jump(p1) + """ + expected_preamble = """ + [p1] + i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i2 = call_i(i1, descr=writevalue3descr) + jump(p1, i1) + """ + expected = """ + [p1, i1] + i2 = call_i(i1, descr=writevalue3descr) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_preamble=expected_preamble) + def test_residual_call_invalidate_some_caches(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -280,6 +280,8 @@ writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], [adescr], [arraydescr], [])) + writevalue3descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], [valuedescr3], [], [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, From noreply at buildbot.pypy.org Wed Oct 7 18:17:56 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 7 Oct 2015 18:17:56 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in sbauman/pypy (pull request #338) Message-ID: <20151007161756.345D71C069F@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80025:be4b389d856e Date: 2015-10-07 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/be4b389d856e/ Log: Merged in sbauman/pypy (pull request #338) Factor in field immutability when invalidating heap information diff --git a/rpython/jit/metainterp/optimizeopt/heap.py 
b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -414,6 +414,8 @@ for arraydescr in effectinfo.readonly_descrs_arrays: self.force_lazy_setarrayitem(arraydescr) for fielddescr in effectinfo.write_descrs_fields: + if fielddescr.is_always_pure(): + continue try: del self.cached_dict_reads[fielddescr] except KeyError: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -3520,6 +3520,27 @@ """ self.optimize_loop(ops, expected) + def test_residual_call_does_not_invalidate_immutable_caches(self): + ops = """ + [p1] + i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i2 = call_i(i1, descr=writevalue3descr) + i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + jump(p1) + """ + expected_preamble = """ + [p1] + i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i2 = call_i(i1, descr=writevalue3descr) + jump(p1, i1) + """ + expected = """ + [p1, i1] + i2 = call_i(i1, descr=writevalue3descr) + jump(p1, i1) + """ + self.optimize_loop(ops, expected, expected_preamble=expected_preamble) + def test_residual_call_invalidate_some_caches(self): ops = """ [p1, p2] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -280,6 +280,8 @@ writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([], [], [], [adescr], [arraydescr], [])) + writevalue3descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, + EffectInfo([], [], [], [valuedescr3], [], [])) readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo([adescr], [], [], [], [], [])) mayforcevirtdescr = 
cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, From noreply at buildbot.pypy.org Wed Oct 7 18:18:54 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 18:18:54 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix the nesting level of this subheader Message-ID: <20151007161854.CF01E1C069F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80026:dec11c0514ba Date: 2015-10-07 18:19 +0200 http://bitbucket.org/pypy/pypy/changeset/dec11c0514ba/ Log: Fix the nesting level of this subheader diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are From noreply at buildbot.pypy.org Wed Oct 7 19:01:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 7 Oct 2015 19:01:34 +0200 (CEST) Subject: [pypy-commit] cffi default: Clarify some details Message-ID: <20151007170134.96E901C11B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2329:29538451c652 Date: 2015-10-07 19:02 +0200 http://bitbucket.org/cffi/cffi/changeset/29538451c652/ Log: Clarify some details diff --git a/doc/source/overview.rst b/doc/source/overview.rst --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -308,12 +308,15 @@ ``dlopen()`` returns a ```` object, and this object has got as attributes all function, constant and variable symbols that are coming from this library and that have been declared in the -``cdef()``. +``cdef()``. If you have several interdependent libraries to load, +you would call ``cdef()`` only once but ``dlopen()`` several times. -By opposition, the API examples work like a C program does: the C +By opposition, the API mode works more closely like a C program: the C linker (static or dynamic) is responsible for finding any symbol used. 
You name the libraries in the ``libraries`` keyword argument to -``set_source()``. Other common arguments include ``library_dirs`` and +``set_source()``, but never need to say which symbol comes +from which library. +Other common arguments to ``set_source()`` include ``library_dirs`` and ``include_dirs``; all these arguments are passed to the standard distutils/setuptools. @@ -335,35 +338,39 @@ meant to access fields by guessing where they are in the structures. *The C libraries are typically meant to be used with a C compiler.* -The second example shows how to do that: instead of doing a ``dlopen()``, -we use ``set_source(..., "C header...")``. When using this approach -we have the advantage that we can use "``...``" at various places in +The "real example" above shows how to do that: this example uses +``set_source(..., "C source...")`` and never ``dlopen()``. +When using this approach, +we have the advantage that we can use literally "``...``" at various places in the ``cdef()``, and the missing information will be completed with the help of the C compiler. Actually, a single C source file is produced, -which contains first the ``C header`` part unmodified, followed by +which contains first the "C source" part unmodified, followed by some "magic" C code and declarations derived from the ``cdef()``. When this C file is compiled, the resulting C extension module will contain all the information we need---or the C compiler will give warnings or -errors, as usual e.g. if you misdeclare some function's signature. +errors, as usual e.g. if we misdeclare some function's signature. -Note that the ``C header`` part can contain arbitrary C code. You can -use it to declare some more helper functions written in C. To export +Note that the "C source" part from ``set_source()`` can contain +arbitrary C code. You can use this to declare some +more helper functions written in C. To export these helpers to Python, put their signature in the ``cdef()`` too. 
-(You can use the ``static`` C keyword, as in ``static int -myhelper(int x) { real_code_here; }``, because these helpers are only +(You can use the ``static`` C keyword in the "C source" part, +as in ``static int myhelper(int x) { return x * 42; }``, +because these helpers are only referenced from the "magic" C code that is generated afterwards in the same C file.) This can be used for example to wrap "crazy" macros into more standard C functions. The extra layer of C can be useful for other reasons too, like calling functions that expect some complicated argument -structures that you prefer to build in C rather than in Python. On +structures that you prefer to build in C rather than in Python. (On the other hand, if all you need is to call "function-like" macros, then you can directly declare them in the ``cdef()`` as if they were -functions. +functions.) The generated piece of C code should be the same independently on the -platform on which you run it, so in simple cases you can simply +platform on which you run it (or the Python version), +so in simple cases you can directly distribute the pre-generated C code and treat it as a regular C extension module. 
The special Setuptools lines in the `example above`__ are meant for the more complicated cases where we need to From noreply at buildbot.pypy.org Wed Oct 7 19:49:32 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 19:49:32 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: update convert_from_to() definitions Message-ID: <20151007174932.2A29B1C0710@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80027:b78ca5dca6d5 Date: 2015-10-07 03:25 +0100 http://bitbucket.org/pypy/pypy/changeset/b78ca5dca6d5/ Log: update convert_from_to() definitions diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -366,15 +366,23 @@ return inputconst(typeOf(llfn), llfn) +class __extend__(pairtype(FunctionRepr, FunctionRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return v + +class __extend__(pairtype(FunctionRepr, FunctionsPBCRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return inputconst(r_fpbc2, r_fpbc1.s_pbc.const) + +class __extend__(pairtype(FunctionsPBCRepr, FunctionRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return inputconst(Void, None) + class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): # this check makes sense because both source and dest repr are FunctionsPBCRepr if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype: return v - if r_fpbc1.lowleveltype is Void: - return inputconst(r_fpbc2, r_fpbc1.s_pbc.const) - if r_fpbc2.lowleveltype is Void: - return inputconst(Void, None) return NotImplemented @@ -505,16 +513,16 @@ resulttype=Bool) +class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionRepr)): + def convert_from_to((r_set, r_ptr), v, llops): + return inputconst(Void, None) + class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): - if r_ptr.lowleveltype is Void: - return 
inputconst(Void, None) - else: - assert v.concretetype is Char - v_int = llops.genop('cast_char_to_int', [v], - resulttype=Signed) - return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], - resulttype=r_ptr.lowleveltype) + assert v.concretetype is Char + v_int = llops.genop('cast_char_to_int', [v], resulttype=Signed) + return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], + resulttype=r_ptr.lowleveltype) def compression_function(r_set): @@ -536,14 +544,15 @@ return r_set._compression_function +class __extend__(pairtype(FunctionRepr, SmallFunctionSetPBCRepr)): + def convert_from_to((r_ptr, r_set), v, llops): + desc, = r_ptr.s_pbc.descriptions + return inputconst(Char, r_set.convert_desc(desc)) + class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): def convert_from_to((r_ptr, r_set), v, llops): - if r_ptr.lowleveltype is Void: - desc, = r_ptr.s_pbc.descriptions - return inputconst(Char, r_set.convert_desc(desc)) - else: - ll_compress = compression_function(r_set) - return llops.gendirectcall(ll_compress, v) + ll_compress = compression_function(r_set) + return llops.gendirectcall(ll_compress, v) def conversion_table(r_from, r_to): From noreply at buildbot.pypy.org Wed Oct 7 19:49:34 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 19:49:34 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: Make FunctionRepr and FunctionsPBCRepr subclasses of a common base class Message-ID: <20151007174934.5AC5A1C0710@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80028:977271ee31e1 Date: 2015-10-07 18:49 +0100 http://bitbucket.org/pypy/pypy/changeset/977271ee31e1/ Log: Make FunctionRepr and FunctionsPBCRepr subclasses of a common base class diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -173,14 +173,58 @@ raise TyperError("call table was unexpectedly extended") return llct - -class FunctionsPBCRepr(CanBeNull, 
Repr): - """Representation selected for a PBC of functions.""" - +class FunctionReprBase(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper self.s_pbc = s_pbc self.callfamily = s_pbc.any_description().getcallfamily() + + def get_s_callable(self): + return self.s_pbc + + def get_r_implfunc(self): + return self, 0 + + def get_s_signatures(self, shape): + funcdesc = self.s_pbc.any_description() + return funcdesc.get_s_signatures(shape) + + def rtype_simple_call(self, hop): + return self.call(hop) + + def rtype_call_args(self, hop): + return self.call(hop) + + def call(self, hop): + bk = self.rtyper.annotator.bookkeeper + args = hop.spaceop.build_args(hop.args_s[1:]) + s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc + descs = list(s_pbc.descriptions) + shape, index = self.callfamily.find_row(bk, descs, args, hop.spaceop) + row_of_graphs = self.callfamily.calltables[shape][index] + anygraph = row_of_graphs.itervalues().next() # pick any witness + vfn = hop.inputarg(self, arg=0) + vlist = [self.convert_to_concrete_llfn(vfn, shape, index, + hop.llops)] + vlist += callparse.callparse(self.rtyper, anygraph, hop) + rresult = callparse.getrresult(self.rtyper, anygraph) + hop.exception_is_here() + if isinstance(vlist[0], Constant): + v = hop.genop('direct_call', vlist, resulttype=rresult) + else: + vlist.append(hop.inputconst(Void, row_of_graphs.values())) + v = hop.genop('indirect_call', vlist, resulttype=rresult) + if hop.r_result is impossible_repr: + return None # see test_always_raising_methods + else: + return hop.llops.convertvar(v, rresult, hop.r_result) + + +class FunctionsPBCRepr(CanBeNull, FunctionReprBase): + """Representation selected for a PBC of functions.""" + + def __init__(self, rtyper, s_pbc): + FunctionReprBase.__init__(self, rtyper, s_pbc) llct = get_concrete_calltable(self.rtyper, self.callfamily) self.concretetable = llct.table self.uniquerows = llct.uniquerows @@ -207,16 +251,6 @@ def get_specfunc_row(self, llop, v, c_rowname, 
resulttype): return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) - def get_s_callable(self): - return self.s_pbc - - def get_r_implfunc(self): - return self, 0 - - def get_s_signatures(self, shape): - funcdesc = self.s_pbc.any_description() - return funcdesc.get_s_signatures(shape) - def convert_desc(self, funcdesc): # get the whole "column" of the call table corresponding to this desc try: @@ -276,52 +310,11 @@ cname = inputconst(Void, row.attrname) return self.get_specfunc_row(llop, v, cname, row.fntype) - def get_concrete_llfn(self, s_pbc, args_s, op): - bk = self.rtyper.annotator.bookkeeper - funcdesc, = s_pbc.descriptions - args = simple_args(args_s) - with bk.at_position(None): - graph = funcdesc.get_graph(args, op) - llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) - def rtype_simple_call(self, hop): - return self.call(hop) +class FunctionRepr(FunctionReprBase): + """Repr for a constant function""" - def rtype_call_args(self, hop): - return self.call(hop) - - def call(self, hop): - bk = self.rtyper.annotator.bookkeeper - args = hop.spaceop.build_args(hop.args_s[1:]) - s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc - descs = list(s_pbc.descriptions) - shape, index = self.callfamily.find_row(bk, descs, args, hop.spaceop) - row_of_graphs = self.callfamily.calltables[shape][index] - anygraph = row_of_graphs.itervalues().next() # pick any witness - vfn = hop.inputarg(self, arg=0) - vlist = [self.convert_to_concrete_llfn(vfn, shape, index, - hop.llops)] - vlist += callparse.callparse(self.rtyper, anygraph, hop) - rresult = callparse.getrresult(self.rtyper, anygraph) - hop.exception_is_here() - if isinstance(vlist[0], Constant): - v = hop.genop('direct_call', vlist, resulttype=rresult) - else: - vlist.append(hop.inputconst(Void, row_of_graphs.values())) - v = hop.genop('indirect_call', vlist, resulttype=rresult) - if hop.r_result is impossible_repr: - return None # see test_always_raising_methods - else: 
- return hop.llops.convertvar(v, rresult, hop.r_result) - -class FunctionRepr(FunctionsPBCRepr): - """Repr for a constant function""" - def __init__(self, rtyper, s_pbc): - self.rtyper = rtyper - self.s_pbc = s_pbc - self.callfamily = s_pbc.any_description().getcallfamily() - self.lowleveltype = Void + lowleveltype = Void def convert_desc(self, funcdesc): return None @@ -334,7 +327,7 @@ low-level function. In case the call table contains multiple rows, 'index' and 'shape' tells which of its items we are interested in. """ - assert v.concretetype == self.lowleveltype + assert v.concretetype == Void funcdesc, = self.s_pbc.descriptions row_of_one_graph = self.callfamily.calltables[shape][index] graph = row_of_one_graph[funcdesc] @@ -365,6 +358,16 @@ llfn = self.rtyper.getcallable(graph) return inputconst(typeOf(llfn), llfn) + def get_concrete_llfn(self, s_pbc, args_s, op): + bk = self.rtyper.annotator.bookkeeper + funcdesc, = s_pbc.descriptions + args = simple_args(args_s) + with bk.at_position(None): + graph = funcdesc.get_graph(args, op) + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + + class __extend__(pairtype(FunctionRepr, FunctionRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): @@ -1163,8 +1166,7 @@ def redispatch_call(self, hop, call_args): r_class = self.r_im_self.rclass mangled_name, r_func = r_class.clsfields[self.methodname] - assert isinstance(r_func, (FunctionsPBCRepr, - SmallFunctionSetPBCRepr)) + assert isinstance(r_func, (FunctionReprBase, SmallFunctionSetPBCRepr)) # s_func = r_func.s_pbc -- not precise enough, see # test_precise_method_call_1. Build a more precise one... 
funcdescs = [desc.funcdesc for desc in hop.args_s[0].descriptions] From noreply at buildbot.pypy.org Wed Oct 7 23:57:09 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 7 Oct 2015 23:57:09 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: Make SmallFunctionSetPBCRepr a subclass of FunctionReprBase Message-ID: <20151007215709.8C0621C11B3@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80029:53b94ce9d2ab Date: 2015-10-07 19:26 +0100 http://bitbucket.org/pypy/pypy/changeset/53b94ce9d2ab/ Log: Make SmallFunctionSetPBCRepr a subclass of FunctionReprBase diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -389,11 +389,9 @@ return NotImplemented -class SmallFunctionSetPBCRepr(Repr): +class SmallFunctionSetPBCRepr(FunctionReprBase): def __init__(self, rtyper, s_pbc): - self.rtyper = rtyper - self.s_pbc = s_pbc - self.callfamily = s_pbc.any_description().getcallfamily() + FunctionReprBase.__init__(self, rtyper, s_pbc) llct = get_concrete_calltable(self.rtyper, self.callfamily) assert len(llct.uniquerows) == 1 self.lowleveltype = Char @@ -425,16 +423,6 @@ pointer_table[i] = self.pointer_repr.convert_const(None) self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table) - def get_s_callable(self): - return self.s_pbc - - def get_r_implfunc(self): - return self, 0 - - def get_s_signatures(self, shape): - funcdesc = self.s_pbc.any_description() - return funcdesc.get_s_signatures(shape) - def convert_desc(self, funcdesc): return chr(self.descriptions.index(funcdesc)) @@ -446,12 +434,6 @@ funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) - def rtype_simple_call(self, hop): - return self.call(hop) - - def rtype_call_args(self, hop): - return self.call(hop) - def dispatcher(self, shape, index, argtypes, resulttype): key = shape, index, tuple(argtypes), resulttype if key in self._dispatch_cache: @@ -1166,7 +1148,7 @@ 
def redispatch_call(self, hop, call_args): r_class = self.r_im_self.rclass mangled_name, r_func = r_class.clsfields[self.methodname] - assert isinstance(r_func, (FunctionReprBase, SmallFunctionSetPBCRepr)) + assert isinstance(r_func, FunctionReprBase) # s_func = r_func.s_pbc -- not precise enough, see # test_precise_method_call_1. Build a more precise one... funcdescs = [desc.funcdesc for desc in hop.args_s[0].descriptions] From noreply at buildbot.pypy.org Thu Oct 8 06:31:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 06:31:18 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix test (didn't run on 64-bit, and failed on Win32) Message-ID: <20151008043118.CADEC1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80030:7a55c3196936 Date: 2015-10-08 06:31 +0200 http://bitbucket.org/pypy/pypy/changeset/7a55c3196936/ Log: Fix test (didn't run on 64-bit, and failed on Win32) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4046,8 +4046,11 @@ def test_external_call(self): from rpython.rlib.objectmodel import invoke_around_extcall - T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T) + TIME_T = lltype.Signed + # ^^^ some 32-bit platforms have a 64-bit rffi.TIME_T, but we + # don't want that here; we just want always a Signed value + T = rffi.CArrayPtr(TIME_T) + external = rffi.llexternal("time", [T], TIME_T) class Oups(Exception): pass @@ -4071,9 +4074,9 @@ external(lltype.nullptr(T.TO)) return len(state.l) - res = self.interp_operations(f, [], supports_longlong=True) + res = self.interp_operations(f, []) assert res == 2 - res = self.interp_operations(f, [], supports_longlong=True) + res = self.interp_operations(f, []) assert res == 2 self.check_operations_history(call_release_gil_i=1, call_may_force_i=0) From noreply at buildbot.pypy.org Thu Oct 
8 06:36:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 06:36:19 +0200 (CEST) Subject: [pypy-commit] pypy default: 32-bit support Message-ID: <20151008043619.DC1AA1C1192@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80031:710006a4a05f Date: 2015-10-08 06:36 +0200 http://bitbucket.org/pypy/pypy/changeset/710006a4a05f/ Log: 32-bit support diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -273,16 +273,16 @@ guard_not_invalidated(descr=...) f100 = float_mul(f98, 0.500000) i101 = int_add(i79, 1) - i102 = arraylen_gc(p85, descr=) + i102 = arraylen_gc(p85, descr=) i103 = int_lt(i102, i101) cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) guard_no_exception(descr=...) - p104 = getfield_gc_r(p76, descr=) - p105 = new_with_vtable(descr=) - setfield_gc(p105, f100, descr=) - setarrayitem_gc(p104, i79, p105, descr=) + p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) i106 = getfield_raw_i(#, descr=) - setfield_gc(p76, i101, descr=) + setfield_gc(p76, i101, descr=) i107 = int_lt(i106, 0) guard_false(i107, descr=...) jump(..., descr=...) 
From noreply at buildbot.pypy.org Thu Oct 8 08:40:18 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 08:40:18 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: missing restriction on the input arguments of guarding values Message-ID: <20151008064018.B209C1C069F@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80032:10f70b96e015 Date: 2015-10-03 10:00 +0200 http://bitbucket.org/pypy/pypy/changeset/10f70b96e015/ Log: missing restriction on the input arguments of guarding values diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -250,6 +250,29 @@ restrict = self.argument_restrictions[index] return restrict.bytesize + def opcount_filling_vector_register(self, op, vec_reg_size): + """ How many operations of that kind can one execute + with a machine instruction of register size X? 
+ """ + if op.is_typecast(): + if op.casts_down(): + size = op.cast_input_bytesize(vec_reg_size) + return size // op.cast_from_bytesize() + else: + return vec_reg_size // op.cast_to_bytesize() + return vec_reg_size // op.bytesize + +class GuardRestrict(OpRestrict): + def opcount_filling_vector_register(self, op, vec_reg_size): + arg = op.getarg(0) + return vec_reg_size // arg.bytesize + +class LoadRestrict(OpRestrict): + def opcount_filling_vector_register(self, op, vec_reg_size): + assert op.is_primitive_load() + descr = op.getdescr() + return vec_reg_size // descr.get_item_size_in_bytes() + class StoreRestrict(OpRestrict): def __init__(self, argument_restris): self.argument_restrictions = argument_restris @@ -264,6 +287,11 @@ descr = op.getdescr() return descr.get_item_size_in_bytes() + def opcount_filling_vector_register(self, op, vec_reg_size): + assert op.is_primitive_store() + descr = op.getdescr() + return vec_reg_size // descr.get_item_size_in_bytes() + class OpMatchSizeTypeFirst(OpRestrict): def check_operation(self, state, pack, op): i = 0 @@ -293,6 +321,9 @@ OR_MSTF_I = OpMatchSizeTypeFirst([TR_ANY_INTEGER, TR_ANY_INTEGER]) OR_MSTF_F = OpMatchSizeTypeFirst([TR_ANY_FLOAT, TR_ANY_FLOAT]) + STORE_RESTRICT = StoreRestrict([None, None, TR_ANY]) + LOAD_RESTRICT = LoadRestrict([]) + GUARD_RESTRICT = GuardRestrict([TR_ANY_INTEGER]) # note that the following definition is x86 arch specific MAPPING = { @@ -312,12 +343,19 @@ rop.VEC_FLOAT_ABS: OpRestrict([TR_ANY_FLOAT]), rop.VEC_FLOAT_NEG: OpRestrict([TR_ANY_FLOAT]), - rop.VEC_RAW_STORE: StoreRestrict([None, None, TR_ANY]), - rop.VEC_SETARRAYITEM_RAW: StoreRestrict([None, None, TR_ANY]), - rop.VEC_SETARRAYITEM_GC: StoreRestrict([None, None, TR_ANY]), + rop.VEC_RAW_STORE: STORE_RESTRICT, + rop.VEC_SETARRAYITEM_RAW: STORE_RESTRICT, + rop.VEC_SETARRAYITEM_GC: STORE_RESTRICT, - rop.GUARD_TRUE: OpRestrict([TR_ANY_INTEGER]), - rop.GUARD_FALSE: OpRestrict([TR_ANY_INTEGER]), + rop.VEC_RAW_LOAD_I: LOAD_RESTRICT, + 
rop.VEC_RAW_LOAD_F: LOAD_RESTRICT, + rop.VEC_GETARRAYITEM_RAW_I: LOAD_RESTRICT, + rop.VEC_GETARRAYITEM_RAW_F: LOAD_RESTRICT, + rop.VEC_GETARRAYITEM_GC_I: LOAD_RESTRICT, + rop.VEC_GETARRAYITEM_GC_F: LOAD_RESTRICT, + + rop.GUARD_TRUE: GUARD_RESTRICT, + rop.GUARD_FALSE: GUARD_RESTRICT, ## irregular rop.VEC_INT_SIGNEXT: OpRestrict([TR_ANY_INTEGER]), @@ -333,12 +371,19 @@ rop.VEC_INT_IS_TRUE: OpRestrict([TR_ANY_INTEGER,TR_ANY_INTEGER]), } + @staticmethod + def get(op): + res = trans.MAPPING.get(op.vector, None) + if not res: + failnbail_transformation("could not get OpRestrict for " + str(op)) + return res + def turn_into_vector(state, pack): """ Turn a pack into a vector instruction """ check_if_pack_supported(state, pack) state.costmodel.record_pack_savings(pack, pack.numops()) left = pack.leftmost() - oprestrict = trans.MAPPING.get(pack.leftmost().vector, None) + oprestrict = trans.get(left) if oprestrict is not None: oprestrict.check_operation(state, pack, left) args = left.getarglist_copy() @@ -733,24 +778,6 @@ break self.setvector_of_box(arg, i, box) -def opcount_filling_vector_register(pack, vec_reg_size): - """ How many operations of that kind can one execute - with a machine instruction of register size X? 
- """ - op = pack.leftmost() - if op.returns_void(): - assert op.is_primitive_store() - descr = op.getdescr() - return vec_reg_size // descr.get_item_size_in_bytes() - - if op.is_typecast(): - if op.casts_down(): - size = op.cast_input_bytesize(vec_reg_size) - return size // op.cast_from_bytesize() - else: - return vec_reg_size // op.cast_to_bytesize() - return vec_reg_size // op.bytesize - class Pack(object): """ A pack is a set of n statements that are: * isomorphic @@ -880,8 +907,13 @@ break pack.update_pack_of_nodes() + def opcount_filling_vector_register(self, vec_reg_size): + left = self.leftmost() + oprestrict = trans.get(left) + return oprestrict.opcount_filling_vector_register(left, vec_reg_size) + def slice_operations(self, vec_reg_size): - count = opcount_filling_vector_register(self, vec_reg_size) + count = self.opcount_filling_vector_register(vec_reg_size) assert count > 0 newoplist = self.operations[count:] oplist = self.operations[:count] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -22,7 +22,6 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.metainterp.optimizeopt.version import LoopVersionInfo from rpython.jit.backend.llsupport.descr import ArrayDescr -from rpython.jit.metainterp.optimizeopt.schedule import opcount_filling_vector_register from rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph class FakeJitDriverStaticData(object): @@ -245,12 +244,32 @@ return FakeInput(type, datatype, size, signed) class BaseTestVectorize(VecTestHelper): - def test_opcount_filling(self): + def test_opcount_filling_store(self): descr = ArrayDescr(0,8, None, 'F', concrete_type='f') - pack = Pack([Node(ResOperation(rop.VEC_RAW_STORE, [0,0,arg('f',4)], descr), 0), - Node(ResOperation(rop.VEC_RAW_STORE, 
[0,0,arg('f',4)], descr), 0), + pack = Pack([Node(ResOperation(rop.RAW_STORE, [0,0,arg('f',4)], descr), 0), + Node(ResOperation(rop.RAW_STORE, [0,0,arg('f',4)], descr), 0), ]) - assert opcount_filling_vector_register(pack, 16) == 2 + assert pack.opcount_filling_vector_register(16) == 2 + + def test_opcount_filling_guard(self): + descr = ArrayDescr(0,4, None, 'S') + vec = ResOperation(rop.VEC_RAW_LOAD_I, ['a','i'], descr=descr) + vec.count = 4 + pack = Pack([Node(ResOperation(rop.GUARD_TRUE, [vec]), 0), + Node(ResOperation(rop.GUARD_TRUE, [vec]), 1), + Node(ResOperation(rop.GUARD_TRUE, [vec]), 2), + Node(ResOperation(rop.GUARD_TRUE, [vec]), 3), + Node(ResOperation(rop.GUARD_TRUE, [vec]), 4), + Node(ResOperation(rop.GUARD_TRUE, [vec]), 5), + ]) + assert pack.opcount_filling_vector_register(16) == 4 + ops, newops = pack.slice_operations(16) + assert len(ops) == 4 + assert len(newops) == 2 + assert pack.opcount_filling_vector_register(8) == 2 + ops, newops = pack.slice_operations(8) + assert len(ops) == 2 + assert len(newops) == 4 def test_move_guard_first(self): trace = self.parse_trace(""" diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -125,7 +125,7 @@ descr=descr) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('i', 'i', 4, True) -def test_store(): +def test_vec_store(): descr = ArrayDescr(0,8, None, 'F', concrete_type='f') vec = rop.InputArgVector() op = rop.ResOperation(rop.rop.VEC_RAW_STORE, @@ -133,6 +133,14 @@ descr=descr) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'v', 8, True) +def test_vec_guard(): + vec = rop.InputArgVector() + vec.bytesize = 4 + vec.type = vec.datatype = 'i' + vec.sigend = True + op = rop.ResOperation(rop.rop.GUARD_TRUE, [vec]) + assert (op.type, op.datatype, op.bytesize, op.is_vector()) == ('v', 'i', 4, False) + def 
test_types(): op = rop.ResOperation(rop.rop.INT_ADD, [ConstInt(0),ConstInt(1)]) assert op.type == 'i' From noreply at buildbot.pypy.org Thu Oct 8 08:40:20 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 08:40:20 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: leave not only merge point out but any debug op Message-ID: <20151008064020.C8AB81C069F@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80033:142b6b8b8484 Date: 2015-10-08 08:39 +0200 http://bitbucket.org/pypy/pypy/changeset/142b6b8b8484/ Log: leave not only merge point out but any debug op diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -161,7 +161,7 @@ guard_count = 0 at_least_one_array_access = True for i,op in enumerate(loop.operations): - if op.getopnum() == rop.DEBUG_MERGE_POINT: + if op.is_debug(): continue if op.vector >= 0 and not op.is_guard(): From noreply at buildbot.pypy.org Thu Oct 8 09:36:46 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 09:36:46 +0200 (CEST) Subject: [pypy-commit] pypy default: Update module docstring Message-ID: <20151008073646.941B41C145F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80034:b58e5dc31dac Date: 2015-10-08 09:19 +0200 http://bitbucket.org/pypy/pypy/changeset/b58e5dc31dac/ Log: Update module docstring diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -10,7 +10,6 @@ repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times Iterators terminating on the shortest input sequence: - izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... 
ifilter(pred, seq) --> elements of seq where pred(elem) is True ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False islice(seq, [start,] stop [, step]) --> elements from @@ -22,6 +21,14 @@ takewhile(pred, seq) --> seq[0], seq[1], until pred fails dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) + izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + izip_longest(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + + Combinatoric generators: + product(p, q, ... [repeat=1]) --> cartesian product + permutations(p[, r]) + combinations(p, r) + combinations_with_replacement(p, r) """ interpleveldefs = { From noreply at buildbot.pypy.org Thu Oct 8 09:36:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 09:36:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Unroll itertools.izip_longest() with two sequences Message-ID: <20151008073648.A19761C145F@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80035:9aa1274d056c Date: 2015-10-08 09:37 +0200 http://bitbucket.org/pypy/pypy/changeset/9aa1274d056c/ Log: Unroll itertools.izip_longest() with two sequences diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -649,33 +649,38 @@ class W_IZipLongest(W_IMap): _error_name = "izip_longest" + _immutable_fields_ = ["w_fillvalue"] + + def _fetch(self, index): + w_iter = self.iterators_w[index] + if w_iter is not None: + space = self.space + try: + return space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + self.active -= 1 + if self.active <= 0: + # It was the last active iterator + raise + self.iterators_w[index] = None + return self.w_fillvalue def next_w(self): - space = self.space + # common case: 2 arguments + if len(self.iterators_w) 
== 2: + objects = [self._fetch(0), self._fetch(1)] + else: + objects = self._get_objects() + return self.space.newtuple(objects) + + def _get_objects(self): + # the loop is out of the way of the JIT nb = len(self.iterators_w) - if nb == 0: - raise OperationError(space.w_StopIteration, space.w_None) - - objects_w = [None] * nb - for index in range(nb): - w_value = self.w_fillvalue - w_it = self.iterators_w[index] - if w_it is not None: - try: - w_value = space.next(w_it) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - - self.active -= 1 - if self.active == 0: - # It was the last active iterator - raise - self.iterators_w[index] = None - - objects_w[index] = w_value - return space.newtuple(objects_w) + raise OperationError(self.space.w_StopIteration, self.space.w_None) + return [self._fetch(index) for index in range(nb)] def W_IZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() From noreply at buildbot.pypy.org Thu Oct 8 11:06:35 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 11:06:35 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: merged default Message-ID: <20151008090635.A1F981C13BE@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80036:598c56268e90 Date: 2015-10-08 08:59 +0200 http://bitbucket.org/pypy/pypy/changeset/598c56268e90/ Log: merged default diff too long, truncating to 2000 out of 6822 lines diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. 
in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . 
import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". 
This construction should @@ -192,6 +203,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +214,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +268,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +284,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +301,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, 
cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +312,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if 
(isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +365,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. 
get it from .names, but reduce @@ -379,35 +405,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +455,28 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) - return 
model.RawFunctionType(tuple(args), result, ellipsis) + result, quals = self._get_type_and_quals(typenode.type) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +515,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +557,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +571,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +580,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +665,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py 
@@ -1,14 +1,29 @@ -import types +import types, sys import weakref from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -177,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -206,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, 
self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -217,24 +236,29 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +267,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +276,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +337,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, 
fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +354,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +373,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ 
b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -608,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -711,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -774,7 +778,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # 
accept all integers, but complain on float or double @@ -789,7 +793,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +828,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +884,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1011,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1090,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') @@ -1130,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -156,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') @@ -260,7 +267,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in 
tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +276,8 @@ # only accept exactly the type declared. try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +288,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +350,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in 
extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -44,6 +44,19 @@ Add support for ndarray.ctypes property. +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + .. branch: vecopt .. branch: vecopt-merge diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = 
"r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func 
is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -44,8 +51,8 @@ 'get_errno': 
'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +60,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -178,7 +178,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,13 +200,21 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") + at jit.jit_callback("CFFI") +def py_invoke_callback(callback, ll_res, ll_args): + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) - at jit.jit_callback("CFFI") def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ @@ -228,13 +237,7 @@ space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,22 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, 
fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +47,7 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +79,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +87,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +103,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +114,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +189,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = 
chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +263,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = llmemory.raw_malloc_usage(size) @@ -421,7 +425,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. 
@@ -142,7 +143,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,10 +4,11 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref, clibffi from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform +from pypy.module import _cffi_backend from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr, ctypearray, ctypestruct, ctypevoid, ctypeenum) @@ -23,27 +24,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - 
(fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +151,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +185,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, 
ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +215,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -600,8 +593,9 @@ # ____________________________________________________________ - at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int) -def new_function_type(space, w_fargs, w_fresult, ellipsis=0): + at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int) +def new_function_type(space, w_fargs, w_fresult, ellipsis=0, + abi=_cffi_backend.FFI_DEFAULT_ABI): fargs = [] for w_farg in space.fixedview(w_fargs): if not isinstance(w_farg, ctypeobj.W_CType): @@ -610,31 +604,72 @@ if isinstance(w_farg, ctypearray.W_CTypeArray): w_farg = w_farg.ctptr fargs.append(w_farg) - return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) + return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi) + +def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= (ellipsis - abi) + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def 
_new_function_type(space, fargs, fresult, ellipsis, abi): + try: + return _get_function_type(space, fargs, fresult, ellipsis, abi) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis, abi) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis, abi): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis and + ctype.abi == abi): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise 
oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root +from pypy.module import _cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import parse_c_type @@ -164,16 +165,28 @@ OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END: num_args += 1 - ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0 + # + ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0 + abi = (getarg(opcodes[base_index + num_args]) & 0xFE) + if abi == 0: + abi = _cffi_backend.FFI_DEFAULT_ABI + elif abi == 2: + if _cffi_backend.has_stdcall: + abi = _cffi_backend.FFI_STDCALL + else: + abi = _cffi_backend.FFI_DEFAULT_ABI + else: + raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) + # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] - return fargs, fret, ellipsis + return fargs, 
fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): @@ -190,7 +203,7 @@ # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. if self.nostruct_ctype is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected @@ -207,7 +220,7 @@ locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: @@ -218,7 +231,7 @@ locs[0] == 'R') def unexpected_fn_type(self, ffi): - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c --- a/pypy/module/_cffi_backend/src/parse_c_type.c +++ b/pypy/module/_cffi_backend/src/parse_c_type.c @@ -51,6 +51,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -165,6 +168,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -236,7 +241,7 @@ type). 
The 'outer' argument is the index of the opcode outside this "sequel". */ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -253,6 +258,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -269,6 +280,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -286,7 +302,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -315,7 +338,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -339,8 +362,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -348,6 +370,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2316,9 +2316,6 @@ f(); f() assert 
get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) - def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3427,3 +3424,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py --- a/pypy/module/_cffi_backend/test/test_parse_c_type.py +++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py @@ -338,3 +338,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(cffi_opcode.PRIM_INT)] + assert 
parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter +from . 
import constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -82,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -143,16 +145,11 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -165,7 +162,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -173,20 +169,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, 
elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return _find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, 
is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype @@ -201,6 +259,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -862,6 +862,8 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), get_dtype_cache(space).w_longdtype) + if ret.get_size() < 1: + return ret if side == NPY.SEARCHLEFT: binsearch = loop.binsearch_left else: @@ -1308,6 +1310,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if 
isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -190,67 +190,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) From noreply at buildbot.pypy.org Thu 
Oct 8 11:06:37 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 11:06:37 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: quick fix for the guard exit, the first argument is unpacked, added a test to check if this leads to a wrong result Message-ID: <20151008090637.C9D4F1C13BE@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80037:68cd8c5a751e Date: 2015-10-08 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/68cd8c5a751e/ Log: quick fix for the guard exit, the first argument is unpacked, added a test to check if this leads to a wrong result diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -759,8 +759,7 @@ index) elif typetag == self.TY_REF: refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', - elif typetag == self.TY_REF: - refval = metainterp_sd.cpu.get_ref_value(deadframe, index) + index) intval = lltype.cast_ptr_to_int(refval) elif typetag == self.TY_FLOAT: floatval = metainterp_sd.cpu.get_value_direct(deadframe, 'f', @@ -912,6 +911,25 @@ ptr = cpu.ts.cast_to_baseclass(gcref) return cast_base_ptr_to_instance(AllVirtuals, ptr) +def invent_fail_descr_for_op(opnum, optimizer, copied_guard=False): + if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2: + assert not copied_guard + resumedescr = ResumeGuardForcedDescr() + resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd) + elif opnum in (rop.GUARD_IS_OBJECT, rop.GUARD_SUBCLASS, rop.GUARD_GC_TYPE): + # note - this only happens in tests + resumedescr = ResumeAtPositionDescr() + elif opnum in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION): + if copied_guard: + resumedescr = ResumeGuardCopiedExcDescr() + else: + resumedescr = ResumeGuardExcDescr() + else: + if copied_guard: + resumedescr = ResumeGuardCopiedDescr() + else: + resumedescr = ResumeGuardDescr() + return resumedescr class 
ResumeGuardForcedDescr(ResumeGuardDescr): def _init(self, metainterp_sd, jitdriver_sd): diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -542,7 +542,7 @@ def __init__(self, loop): self.loop = loop self.label = Node(loop.label, 0) - self.nodes = [ Node(op,i+1) for i,op in enumerate(loop.operations) if not op.is_debug() ] + self.nodes = [ Node(op,i+1) for i,op in enumerate(loop.operations) if not op.is_jit_debug() ] self.inodes = [] # imaginary nodes self.jump = Node(loop.jump, len(self.nodes)+1) self.invariant_vars = {} diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -390,17 +390,14 @@ prepare_arguments(state, pack, args) vecop = VecOperation(left.vector, args, left, pack.numops(), left.getdescr()) + if left.is_guard(): + prepare_fail_arguments(state, pack, left, vecop) state.oplist.append(vecop) for i,node in enumerate(pack.operations): op = node.getoperation() state.setvector_of_box(op,i,vecop) if pack.is_accumulating(): state.renamer.start_renaming(op, vecop) - if left.is_guard(): - assert isinstance(left, GuardResOp) - assert isinstance(vecop, GuardResOp) - vecop.setfailargs(left.getfailargs()) - vecop.rd_snapshot = left.rd_snapshot def prepare_arguments(state, pack, args): # Transforming one argument to a vector box argument @@ -439,6 +436,20 @@ position_values(state, restrict, pack, args, i, pos) # d) restrict.check(args[i]) +def prepare_fail_arguments(state, pack, left, vecop): + assert isinstance(left, GuardResOp) + assert isinstance(vecop, GuardResOp) + args = left.getfailargs() + for i, arg in enumerate(args): + pos, newarg = state.getvector_of_box(arg) + if newarg is None: + newarg = arg + if newarg.is_vector(): # 
can be moved to guard exit! + newarg = unpack_from_vector(state, newarg, 0, 1) + args[i] = newarg + vecop.setfailargs(args) + vecop.rd_snapshot = left.rd_snapshot + @always_inline def crop_vector(state, oprestrict, restrict, pack, args, i): # convert size i64 -> i32, i32 -> i64, ... diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -12,7 +12,6 @@ from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype from rpython.conftest import option -from rpython.jit.metainterp.compile import invent_fail_descr_for_op class FakeDependencyGraph(DependencyGraph): """ A dependency graph that is able to emit every instruction @@ -46,9 +45,6 @@ def parse_loop(self, ops, add_label=True): loop = self.parse(ops, postprocess=self.postprocess) loop.operations = filter(lambda op: op.getopnum() != rop.DEBUG_MERGE_POINT, loop.operations) - #for op in loop.operations: - # if op.is_guard() and op.getdescr() is None: - # op.setdescr(invent_fail_descr_for_op(op.opnum, None)) token = JitCellToken() if add_label: label = ResOperation(rop.LABEL, loop.inputargs, descr=TargetToken(token)) diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -11,8 +11,7 @@ from rpython.jit.metainterp.resume import Snapshot from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop #from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll -from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, - invent_fail_descr_for_op, ResumeGuardDescr) +from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, ResumeGuardDescr) from rpython.jit.metainterp.history import 
(INT, FLOAT, VECTOR, ConstInt, ConstFloat, TargetToken, JitCellToken, AbstractFailDescr) from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization @@ -161,7 +160,7 @@ guard_count = 0 at_least_one_array_access = True for i,op in enumerate(loop.operations): - if op.is_debug(): + if op.is_jit_debug(): continue if op.vector >= 0 and not op.is_guard(): diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -166,6 +166,9 @@ (rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.0, 33,34, True), (rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.1, 4,34, False), (lltype.Signed, lambda x: not bool(x), 1, None, -1,32, False), + (lltype.Signed, lambda x: not bool(x), 1, 0, 14,32, True), + (lltype.Signed, lambda x: not bool(x), 1, 0, 15,31, True), + (lltype.Signed, lambda x: not bool(x), 1, 0, 16,30, True), (lltype.Signed, lambda x: x == 0, 1, None, -1,33, False), (lltype.Signed, lambda x: x == 0, 1, 0, 33,34, True), # any From noreply at buildbot.pypy.org Thu Oct 8 11:06:39 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 11:06:39 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: bail if the argument is in failargs as well Message-ID: <20151008090639.DD64F1C13BE@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80038:d3a1d758a0a3 Date: 2015-10-08 11:06 +0200 http://bitbucket.org/pypy/pypy/changeset/d3a1d758a0a3/ Log: bail if the argument is in failargs as well diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -267,6 +267,7 @@ raise NotImplementedError("Purely abstract") class RegisterManager(object): + """ Class that keeps track of register allocations """ box_types = None # or a list of acceptable types diff --git 
a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -597,9 +597,9 @@ def consider_vec_float_eq(self, op): lhs = op.getarg(0) args = op.getarglist() + rhsloc = self.make_sure_var_in_reg(op.getarg(1), args) lhsloc = self.xrm.force_result_in_reg(op, op.getarg(0), args) - rhsloc = self.make_sure_var_in_reg(op.getarg(1), args) - resloc = self.force_allocate_reg_or_cc(op) + #resloc = self.force_allocate_reg_or_cc(op) self.perform(op, [lhsloc, rhsloc, imm(lhs.bytesize)], lhsloc) consider_vec_float_ne = consider_vec_float_eq @@ -693,9 +693,6 @@ consider_vec_i = _consider_vec consider_vec_f = _consider_vec - def consider_guard_early_exit(self, op): - pass - def consider_vec_cast_float_to_int(self, op): args = op.getarglist() srcloc = self.make_sure_var_in_reg(op.getarg(0), args) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -442,6 +442,10 @@ args = left.getfailargs() for i, arg in enumerate(args): pos, newarg = state.getvector_of_box(arg) + if newarg in vecop.getarglist(): + # in this case we do not know which slot + # failed. thus we bail! + raise NotAVectorizeableLoop() if newarg is None: newarg = arg if newarg.is_vector(): # can be moved to guard exit! 
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -196,7 +196,9 @@ i = 0 ; nobreak = False while i < d: myjitdriver.jit_merge_point() - if func(va[i]): + b = func(va[i]) + if b: + assert b break i += 1 else: From noreply at buildbot.pypy.org Thu Oct 8 11:27:41 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 11:27:41 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: assembler, missing return to not enter the non vector guarding instruction Message-ID: <20151008092741.96FAE1C1319@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80039:a01cf8fd40d6 Date: 2015-10-08 11:27 +0200 http://bitbucket.org/pypy/pypy/changeset/a01cf8fd40d6/ Log: assembler, missing return to not enter the non vector guarding instruction diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -409,15 +409,16 @@ def _consider_guard_cc(true): - def function(self, op): + def consider_guard_cc(self, op): arg = op.getarg(0) if arg.is_vector(): loc = self.loc(arg) self.assembler.guard_vector(op, self.loc(arg), true) + return else: self.load_condition_into_cc(arg) self.perform_guard(op, [], None) - return function + return consider_guard_cc consider_guard_true = _consider_guard_cc(True) consider_guard_false = _consider_guard_cc(False) diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -599,7 +599,6 @@ args = op.getarglist() rhsloc = self.make_sure_var_in_reg(op.getarg(1), args) lhsloc = self.xrm.force_result_in_reg(op, op.getarg(0), args) - #resloc = self.force_allocate_reg_or_cc(op) self.perform(op, [lhsloc, rhsloc, imm(lhs.bytesize)], 
lhsloc) consider_vec_float_ne = consider_vec_float_eq From noreply at buildbot.pypy.org Thu Oct 8 11:28:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 11:28:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Commented-out code that can be enabled to display the failargs Message-ID: <20151008092815.E23E31C1319@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80040:148365731b0d Date: 2015-10-08 11:01 +0200 http://bitbucket.org/pypy/pypy/changeset/148365731b0d/ Log: Commented-out code that can be enabled to display the failargs diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -195,12 +195,17 @@ args = self.getarglist() descr = self.getdescr() if descr is None or we_are_translated(): - return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([a.repr_short(memo) for a in args])) + s = '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([a.repr_short(memo) for a in args])) else: - return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([a.repr_short(memo) for a in args] + - ['descr=%r' % descr])) + s = '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([a.repr_short(memo) for a in args] + + ['descr=%r' % descr])) + # --- enable to display the failargs too: + #if isinstance(self, GuardResOp): + # s += ' [%s]' % (', '.join([a.repr_short(memo) for a in + # self.getfailargs()]),) + return s def repr_short(self, memo): try: From noreply at buildbot.pypy.org Thu Oct 8 11:28:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 11:28:18 +0200 (CEST) Subject: [pypy-commit] pypy default: fix (we're supposed to call str(e.value); I'm not sure why str(e) works Message-ID: <20151008092818.028071C1319@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80041:c6ffc38a6c33 Date: 2015-10-08 11:18 +0200 
http://bitbucket.org/pypy/pypy/changeset/c6ffc38a6c33/ Log: fix (we're supposed to call str(e.value); I'm not sure why str(e) works too, but it doesn't for me) diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -576,7 +576,7 @@ return ovfcheck(i + j) err = py.test.raises(Exception, "self.encoding_test(f, [7, 2], ''," "transform=True, liveness=True)") - assert "ovfcheck()" in str(err) + assert "ovfcheck()" in str(err.value) def test_ovfcheck_reraise(self): def f(i, j): From noreply at buildbot.pypy.org Thu Oct 8 11:28:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 11:28:20 +0200 (CEST) Subject: [pypy-commit] pypy default: A workaround: without adding "goto_if_not_float_lt" & friends, the JIT Message-ID: <20151008092820.265B11C1319@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80042:6b8493305a9b Date: 2015-10-08 11:28 +0200 http://bitbucket.org/pypy/pypy/changeset/6b8493305a9b/ Log: A workaround: without adding "goto_if_not_float_lt" & friends, the JIT code systematically makes the boolean result of "float_lt" survive across the following "goto_if_not". This means the JIT backends can't use their more efficient fast-path encodings for a float comparison followed by a guard, because the integer that stores the result of the comparison is also used in some failargs. 
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -205,6 +205,8 @@ if v is op.result: if op.opname not in ('int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', + 'float_lt', 'float_le', 'float_eq', + 'float_ne', 'float_gt', 'float_ge', 'int_is_zero', 'int_is_true', 'ptr_eq', 'ptr_ne', 'ptr_iszero', 'ptr_nonzero'): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -672,6 +672,55 @@ b = longlong.getrealfloat(b) return a >= b + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_lt(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a < b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_le(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a <= b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_eq(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a == b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_ne(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a != b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_gt(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a > b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_ge(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a >= b: + return pc + else: + return target + @arguments("f", returns="i") def 
bhimpl_cast_float_to_int(a): a = longlong.getrealfloat(a) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -358,7 +358,8 @@ self.opimpl_goto_if_not(condbox, target, orgpc) for _opimpl in ['int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', - 'ptr_eq', 'ptr_ne']: + 'ptr_eq', 'ptr_ne', 'float_lt', 'float_le', 'float_eq', + 'float_ne', 'float_gt', 'float_ge']: exec py.code.Source(''' @arguments("box", "box", "label", "orgpc") def opimpl_goto_if_not_%s(self, b1, b2, target, orgpc): From noreply at buildbot.pypy.org Thu Oct 8 11:33:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 11:33:20 +0200 (CEST) Subject: [pypy-commit] pypy default: At this point, don't optimize "float_eq(f1, f1)". That may be false sometimes. Message-ID: <20151008093320.284EB1C1319@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80043:d9388b6db911 Date: 2015-10-08 11:33 +0200 http://bitbucket.org/pypy/pypy/changeset/d9388b6db911/ Log: At this point, don't optimize "float_eq(f1, f1)". That may be false sometimes. 
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -363,12 +363,13 @@ exec py.code.Source(''' @arguments("box", "box", "label", "orgpc") def opimpl_goto_if_not_%s(self, b1, b2, target, orgpc): - if b1 is b2: + if %s and b1 is b2: condbox = %s else: condbox = self.execute(rop.%s, b1, b2) self.opimpl_goto_if_not(condbox, target, orgpc) - ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) + ''' % (_opimpl, not _opimpl.startswith('float_'), + FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() def _establish_nullity(self, box, orgpc): From noreply at buildbot.pypy.org Thu Oct 8 12:00:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 12:00:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Bug and fix for stacklets on shdowstack: in some cases we have garbage Message-ID: <20151008100031.20F0F1C1319@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80044:27d3379e61b6 Date: 2015-10-08 11:59 +0200 http://bitbucket.org/pypy/pypy/changeset/27d3379e61b6/ Log: Bug and fix for stacklets on shdowstack: in some cases we have garbage in s_sscopy, and the custom tracer runs at that point diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -76,9 +76,12 @@ def alloc_stacklet(): new_stacklet = lltype.malloc(STACKLET) new_stacklet.s_handle = _c.null_handle + new_stacklet.s_sscopy = llmemory.NULL return new_stacklet def attach_handle_on_stacklet(stacklet, h): + ll_assert(stacklet.s_handle == _c.null_handle, "attach stacklet 1: garbage") + ll_assert(stacklet.s_sscopy == llmemory.NULL, "attach stacklet 2: garbage") if not h: raise MemoryError elif _c.is_empty_handle(h): From noreply at buildbot.pypy.org Thu Oct 8 13:40:53 2015 From: noreply at 
buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 13:40:53 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: reverted change Message-ID: <20151008114053.A194B1C1319@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80045:d27d8ef22c97 Date: 2015-10-08 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/d27d8ef22c97/ Log: reverted change diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -414,7 +414,6 @@ if arg.is_vector(): loc = self.loc(arg) self.assembler.guard_vector(op, self.loc(arg), true) - return else: self.load_condition_into_cc(arg) self.perform_guard(op, [], None) diff --git a/rpython/jit/backend/x86/test/test_x86vector.py b/rpython/jit/backend/x86/test/test_x86vector.py --- a/rpython/jit/backend/x86/test/test_x86vector.py +++ b/rpython/jit/backend/x86/test/test_x86vector.py @@ -89,3 +89,4 @@ res = self.do_test(callback) & 0xffffffff assert res == 22 + diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -517,5 +517,27 @@ res = self.meta_interp(f, [22], vec_all=True) assert res == f(22) + def test_guard_test_location_assert(self): + myjitdriver = JitDriver(greens = [], reds = 'auto') + T1 = lltype.Array(rffi.SIGNED, hints={'nolength': True}) + def f(size): + vector_a = lltype.malloc(T1, size, flavor='raw', zero=True) + for i in range(size): + vector_a[i] = 0 + i = 0 + breaks = 0 + while i < size: + myjitdriver.jit_merge_point() + a = vector_a[i] + if a: + breaks = 1 + break + del a + i += 1 + lltype.free(vector_a, flavor='raw') + return breaks + res = self.meta_interp(f, [22], vec_all=True, vec_guard_ratio=5) + assert res == f(22) + class TestLLtype(LLJitMixin, VectorizeTests): pass diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- 
a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -581,7 +581,7 @@ 'vec_cost': 0, 'vec_length': 60, 'vec_ratio': 2, - 'vec_guard_ratio': 3, + 'vec_guard_ratio': 5, } unroll_parameters = unrolling_iterable(PARAMETERS.items()) From noreply at buildbot.pypy.org Thu Oct 8 13:40:55 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 13:40:55 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: merged default Message-ID: <20151008114055.E5E171C1319@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80046:b022ce811db0 Date: 2015-10-08 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/b022ce811db0/ Log: merged default diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -10,7 +10,6 @@ repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times Iterators terminating on the shortest input sequence: - izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... ifilter(pred, seq) --> elements of seq where pred(elem) is True ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False islice(seq, [start,] stop [, step]) --> elements from @@ -22,6 +21,14 @@ takewhile(pred, seq) --> seq[0], seq[1], until pred fails dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) + izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + izip_longest(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + + Combinatoric generators: + product(p, q, ... 
[repeat=1]) --> cartesian product + permutations(p[, r]) + combinations(p, r) + combinations_with_replacement(p, r) """ interpleveldefs = { diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -649,33 +649,38 @@ class W_IZipLongest(W_IMap): _error_name = "izip_longest" + _immutable_fields_ = ["w_fillvalue"] + + def _fetch(self, index): + w_iter = self.iterators_w[index] + if w_iter is not None: + space = self.space + try: + return space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + self.active -= 1 + if self.active <= 0: + # It was the last active iterator + raise + self.iterators_w[index] = None + return self.w_fillvalue def next_w(self): - space = self.space + # common case: 2 arguments + if len(self.iterators_w) == 2: + objects = [self._fetch(0), self._fetch(1)] + else: + objects = self._get_objects() + return self.space.newtuple(objects) + + def _get_objects(self): + # the loop is out of the way of the JIT nb = len(self.iterators_w) - if nb == 0: - raise OperationError(space.w_StopIteration, space.w_None) - - objects_w = [None] * nb - for index in range(nb): - w_value = self.w_fillvalue - w_it = self.iterators_w[index] - if w_it is not None: - try: - w_value = space.next(w_it) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - - self.active -= 1 - if self.active == 0: - # It was the last active iterator - raise - self.iterators_w[index] = None - - objects_w[index] = w_value - return space.newtuple(objects_w) + raise OperationError(self.space.w_StopIteration, self.space.w_None) + return [self._fetch(index) for index in range(nb)] def W_IZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- 
a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -205,6 +205,8 @@ if v is op.result: if op.opname not in ('int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', + 'float_lt', 'float_le', 'float_eq', + 'float_ne', 'float_gt', 'float_ge', 'int_is_zero', 'int_is_true', 'ptr_eq', 'ptr_ne', 'ptr_iszero', 'ptr_nonzero'): diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -576,7 +576,7 @@ return ovfcheck(i + j) err = py.test.raises(Exception, "self.encoding_test(f, [7, 2], ''," "transform=True, liveness=True)") - assert "ovfcheck()" in str(err) + assert "ovfcheck()" in str(err.value) def test_ovfcheck_reraise(self): def f(i, j): diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -672,6 +672,55 @@ b = longlong.getrealfloat(b) return a >= b + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_lt(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a < b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_le(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a <= b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_eq(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a == b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_ne(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a != b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_gt(a, b, target, 
pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a > b: + return pc + else: + return target + @arguments("f", "f", "L", "pc", returns="L") + def bhimpl_goto_if_not_float_ge(a, b, target, pc): + a = longlong.getrealfloat(a) + b = longlong.getrealfloat(b) + if a >= b: + return pc + else: + return target + @arguments("f", returns="i") def bhimpl_cast_float_to_int(a): a = longlong.getrealfloat(a) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -358,16 +358,18 @@ self.opimpl_goto_if_not(condbox, target, orgpc) for _opimpl in ['int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', - 'ptr_eq', 'ptr_ne']: + 'ptr_eq', 'ptr_ne', 'float_lt', 'float_le', 'float_eq', + 'float_ne', 'float_gt', 'float_ge']: exec py.code.Source(''' @arguments("box", "box", "label", "orgpc") def opimpl_goto_if_not_%s(self, b1, b2, target, orgpc): - if b1 is b2: + if %s and b1 is b2: condbox = %s else: condbox = self.execute(rop.%s, b1, b2) self.opimpl_goto_if_not(condbox, target, orgpc) - ''' % (_opimpl, FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) + ''' % (_opimpl, not _opimpl.startswith('float_'), + FASTPATHS_SAME_BOXES[_opimpl.split("_")[-1]], _opimpl.upper()) ).compile() def _establish_nullity(self, box, orgpc): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -316,12 +316,17 @@ args = self.getarglist() descr = self.getdescr() if descr is None or we_are_translated(): - return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([a.repr_short(memo) for a in args])) + s = '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([a.repr_short(memo) for a in args])) else: - return '%s%s%s(%s)' % (prefix, sres, self.getopname(), - ', '.join([a.repr_short(memo) for a in args] + - ['descr=%r' % 
descr])) + s = '%s%s%s(%s)' % (prefix, sres, self.getopname(), + ', '.join([a.repr_short(memo) for a in args] + + ['descr=%r' % descr])) + # --- enable to display the failargs too: + #if isinstance(self, GuardResOp): + # s += ' [%s]' % (', '.join([a.repr_short(memo) for a in + # self.getfailargs()]),) + return s def repr_short(self, memo): try: diff --git a/rpython/rlib/_stacklet_shadowstack.py b/rpython/rlib/_stacklet_shadowstack.py --- a/rpython/rlib/_stacklet_shadowstack.py +++ b/rpython/rlib/_stacklet_shadowstack.py @@ -76,9 +76,12 @@ def alloc_stacklet(): new_stacklet = lltype.malloc(STACKLET) new_stacklet.s_handle = _c.null_handle + new_stacklet.s_sscopy = llmemory.NULL return new_stacklet def attach_handle_on_stacklet(stacklet, h): + ll_assert(stacklet.s_handle == _c.null_handle, "attach stacklet 1: garbage") + ll_assert(stacklet.s_sscopy == llmemory.NULL, "attach stacklet 2: garbage") if not h: raise MemoryError elif _c.is_empty_handle(h): From noreply at buildbot.pypy.org Thu Oct 8 15:15:11 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 15:15:11 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: fix: resume guard can now be copied Message-ID: <20151008131511.E8E1A1C1192@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80047:b9281122c8c8 Date: 2015-10-08 15:15 +0200 http://bitbucket.org/pypy/pypy/changeset/b9281122c8c8/ Log: fix: resume guard can now be copied diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -691,9 +691,9 @@ continue accum = self.accumulation.get(arg, None) if accum: - from rpython.jit.metainterp.compile import ResumeGuardDescr + from rpython.jit.metainterp.compile import AbstractResumeGuardDescr assert isinstance(accum, AccumPack) - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, 
AbstractResumeGuardDescr) descr.attach_accum_info(i, accum.operator, arg, None) seed = accum.getleftmostseed() failargs[i] = self.renamer.rename_map.get(seed, seed) diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -11,7 +11,8 @@ from rpython.jit.metainterp.resume import Snapshot from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop #from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll -from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, ResumeGuardDescr) +from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, + ResumeGuardDescr, ResumeGuardCopiedDescr, AbstractResumeGuardDescr) from rpython.jit.metainterp.history import (INT, FLOAT, VECTOR, ConstInt, ConstFloat, TargetToken, JitCellToken, AbstractFailDescr) from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer, Optimization @@ -279,7 +280,11 @@ assert isinstance(copied_op, GuardResOp) descr = copied_op.getdescr() if descr: - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractResumeGuardDescr) + if isinstance(descr, ResumeGuardDescr): + descr = descr.prev.clone() + else: + descr = descr.clone() copied_op.setdescr(descr.clone()) # copy failargs/snapshot copied_op.rd_snapshot = \ From noreply at buildbot.pypy.org Thu Oct 8 15:16:21 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 8 Oct 2015 15:16:21 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: missing not, took wrong path... Message-ID: <20151008131621.8A1381C1192@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80048:4510528cd619 Date: 2015-10-08 15:16 +0200 http://bitbucket.org/pypy/pypy/changeset/4510528cd619/ Log: missing not, took wrong path... 
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -281,7 +281,7 @@ descr = copied_op.getdescr() if descr: assert isinstance(descr, AbstractResumeGuardDescr) - if isinstance(descr, ResumeGuardDescr): + if not isinstance(descr, ResumeGuardDescr): descr = descr.prev.clone() else: descr = descr.clone() From noreply at buildbot.pypy.org Thu Oct 8 17:19:24 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Oct 2015 17:19:24 +0200 (CEST) Subject: [pypy-commit] pypy compress-numbering: break everything - encode resume data in a more compact way (but reading is not there just yet) Message-ID: <20151008151924.32F4B1C1192@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: compress-numbering Changeset: r80049:2255631c47aa Date: 2015-10-08 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/2255631c47aa/ Log: break everything - encode resume data in a more compact way (but reading is not there just yet) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -823,12 +823,13 @@ class ResumeGuardDescr(AbstractResumeGuardDescr): _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', - 'rd_pendingfields', 'status') + 'rd_pendingfields', 'rd_stackdepth', 'status') rd_numb = lltype.nullptr(NUMBERING) rd_count = 0 rd_consts = None rd_virtuals = None + rd_stackdepth = 0 rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO) def copy_all_attributes_from(self, other): @@ -839,6 +840,7 @@ self.rd_consts = other.rd_consts self.rd_pendingfields = other.rd_pendingfields self.rd_virtuals = other.rd_virtuals + self.rd_stackdepth = other.rd_stackdepth self.rd_numb = other.rd_numb # we don't copy status diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- 
a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper.rclass import OBJECTPTR from rpython.jit.metainterp.walkvirtual import VirtualVisitor +from rpython.jit.metainterp import resumecode # Logic to encode the chain of frames and the state of the boxes at a @@ -196,25 +197,9 @@ # env numbering - def number(self, optimizer, snapshot, frameinfo, first_iteration=False): - if snapshot is None: - return lltype.nullptr(NUMBERING), {}, 0 - if snapshot in self.numberings: - numb, liveboxes, v = self.numberings[snapshot] - return numb, liveboxes.copy(), v - - if first_iteration: - numb1, liveboxes, v = self.number(optimizer, snapshot.prev, frameinfo) - else: - numb1, liveboxes, v = self.number(optimizer, snapshot.prev, frameinfo.prev) - n = len(liveboxes) - v - boxes = snapshot.boxes + def _number_boxes(self, boxes, liveboxes, optimizer, v, n): length = len(boxes) - numb = lltype.malloc(NUMBERING, length) - if first_iteration: - numb.packed_jitcode_pc = -1 - else: - numb.packed_jitcode_pc = frameinfo.packed_jitcode_pc + current = [] for i in range(length): box = boxes[i] box = optimizer.get_box_replacement(box) @@ -238,12 +223,58 @@ tagged = tag(n, TAGBOX) n += 1 liveboxes[box] = tagged - numb.nums[i] = tagged - # - numb.prev = numb1 - self.numberings[snapshot] = numb, liveboxes, v - return numb, liveboxes.copy(), v + current.append(tagged) + return v, n, current + def _get_prev_snapshot(self, snapshot): + cur_snapshot = snapshot + while True: + try: + return self.numberings[cur_snapshot], cur_snapshot + except KeyError: + pass + cur_snapshot = cur_snapshot.prev + if not cur_snapshot: + return (lltype.nullptr(resumecode.NUMBERING), 0, {}, 0), None + + def number(self, optimizer, snapshot, frameinfo): + # find the parent + + p = self._get_prev_snapshot(snapshot) + (prev_numb, prev_numb_index, liveboxes, v), s = p + n = len(liveboxes) - v + first = 
True + all_lists = [] + total_lgt = 0 + cur_snapshot = snapshot + liveboxes_to_save = [] + while cur_snapshot != s: + liveboxes_to_save.append((liveboxes.copy(), v)) + v, n, current = self._number_boxes(cur_snapshot.boxes, liveboxes, optimizer, + v, n) + cur_snapshot = cur_snapshot.prev + if first: + first = False + else: + jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) + current.append(rffi.cast(rffi.USHORT, jitcode_pos)) + current.append(rffi.cast(rffi.USHORT, pc)) + lst = resumecode.create_numbering(current) + total_lgt += len(lst) + all_lists.append(lst) + numb = lltype.malloc(resumecode.NUMBERING, total_lgt) + numb.prev = prev_numb + numb.prev_index = rffi.cast(rffi.USHORT, prev_numb_index) + index = 0 + for i in range(len(all_lists)): + lst = all_lists[i] + liveboxes_snapshot, v = liveboxes_to_save[i] + self.numberings[snapshot] = (numb, index, liveboxes_snapshot, v) + resumecode.copy_from_list_to_numb(lst, numb, index) + index += len(lst) + snapshot = snapshot.prev + return numb, liveboxes, v + def forget_numberings(self): # XXX ideally clear only the affected numberings self.numberings.clear() @@ -385,8 +416,15 @@ assert not storage.rd_numb snapshot = self.snapshot_storage.rd_snapshot assert snapshot is not None # is that true? + # count stack depth + frame_info_list = self.snapshot_storage.rd_frame_info_list + stack_depth = 1 + while frame_info_list.prev is not None: + frame_info_list = frame_info_list.prev + stack_depth += 1 + storage.rd_stack_depth = stack_depth numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot, - self.snapshot_storage.rd_frame_info_list, first_iteration=True) + self.snapshot_storage.rd_frame_info_list) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb diff --git a/rpython/jit/metainterp/resumecode.py b/rpython/jit/metainterp/resumecode.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/resumecode.py @@ -0,0 +1,59 @@ + +""" Resume bytecode. 
It goes as following: + + + +until the length of the array, then to the parent at the convinient index. +numb are encoded in the variable length byte encoding as follows: +if the first bit is set, then it's the first +7 bits then the next byte, otherwise it's the next 7 bit. +""" + +from rpython.rtyper.lltypesystem import rffi, lltype + +NUMBERINGP = lltype.Ptr(lltype.GcForwardReference()) +NUMBERING = lltype.GcStruct('Numbering', + ('prev', NUMBERINGP), + ('prev_index', rffi.USHORT), + ('code', lltype.Array(rffi.UCHAR))) +NUMBERINGP.TO.become(NUMBERING) + +def create_numbering(lst): + count = 0 + for item in lst: + if item > 127: + count += 1 + count += 1 + numb = [rffi.cast(rffi.UCHAR, 0)] * count + index = 0 + for item in lst: + if item <= 128: + numb[index] = rffi.cast(rffi.UCHAR, item) + index += 1 + else: + assert (item >> 8) <= 127 + numb[index] = rffi.cast(rffi.UCHAR, (item >> 8) | 0x80) + numb[index + 1] = rffi.cast(rffi.UCHAR, item & 0xff) + index += 2 + return numb + +def copy_from_list_to_numb(lst, numb, index): + i = 0 + while i < len(lst): + numb.code[i + index] = lst[i] + i += 1 + +def numb_next_item(numb, index): + one = rffi.cast(lltype.Signed, numb.code[index]) + if one & 0x80: + two = rffi.cast(lltype.Signed, numb.code[index + 1]) + return ((one & 0x7f) << 8) | two, index + 2 + return one, index + 1 + +def unpack_numbering(numb): + l = [] + i = 0 + while i < len(numb.code): + next, i = numb_next_item(numb, i) + l.append(next) + return l diff --git a/rpython/jit/metainterp/test/test_resumecode.py b/rpython/jit/metainterp/test/test_resumecode.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_resumecode.py @@ -0,0 +1,18 @@ + +from rpython.jit.metainterp.resumecode import NUMBERING +from rpython.jit.metainterp.resumecode import create_numbering,\ + unpack_numbering, copy_from_list_to_numb +from rpython.rtyper.lltypesystem import lltype + +def test_pack_unpack(): + examples = [ + [1, 2, 3, 4, 257, 10000, 13, 15], + [1, 2, 3, 
4], + range(1, 10, 2), + [13000, 12000, 10000, 256, 255, 254, 257] + ] + for l in examples: + lst = create_numbering(l) + n = lltype.malloc(NUMBERING, len(lst)) + copy_from_list_to_numb(lst, n, 0) + assert unpack_numbering(n) == l From noreply at buildbot.pypy.org Thu Oct 8 17:55:17 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 8 Oct 2015 17:55:17 +0200 (CEST) Subject: [pypy-commit] pypy callfamily: Close branch callfamily Message-ID: <20151008155517.D4C5F1C1319@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: callfamily Changeset: r80050:b8988547945a Date: 2015-10-08 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/b8988547945a/ Log: Close branch callfamily From noreply at buildbot.pypy.org Thu Oct 8 17:58:50 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 8 Oct 2015 17:58:50 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in callfamily (pull request #339) Message-ID: <20151008155850.E47491C131C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80051:ab645e87e727 Date: 2015-10-08 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/ab645e87e727/ Log: Merged in callfamily (pull request #339) Callfamily diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -213,8 +213,6 @@ v = graph.getreturnvar() if v.annotation is None: self.setbinding(v, annmodel.s_ImpossibleValue) - # policy-dependent computation - self.bookkeeper.compute_at_fixpoint() def validate(self): """Check that the annotation results are valid""" @@ -292,6 +290,18 @@ graph, block, index = position_key self.reflowpendingblock(graph, block) + def call_sites(self): + newblocks = self.added_blocks + if newblocks is None: + newblocks = self.annotated # all of them + for block in newblocks: + for op in block.operations: + if op.opname in ('simple_call', 'call_args'): + yield op + + # some blocks are partially annotated + if op.result.annotation is 
None: + break # ignore the unannotated part #___ simplification (should be moved elsewhere?) _______ @@ -309,6 +319,7 @@ graphs[graph] = True for graph in graphs: simplify.eliminate_empty_blocks(graph) + self.bookkeeper.compute_at_fixpoint() if block_subset is None: perform_normalizations(self) @@ -396,8 +407,7 @@ i = 0 while i < len(block.operations): op = block.operations[i] - self.bookkeeper.enter((graph, block, i)) - try: + with self.bookkeeper.at_position((graph, block, i)): new_ops = op.transform(self) if new_ops is not None: block.operations[i:i+1] = new_ops @@ -406,8 +416,6 @@ new_ops[-1].result = op.result op = new_ops[0] self.consider_op(op) - finally: - self.bookkeeper.leave() i += 1 except BlockedInference as e: diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -5,6 +5,7 @@ from __future__ import absolute_import import sys, types, inspect, weakref +from contextlib import contextmanager from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, @@ -88,34 +89,29 @@ del TLS.bookkeeper del self.position_key + @contextmanager + def at_position(self, pos): + """A context manager calling `self.enter()` and `self.leave()`""" + if hasattr(self, 'position_key') and pos is None: + yield + return + self.enter(pos) + try: + yield + finally: + self.leave() + def compute_at_fixpoint(self): # getbookkeeper() needs to work during this function, so provide # one with a dummy position - self.enter(None) - try: - def call_sites(): - newblocks = self.annotator.added_blocks - if newblocks is None: - newblocks = self.annotator.annotated # all of them - annotation = self.annotator.annotation - for block in newblocks: - for op in block.operations: - if op.opname in ('simple_call', 'call_args'): - yield op - - # some blocks are partially annotated - if annotation(op.result) is None: - break # ignore the unannotated part - - for 
call_op in call_sites(): + with self.at_position(None): + for call_op in self.annotator.call_sites(): self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): args = simple_args(args_s) pbc.consider_call_site(args, s_ImpossibleValue, None) self.emulated_pbc_calls = {} - finally: - self.leave() def check_no_flags_on_instances(self): # sanity check: no flags attached to heap stored instances @@ -501,10 +497,6 @@ """Analyse a call to a SomePBC() with the given args (list of annotations). """ - descs = list(pbc.descriptions) - first = descs[0] - first.mergecallfamilies(*descs[1:]) - if emulated is None: whence = self.position_key # fish the existing annotation for the result variable, @@ -522,12 +514,9 @@ op = None s_previous_result = s_ImpossibleValue - def schedule(graph, inputcells): - return self.annotator.recursivecall(graph, whence, inputcells) - results = [] - for desc in descs: - results.append(desc.pycall(schedule, args, s_previous_result, op)) + for desc in pbc.descriptions: + results.append(desc.pycall(whence, args, s_previous_result, op)) s_result = unionof(*results) return s_result @@ -552,10 +541,7 @@ "replace" can be set to a list of old unique_key values to forget now, because the given "unique_key" replaces them. """ - emulate_enter = not hasattr(self, 'position_key') - if emulate_enter: - self.enter(None) - try: + with self.at_position(None): emulated_pbc_calls = self.emulated_pbc_calls prev = [unique_key] prev.extend(replace) @@ -570,9 +556,6 @@ else: emulated = callback return self.pbc_call(pbc, args, emulated=emulated) - finally: - if emulate_enter: - self.leave() def _find_current_op(self, opname=None, arity=None, pos=None, s_type=None): """ Find operation that is currently being annotated. 
Do some diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -53,6 +53,22 @@ table.append(row) self.total_calltable_size += 1 + def find_row(self, bookkeeper, descs, args, op): + shape = rawshape(args) + with bookkeeper.at_position(None): + row = build_calltable_row(descs, args, op) + index = self.calltable_lookup_row(shape, row) + return shape, index + +def build_calltable_row(descs, args, op): + # see comments in CallFamily + row = {} + for desc in descs: + graph = desc.get_graph(args, op) + assert isinstance(graph, FunctionGraph) + row[desc.rowkey()] = graph + return row + class FrozenAttrFamily(object): """A family of FrozenDesc objects that have any common 'getattr' sites. @@ -295,22 +311,23 @@ else: return self.specializer(self, inputcells) - def pycall(self, schedule, args, s_previous_result, op=None): + def pycall(self, whence, args, s_previous_result, op=None): inputcells = self.parse_arguments(args) result = self.specialize(inputcells, op) if isinstance(result, FunctionGraph): graph = result # common case + annotator = self.bookkeeper.annotator # if that graph has a different signature, we need to re-parse # the arguments. 
# recreate the args object because inputcells may have been changed new_args = args.unmatch_signature(self.signature, inputcells) inputcells = self.parse_arguments(new_args, graph) - result = schedule(graph, inputcells) + result = annotator.recursivecall(graph, whence, inputcells) signature = getattr(self.pyobj, '_signature_', None) if signature: sigresult = enforce_signature_return(self, signature[1], result) if sigresult is not None: - self.bookkeeper.annotator.addpendingblock( + annotator.addpendingblock( graph, graph.returnblock, [sigresult]) result = sigresult # Some specializations may break the invariant of returning @@ -320,6 +337,10 @@ result = unionof(result, s_previous_result) return result + def get_graph(self, args, op): + inputs_s = self.parse_arguments(args) + return self.specialize(inputs_s, op) + def get_call_parameters(self, args_s): args = simple_args(args_s) inputcells = self.parse_arguments(args) @@ -347,37 +368,15 @@ @staticmethod def consider_call_site(descs, args, s_result, op): + family = descs[0].getcallfamily() shape = rawshape(args) - row = FunctionDesc.row_to_consider(descs, args, op) - family = descs[0].getcallfamily() + row = build_calltable_row(descs, args, op) family.calltable_add_row(shape, row) - - @staticmethod - def variant_for_call_site(bookkeeper, family, descs, args, op): - shape = rawshape(args) - bookkeeper.enter(None) - try: - row = FunctionDesc.row_to_consider(descs, args, op) - finally: - bookkeeper.leave() - index = family.calltable_lookup_row(shape, row) - return shape, index + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): return self - @staticmethod - def row_to_consider(descs, args, op): - # see comments in CallFamily - row = {} - for desc in descs: - def enlist(graph, ignore): - row[desc.rowkey()] = graph - return s_ImpossibleValue # meaningless - desc.pycall(enlist, args, s_ImpossibleValue, op) - assert row - return row - def get_s_signatures(self, shape): family = self.getcallfamily() table = 
family.calltables.get(shape) @@ -624,7 +623,7 @@ "specialization" % (self.name,)) return self.getclassdef(None) - def pycall(self, schedule, args, s_previous_result, op=None): + def pycall(self, whence, args, s_previous_result, op=None): from rpython.annotator.model import SomeInstance, SomeImpossibleValue if self.specialize: if self.specialize == 'specialize:ctr_location': @@ -777,6 +776,8 @@ @staticmethod def consider_call_site(descs, args, s_result, op): + descs[0].getcallfamily() + descs[0].mergecallfamilies(*descs[1:]) from rpython.annotator.model import SomeInstance, SomePBC, s_None if len(descs) == 1: # call to a single class, look at the result annotation @@ -890,13 +891,20 @@ def getuniquegraph(self): return self.funcdesc.getuniquegraph() - def pycall(self, schedule, args, s_previous_result, op=None): + def func_args(self, args): from rpython.annotator.model import SomeInstance if self.selfclassdef is None: raise Exception("calling %r" % (self,)) s_instance = SomeInstance(self.selfclassdef, flags=self.flags) - args = args.prepend(s_instance) - return self.funcdesc.pycall(schedule, args, s_previous_result, op) + return args.prepend(s_instance) + + def pycall(self, whence, args, s_previous_result, op=None): + func_args = self.func_args(args) + return self.funcdesc.pycall(whence, func_args, s_previous_result, op) + + def get_graph(self, args, op): + func_args = self.func_args(args) + return self.funcdesc.get_graph(func_args, op) def bind_under(self, classdef, name): self.bookkeeper.warning("rebinding an already bound %r" % (self,)) @@ -913,9 +921,10 @@ def consider_call_site(descs, args, s_result, op): cnt, keys, star = rawshape(args) shape = cnt + 1, keys, star # account for the extra 'self' - row = FunctionDesc.row_to_consider(descs, args, op) + row = build_calltable_row(descs, args, op) family = descs[0].getcallfamily() family.calltable_add_row(shape, row) + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): # we are computing call families and call 
tables that always contain @@ -1064,19 +1073,28 @@ return '' % (self.funcdesc, self.frozendesc) - def pycall(self, schedule, args, s_previous_result, op=None): + def func_args(self, args): from rpython.annotator.model import SomePBC s_self = SomePBC([self.frozendesc]) - args = args.prepend(s_self) - return self.funcdesc.pycall(schedule, args, s_previous_result, op) + return args.prepend(s_self) + + def pycall(self, whence, args, s_previous_result, op=None): + func_args = self.func_args(args) + return self.funcdesc.pycall(whence, func_args, s_previous_result, op) + + def get_graph(self, args, op): + func_args = self.func_args(args) + return self.funcdesc.get_graph(func_args, op) + @staticmethod def consider_call_site(descs, args, s_result, op): cnt, keys, star = rawshape(args) shape = cnt + 1, keys, star # account for the extra 'self' - row = FunctionDesc.row_to_consider(descs, args, op) + row = build_calltable_row(descs, args, op) family = descs[0].getcallfamily() family.calltable_add_row(shape, row) + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1097,102 +1097,6 @@ assert acc1 is acc2 assert acc1.attrs.keys() == ['v1'] - def test_simple_pbc_call(self): - def f1(x,y=0): - pass - def f2(x): - pass - def f3(x): - pass - def g(f): - f(1) - def h(): - f1(1) - f1(1,2) - g(f2) - g(f3) - - a = self.RPythonAnnotator() - s = a.build_types(h, []) - - fdesc1 = a.bookkeeper.getdesc(f1) - fdesc2 = a.bookkeeper.getdesc(f2) - fdesc3 = a.bookkeeper.getdesc(f3) - - fam1 = fdesc1.getcallfamily() - fam2 = fdesc2.getcallfamily() - fam3 = fdesc3.getcallfamily() - - assert fam1 is not fam2 - assert fam1 is not fam3 - assert fam3 is fam2 - - gf1 = graphof(a, f1) - gf2 = graphof(a, f2) - gf3 = graphof(a, f3) - - assert fam1.calltables == {(2, (), False): 
[{fdesc1: gf1}], - (1, (), False): [{fdesc1: gf1}]} - assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} - - def test_pbc_call_ins(self): - class A(object): - def m(self): - pass - class B(A): - def n(self): - pass - class C(A): - def __init__(self): - pass - def m(self): - pass - def f(x): - b = B() - c = C() - b.n() - if x: - a = b - else: - a = c - a.m() - - a = self.RPythonAnnotator() - s = a.build_types(f, [bool]) - - clsdef = a.bookkeeper.getuniqueclassdef - bookkeeper = a.bookkeeper - - def getmdesc(bmeth): - return bookkeeper.immutablevalue(bmeth).any_description() - - mdescA_m = getmdesc(A().m) - mdescC_m = getmdesc(C().m) - mdescB_n = getmdesc(B().n) - - assert mdescA_m.name == 'm' == mdescC_m.name - assert mdescB_n.name == 'n' - - famA_m = mdescA_m.getcallfamily() - famC_m = mdescC_m.getcallfamily() - famB_n = mdescB_n.getcallfamily() - - assert famA_m is famC_m - assert famB_n is not famA_m - - gfB_n = graphof(a, B.n.im_func) - gfA_m = graphof(a, A.m.im_func) - gfC_m = graphof(a, C.m.im_func) - - assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}] } - assert famA_m.calltables == {(1, (), False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } - - mdescCinit = getmdesc(C().__init__) - famCinit = mdescCinit.getcallfamily() - gfCinit = graphof(a, C.__init__.im_func) - - assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}] } - def test_isinstance_unsigned_1(self): def f(x): return isinstance(x, r_uint) @@ -2153,6 +2057,7 @@ s_f = a.bookkeeper.immutablevalue(f) a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()]) a.complete() + a.simplify() assert a.binding(graphof(a, f).getreturnvar()).knowntype == int fdesc = a.bookkeeper.getdesc(f) @@ -3969,28 +3874,6 @@ e = py.test.raises(Exception, a.build_types, f, [int]) assert "field '_my_lst' was migrated" in str(e.value) - def test_call_classes_with_noarg_init(self): - class A: - foo = 21 - class 
B(A): - foo = 22 - class C(A): - def __init__(self): - self.foo = 42 - class D(A): - def __init__(self): - self.foo = 43 - def f(i): - if i == 1: - cls = B - elif i == 2: - cls = D - else: - cls = C - return cls().foo - a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int]) - def test_range_variable_step(self): def g(n): return range(0, 10, n) diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -1,5 +1,8 @@ -import rpython.annotator.test.test_annrpython -parent = rpython.annotator.test.test_annrpython.TestAnnotateTestCase +import py + + +from rpython.annotator.test.test_annrpython import graphof +from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent class TestAnnotateAndSimplifyTestCase(parent): @@ -12,3 +15,122 @@ parent.RPythonAnnotator.complete(self) if self.translator is not None: self.simplify() + + def test_simple_pbc_call(self): + def f1(x,y=0): + pass + def f2(x): + pass + def f3(x): + pass + def g(f): + f(1) + def h(): + f1(1) + f1(1,2) + g(f2) + g(f3) + + a = self.RPythonAnnotator() + s = a.build_types(h, []) + + fdesc1 = a.bookkeeper.getdesc(f1) + fdesc2 = a.bookkeeper.getdesc(f2) + fdesc3 = a.bookkeeper.getdesc(f3) + + fam1 = fdesc1.getcallfamily() + fam2 = fdesc2.getcallfamily() + fam3 = fdesc3.getcallfamily() + + assert fam1 is not fam2 + assert fam1 is not fam3 + assert fam3 is fam2 + + gf1 = graphof(a, f1) + gf2 = graphof(a, f2) + gf3 = graphof(a, f3) + + assert fam1.calltables == {(2, (), False): [{fdesc1: gf1}], + (1, (), False): [{fdesc1: gf1}]} + assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} + + def test_pbc_call_ins(self): + class A(object): + def m(self): + pass + class B(A): + def n(self): + pass + class C(A): + def __init__(self): + pass + def m(self): + pass + def f(x): + b = B() + c = 
C() + b.n() + if x: + a = b + else: + a = c + a.m() + + a = self.RPythonAnnotator() + s = a.build_types(f, [bool]) + + bookkeeper = a.bookkeeper + + def getmdesc(bmeth): + return bookkeeper.immutablevalue(bmeth).any_description() + + mdescA_m = getmdesc(A().m) + mdescC_m = getmdesc(C().m) + mdescB_n = getmdesc(B().n) + + assert mdescA_m.name == 'm' == mdescC_m.name + assert mdescB_n.name == 'n' + + famA_m = mdescA_m.getcallfamily() + famC_m = mdescC_m.getcallfamily() + famB_n = mdescB_n.getcallfamily() + + assert famA_m is famC_m + assert famB_n is not famA_m + + gfB_n = graphof(a, B.n.im_func) + gfA_m = graphof(a, A.m.im_func) + gfC_m = graphof(a, C.m.im_func) + + assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}]} + assert famA_m.calltables == {(1, (), False): [ + {mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }]} + + mdescCinit = getmdesc(C().__init__) + famCinit = mdescCinit.getcallfamily() + gfCinit = graphof(a, C.__init__.im_func) + + assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}]} + + def test_call_classes_with_noarg_init(self): + class A: + foo = 21 + class B(A): + foo = 22 + class C(A): + def __init__(self): + self.foo = 42 + class D(A): + def __init__(self): + self.foo = 43 + def f(i): + if i == 1: + cls = B + elif i == 2: + cls = D + else: + cls = C + return cls().foo + a = self.RPythonAnnotator() + with py.test.raises(Exception): + a.build_types(f, [int]) diff --git a/rpython/rtyper/lltypesystem/rpbc.py b/rpython/rtyper/lltypesystem/rpbc.py deleted file mode 100644 --- a/rpython/rtyper/lltypesystem/rpbc.py +++ /dev/null @@ -1,303 +0,0 @@ -import types - -from rpython.annotator import description -from rpython.rlib.debug import ll_assert -from rpython.rlib.unroll import unrolling_iterable -from rpython.rtyper import callparse -from rpython.rtyper.lltypesystem import llmemory -from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, - Struct, Bool, Char, Ptr, malloc, 
nullptr, Array, Signed) -from rpython.rtyper.rmodel import Repr, inputconst -from rpython.rtyper.rpbc import ( - AbstractMultipleFrozenPBCRepr, - AbstractFunctionsPBCRepr, AbstractMultipleUnrelatedFrozenPBCRepr, - SingleFrozenPBCRepr, get_concrete_calltable) -from rpython.rtyper.typesystem import getfunctionptr -from rpython.tool.pairtype import pairtype - - -class MultipleFrozenPBCRepr(AbstractMultipleFrozenPBCRepr): - """Representation selected for multiple non-callable pre-built constants.""" - def __init__(self, rtyper, access_set): - self.rtyper = rtyper - self.access_set = access_set - self.pbc_type = ForwardReference() - self.lowleveltype = Ptr(self.pbc_type) - self.pbc_cache = {} - - def _setup_repr(self): - llfields = self._setup_repr_fields() - kwds = {'hints': {'immutable': True}} - self.pbc_type.become(Struct('pbc', *llfields, **kwds)) - - def create_instance(self): - return malloc(self.pbc_type, immortal=True) - - def null_instance(self): - return nullptr(self.pbc_type) - - def getfield(self, vpbc, attr, llops): - mangled_name, r_value = self.fieldmap[attr] - cmangledname = inputconst(Void, mangled_name) - return llops.genop('getfield', [vpbc, cmangledname], - resulttype=r_value) - - -class MultipleUnrelatedFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): - """Representation selected for multiple non-callable pre-built constants - with no common access set.""" - - lowleveltype = llmemory.Address - EMPTY = Struct('pbc', hints={'immutable': True}) - - def convert_pbc(self, pbcptr): - return llmemory.fakeaddress(pbcptr) - - def create_instance(self): - return malloc(self.EMPTY, immortal=True) - - def null_instance(self): - return llmemory.Address._defl() - - -class __extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, - MultipleUnrelatedFrozenPBCRepr), - pairtype(MultipleUnrelatedFrozenPBCRepr, - SingleFrozenPBCRepr), - pairtype(SingleFrozenPBCRepr, - MultipleUnrelatedFrozenPBCRepr)): - def rtype_is_((robj1, robj2), hop): - if isinstance(robj1, 
MultipleUnrelatedFrozenPBCRepr): - r = robj1 - else: - r = robj2 - vlist = hop.inputargs(r, r) - return hop.genop('adr_eq', vlist, resulttype=Bool) - - -class __extend__(pairtype(MultipleFrozenPBCRepr, - MultipleUnrelatedFrozenPBCRepr)): - def convert_from_to((robj1, robj2), v, llops): - return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) - - -# ____________________________________________________________ - -class FunctionsPBCRepr(AbstractFunctionsPBCRepr): - """Representation selected for a PBC of function(s).""" - - def setup_specfunc(self): - fields = [] - for row in self.uniquerows: - fields.append((row.attrname, row.fntype)) - kwds = {'hints': {'immutable': True}} - return Ptr(Struct('specfunc', *fields, **kwds)) - - def create_specfunc(self): - return malloc(self.lowleveltype.TO, immortal=True) - - def get_specfunc_row(self, llop, v, c_rowname, resulttype): - return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) - - -class SmallFunctionSetPBCRepr(Repr): - def __init__(self, rtyper, s_pbc): - self.rtyper = rtyper - self.s_pbc = s_pbc - self.callfamily = s_pbc.any_description().getcallfamily() - concretetable, uniquerows = get_concrete_calltable(self.rtyper, - self.callfamily) - assert len(uniquerows) == 1 - self.lowleveltype = Char - self.pointer_repr = FunctionsPBCRepr(rtyper, s_pbc) - self._conversion_tables = {} - self._compression_function = None - self._dispatch_cache = {} - - def _setup_repr(self): - if self.s_pbc.subset_of: - assert self.s_pbc.can_be_None == self.s_pbc.subset_of.can_be_None - r = self.rtyper.getrepr(self.s_pbc.subset_of) - if r is not self: - r.setup() - self.descriptions = r.descriptions - self.c_pointer_table = r.c_pointer_table - return - self.descriptions = list(self.s_pbc.descriptions) - if self.s_pbc.can_be_None: - self.descriptions.insert(0, None) - POINTER_TABLE = Array(self.pointer_repr.lowleveltype, - hints={'nolength': True}) - pointer_table = malloc(POINTER_TABLE, len(self.descriptions), - 
immortal=True) - for i, desc in enumerate(self.descriptions): - if desc is not None: - pointer_table[i] = self.pointer_repr.convert_desc(desc) - else: - pointer_table[i] = self.pointer_repr.convert_const(None) - self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table) - - def get_s_callable(self): - return self.s_pbc - - def get_r_implfunc(self): - return self, 0 - - def get_s_signatures(self, shape): - funcdesc = self.s_pbc.any_description() - return funcdesc.get_s_signatures(shape) - - def convert_desc(self, funcdesc): - return chr(self.descriptions.index(funcdesc)) - - def convert_const(self, value): - if isinstance(value, types.MethodType) and value.im_self is None: - value = value.im_func # unbound method -> bare function - if value is None: - return chr(0) - funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) - return self.convert_desc(funcdesc) - - def rtype_simple_call(self, hop): - return self.call(hop) - - def rtype_call_args(self, hop): - return self.call(hop) - - def dispatcher(self, shape, index, argtypes, resulttype): - key = shape, index, tuple(argtypes), resulttype - if key in self._dispatch_cache: - return self._dispatch_cache[key] - from rpython.translator.unsimplify import varoftype - from rpython.flowspace.model import FunctionGraph, Link, Block, SpaceOperation - inputargs = [varoftype(t) for t in [Char] + argtypes] - startblock = Block(inputargs) - startblock.exitswitch = inputargs[0] - graph = FunctionGraph("dispatcher", startblock, varoftype(resulttype)) - row_of_graphs = self.callfamily.calltables[shape][index] - links = [] - descs = list(self.s_pbc.descriptions) - if self.s_pbc.can_be_None: - descs.insert(0, None) - for desc in descs: - if desc is None: - continue - args_v = [varoftype(t) for t in argtypes] - b = Block(args_v) - llfn = self.rtyper.getcallable(row_of_graphs[desc]) - v_fn = inputconst(typeOf(llfn), llfn) - v_result = varoftype(resulttype) - b.operations.append( - SpaceOperation("direct_call", [v_fn] + 
args_v, v_result)) - b.closeblock(Link([v_result], graph.returnblock)) - i = self.descriptions.index(desc) - links.append(Link(inputargs[1:], b, chr(i))) - links[-1].llexitcase = chr(i) - startblock.closeblock(*links) - self.rtyper.annotator.translator.graphs.append(graph) - ll_ret = getfunctionptr(graph) - #FTYPE = FuncType - c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) - return c_ret - - def call(self, hop): - bk = self.rtyper.annotator.bookkeeper - args = hop.spaceop.build_args(hop.args_s[1:]) - s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc - descs = list(s_pbc.descriptions) - vfcs = description.FunctionDesc.variant_for_call_site - shape, index = vfcs(bk, self.callfamily, descs, args, hop.spaceop) - row_of_graphs = self.callfamily.calltables[shape][index] - anygraph = row_of_graphs.itervalues().next() # pick any witness - vlist = [hop.inputarg(self, arg=0)] - vlist += callparse.callparse(self.rtyper, anygraph, hop) - rresult = callparse.getrresult(self.rtyper, anygraph) - hop.exception_is_here() - v_dispatcher = self.dispatcher(shape, index, [v.concretetype for v in vlist[1:]], rresult.lowleveltype) - v_result = hop.genop('direct_call', [v_dispatcher] + vlist, - resulttype=rresult) - return hop.llops.convertvar(v_result, rresult, hop.r_result) - - def rtype_bool(self, hop): - if not self.s_pbc.can_be_None: - return inputconst(Bool, True) - else: - v1, = hop.inputargs(self) - return hop.genop('char_ne', [v1, inputconst(Char, '\000')], - resulttype=Bool) - - -class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): - def convert_from_to((r_set, r_ptr), v, llops): - if r_ptr.lowleveltype is Void: - return inputconst(Void, None) - else: - assert v.concretetype is Char - v_int = llops.genop('cast_char_to_int', [v], - resulttype=Signed) - return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], - resulttype=r_ptr.lowleveltype) - - -def compression_function(r_set): - if r_set._compression_function is 
None: - table = [] - for i, p in enumerate(r_set.c_pointer_table.value): - table.append((chr(i), p)) - last_c, last_p = table[-1] - unroll_table = unrolling_iterable(table[:-1]) - - def ll_compress(fnptr): - for c, p in unroll_table: - if fnptr == p: - return c - else: - ll_assert(fnptr == last_p, "unexpected function pointer") - return last_c - r_set._compression_function = ll_compress - return r_set._compression_function - - -class __extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): - def convert_from_to((r_ptr, r_set), v, llops): - if r_ptr.lowleveltype is Void: - desc, = r_ptr.s_pbc.descriptions - return inputconst(Char, r_set.convert_desc(desc)) - else: - ll_compress = compression_function(r_set) - return llops.gendirectcall(ll_compress, v) - - -def conversion_table(r_from, r_to): - if r_to in r_from._conversion_tables: - return r_from._conversion_tables[r_to] - else: - t = malloc(Array(Char, hints={'nolength': True}), - len(r_from.descriptions), immortal=True) - l = [] - for i, d in enumerate(r_from.descriptions): - if d in r_to.descriptions: - j = r_to.descriptions.index(d) - l.append(j) - t[i] = chr(j) - else: - l.append(None) - if l == range(len(r_from.descriptions)): - r = None - else: - r = inputconst(Ptr(Array(Char, hints={'nolength': True})), t) - r_from._conversion_tables[r_to] = r - return r - - -class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): - def convert_from_to((r_from, r_to), v, llops): - c_table = conversion_table(r_from, r_to) - if c_table: - assert v.concretetype is Char - v_int = llops.genop('cast_char_to_int', [v], - resulttype=Signed) - return llops.genop('getarrayitem', [c_table, v_int], - resulttype=Char) - else: - return v diff --git a/rpython/rtyper/rnone.py b/rpython/rtyper/rnone.py --- a/rpython/rtyper/rnone.py +++ b/rpython/rtyper/rnone.py @@ -3,7 +3,7 @@ from rpython.rtyper.rmodel import Repr, TyperError, inputconst from rpython.rtyper.lltypesystem.lltype import Void, Bool, Ptr, Char from 
rpython.rtyper.lltypesystem.llmemory import Address -from rpython.rtyper.lltypesystem.rpbc import SmallFunctionSetPBCRepr +from rpython.rtyper.rpbc import SmallFunctionSetPBCRepr from rpython.rtyper.annlowlevel import llstr from rpython.tool.pairtype import pairtype diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -1,47 +1,59 @@ import types -from rpython.annotator import model as annmodel, description +from rpython.flowspace.model import FunctionGraph, Link, Block, SpaceOperation +from rpython.annotator import model as annmodel +from rpython.annotator.description import ( + FunctionDesc, ClassDesc, MethodDesc, FrozenDesc, MethodOfFrozenDesc) from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args +from rpython.rlib.debug import ll_assert +from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper import rclass, callparse from rpython.rtyper.rclass import CLASSTYPE, OBJECT_VTABLE, OBJECTPTR from rpython.rtyper.error import TyperError -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import llmemory +from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, + Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed, cast_pointer) from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, warning, impossible_repr) +from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pair, pairtype +from rpython.translator.unsimplify import varoftype def small_cand(rtyper, s_pbc): if 1 < len(s_pbc.descriptions) < rtyper.getconfig().translation.withsmallfuncsets: callfamily = s_pbc.any_description().getcallfamily() - concretetable, uniquerows = get_concrete_calltable(rtyper, callfamily) - if len(uniquerows) == 1 and (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of)): + llct = 
get_concrete_calltable(rtyper, callfamily) + if (len(llct.uniquerows) == 1 and + (not s_pbc.subset_of or small_cand(rtyper, s_pbc.subset_of))): return True return False class __extend__(annmodel.SomePBC): def rtyper_makerepr(self, rtyper): - from rpython.rtyper.lltypesystem.rpbc import ( - FunctionsPBCRepr, SmallFunctionSetPBCRepr) kind = self.getKind() - if issubclass(kind, description.FunctionDesc): - sample = self.any_description() - callfamily = sample.querycallfamily() - if callfamily and callfamily.total_calltable_size > 0: - getRepr = FunctionsPBCRepr - if small_cand(rtyper, self): - getRepr = SmallFunctionSetPBCRepr + if issubclass(kind, FunctionDesc): + if len(self.descriptions) == 1 and not self.can_be_None: + getRepr = FunctionRepr else: - getRepr = getFrozenPBCRepr - elif issubclass(kind, description.ClassDesc): + sample = self.any_description() + callfamily = sample.querycallfamily() + if callfamily and callfamily.total_calltable_size > 0: + getRepr = FunctionsPBCRepr + if small_cand(rtyper, self): + getRepr = SmallFunctionSetPBCRepr + else: + getRepr = getFrozenPBCRepr + elif issubclass(kind, ClassDesc): # user classes getRepr = ClassesPBCRepr - elif issubclass(kind, description.MethodDesc): + elif issubclass(kind, MethodDesc): getRepr = MethodsPBCRepr - elif issubclass(kind, description.FrozenDesc): + elif issubclass(kind, FrozenDesc): getRepr = getFrozenPBCRepr - elif issubclass(kind, description.MethodOfFrozenDesc): + elif issubclass(kind, MethodOfFrozenDesc): getRepr = MethodOfFrozenPBCRepr else: raise TyperError("unexpected PBC kind %r" % (kind,)) @@ -55,24 +67,34 @@ t = self.subset_of.rtyper_makekey() else: t = () - return tuple([self.__class__, self.can_be_None]+lst)+t + return tuple([self.__class__, self.can_be_None] + lst) + t # ____________________________________________________________ class ConcreteCallTableRow(dict): """A row in a concrete call table.""" + @classmethod + def from_row(cls, rtyper, row): + concreterow = cls() + for 
funcdesc, graph in row.items(): + llfn = rtyper.getcallable(graph) + concreterow[funcdesc] = llfn + assert len(concreterow) > 0 + # 'typeOf(llfn)' should be the same for all graphs + concreterow.fntype = typeOf(llfn) + return concreterow -def build_concrete_calltable(rtyper, callfamily): - """Build a complete call table of a call family - with concrete low-level function objs. - """ - concretetable = {} # (shape,index): row, maybe with duplicates - uniquerows = [] # list of rows, without duplicates - def lookuprow(row): - # a 'matching' row is one that has the same llfn, expect - # that it may have more or less 'holes' - for existingindex, existingrow in enumerate(uniquerows): +class LLCallTable(object): + """A call table of a call family with low-level functions.""" + def __init__(self, table, uniquerows): + self.table = table # (shape,index): row, maybe with duplicates + self.uniquerows = uniquerows # list of rows, without duplicates + + def lookup(self, row): + """A 'matching' row is one that has the same llfn, except + that it may have more or less 'holes'.""" + for existingindex, existingrow in enumerate(self.uniquerows): if row.fntype != existingrow.fntype: continue # not the same pointer type, cannot match for funcdesc, llfn in row.items(): @@ -90,47 +112,49 @@ return existingindex, merged raise LookupError - def addrow(row): - # add a row to the table, potentially merging it with an existing row + def add(self, row): + """Add a row to the table, potentially merging it with an existing row + """ try: - index, merged = lookuprow(row) + index, merged = self.lookup(row) except LookupError: - uniquerows.append(row) # new row + self.uniquerows.append(row) # new row else: - if merged == uniquerows[index]: + if merged == self.uniquerows[index]: pass # already exactly in the table else: - del uniquerows[index] - addrow(merged) # add the potentially larger merged row + del self.uniquerows[index] + self.add(merged) # add the potentially larger merged row + + +def 
build_concrete_calltable(rtyper, callfamily): + """Build a complete call table of a call family + with concrete low-level function objs. + """ + concretetable = {} + uniquerows = [] + llct = LLCallTable(concretetable, uniquerows) concreterows = {} for shape, rows in callfamily.calltables.items(): for index, row in enumerate(rows): - concreterow = ConcreteCallTableRow() - for funcdesc, graph in row.items(): - llfn = rtyper.getcallable(graph) - concreterow[funcdesc] = llfn - assert len(concreterow) > 0 - concreterow.fntype = lltype.typeOf(llfn)# 'llfn' from the loop above - # (they should all have the same type) + concreterow = ConcreteCallTableRow.from_row(rtyper, row) concreterows[shape, index] = concreterow - - for row in concreterows.values(): - addrow(row) + llct.add(concreterow) for (shape, index), row in concreterows.items(): - existingindex, biggerrow = lookuprow(row) - row = uniquerows[existingindex] - assert biggerrow == row # otherwise, addrow() is broken - concretetable[shape, index] = row + existingindex, biggerrow = llct.lookup(row) + row = llct.uniquerows[existingindex] + assert biggerrow == row + llct.table[shape, index] = row - if len(uniquerows) == 1: - uniquerows[0].attrname = None + if len(llct.uniquerows) == 1: + llct.uniquerows[0].attrname = None else: - for finalindex, row in enumerate(uniquerows): + for finalindex, row in enumerate(llct.uniquerows): row.attrname = 'variant%d' % finalindex - return concretetable, uniquerows + return llct def get_concrete_calltable(rtyper, callfamily): """Get a complete call table of a call family @@ -140,40 +164,20 @@ try: cached = rtyper.concrete_calltables[callfamily] except KeyError: - concretetable, uniquerows = build_concrete_calltable(rtyper, callfamily) - cached = concretetable, uniquerows, callfamily.total_calltable_size + llct = build_concrete_calltable(rtyper, callfamily) + cached = llct, callfamily.total_calltable_size rtyper.concrete_calltables[callfamily] = cached else: - concretetable, uniquerows, 
oldsize = cached + llct, oldsize = cached if oldsize != callfamily.total_calltable_size: raise TyperError("call table was unexpectedly extended") - return concretetable, uniquerows + return llct - -class AbstractFunctionsPBCRepr(CanBeNull, Repr): - """Representation selected for a PBC of function(s).""" - +class FunctionReprBase(Repr): def __init__(self, rtyper, s_pbc): self.rtyper = rtyper self.s_pbc = s_pbc self.callfamily = s_pbc.any_description().getcallfamily() - if len(s_pbc.descriptions) == 1 and not s_pbc.can_be_None: - # a single function - self.lowleveltype = lltype.Void - else: - concretetable, uniquerows = get_concrete_calltable(self.rtyper, - self.callfamily) - self.concretetable = concretetable - self.uniquerows = uniquerows - if len(uniquerows) == 1: - row = uniquerows[0] - self.lowleveltype = row.fntype - else: - # several functions, each with several specialized variants. - # each function becomes a pointer to a Struct containing - # pointers to its variants. - self.lowleveltype = self.setup_specfunc() - self.funccache = {} def get_s_callable(self): return self.s_pbc @@ -185,50 +189,107 @@ funcdesc = self.s_pbc.any_description() return funcdesc.get_s_signatures(shape) + def rtype_simple_call(self, hop): + return self.call(hop) + + def rtype_call_args(self, hop): + return self.call(hop) + + def call(self, hop): + bk = self.rtyper.annotator.bookkeeper + args = hop.spaceop.build_args(hop.args_s[1:]) + s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc + descs = list(s_pbc.descriptions) + shape, index = self.callfamily.find_row(bk, descs, args, hop.spaceop) + row_of_graphs = self.callfamily.calltables[shape][index] + anygraph = row_of_graphs.itervalues().next() # pick any witness + vfn = hop.inputarg(self, arg=0) + vlist = [self.convert_to_concrete_llfn(vfn, shape, index, + hop.llops)] + vlist += callparse.callparse(self.rtyper, anygraph, hop) + rresult = callparse.getrresult(self.rtyper, anygraph) + hop.exception_is_here() + if 
isinstance(vlist[0], Constant): + v = hop.genop('direct_call', vlist, resulttype=rresult) + else: + vlist.append(hop.inputconst(Void, row_of_graphs.values())) + v = hop.genop('indirect_call', vlist, resulttype=rresult) + if hop.r_result is impossible_repr: + return None # see test_always_raising_methods + else: + return hop.llops.convertvar(v, rresult, hop.r_result) + + +class FunctionsPBCRepr(CanBeNull, FunctionReprBase): + """Representation selected for a PBC of functions.""" + + def __init__(self, rtyper, s_pbc): + FunctionReprBase.__init__(self, rtyper, s_pbc) + llct = get_concrete_calltable(self.rtyper, self.callfamily) + self.concretetable = llct.table + self.uniquerows = llct.uniquerows + if len(llct.uniquerows) == 1: + row = llct.uniquerows[0] + self.lowleveltype = row.fntype + else: + # several functions, each with several specialized variants. + # each function becomes a pointer to a Struct containing + # pointers to its variants. + self.lowleveltype = self.setup_specfunc() + self.funccache = {} + + def setup_specfunc(self): + fields = [] + for row in self.uniquerows: + fields.append((row.attrname, row.fntype)) + kwds = {'hints': {'immutable': True}} + return Ptr(Struct('specfunc', *fields, **kwds)) + + def create_specfunc(self): + return malloc(self.lowleveltype.TO, immortal=True) + + def get_specfunc_row(self, llop, v, c_rowname, resulttype): + return llop.genop('getfield', [v, c_rowname], resulttype=resulttype) + def convert_desc(self, funcdesc): # get the whole "column" of the call table corresponding to this desc try: return self.funccache[funcdesc] except KeyError: pass - if self.lowleveltype is lltype.Void: - result = None + llfns = {} + found_anything = False + for row in self.uniquerows: + if funcdesc in row: + llfn = row[funcdesc] + found_anything = True + else: + # missing entry -- need a 'null' of the type that matches + # this row + llfn = self.rtyper.type_system.null_callable(row.fntype) + llfns[row.attrname] = llfn + if len(self.uniquerows) 
== 1: + if found_anything: + result = llfn # from the loop above + else: + # extremely rare case, shown only sometimes by + # test_bug_callfamily: don't emit NULL, because that + # would be interpreted as equal to None... It should + # never be called anyway. + result = rffi.cast(self.lowleveltype, ~len(self.funccache)) else: - llfns = {} - found_anything = False - for row in self.uniquerows: - if funcdesc in row: - llfn = row[funcdesc] - found_anything = True - else: - # missing entry -- need a 'null' of the type that matches - # this row - llfn = self.rtyper.type_system.null_callable(row.fntype) - llfns[row.attrname] = llfn - if len(self.uniquerows) == 1: - if found_anything: - result = llfn # from the loop above - else: - # extremely rare case, shown only sometimes by - # test_bug_callfamily: don't emit NULL, because that - # would be interpreted as equal to None... It should - # never be called anyway. - result = rffi.cast(self.lowleveltype, ~len(self.funccache)) - else: - # build a Struct with all the values collected in 'llfns' - result = self.create_specfunc() - for attrname, llfn in llfns.items(): - setattr(result, attrname, llfn) + # build a Struct with all the values collected in 'llfns' + result = self.create_specfunc() + for attrname, llfn in llfns.items(): + setattr(result, attrname, llfn) self.funccache[funcdesc] = result return result def convert_const(self, value): if isinstance(value, types.MethodType) and value.im_self is None: - value = value.im_func # unbound method -> bare function + value = value.im_func # unbound method -> bare function elif isinstance(value, staticmethod): - value = value.__get__(42) # hackish, get the function wrapped by staticmethod - if self.lowleveltype is lltype.Void: - return None + value = value.__get__(42) # hackish, get the function wrapped by staticmethod if value is None: null = self.rtyper.type_system.null_callable(self.lowleveltype) return null @@ -241,30 +302,42 @@ 'index' and 'shape' tells which of its items 
we are interested in. """ assert v.concretetype == self.lowleveltype - if self.lowleveltype is lltype.Void: - assert len(self.s_pbc.descriptions) == 1 - # lowleveltype wouldn't be Void otherwise - funcdesc, = self.s_pbc.descriptions - row_of_one_graph = self.callfamily.calltables[shape][index] - graph = row_of_one_graph[funcdesc] - llfn = self.rtyper.getcallable(graph) - return inputconst(lltype.typeOf(llfn), llfn) - elif len(self.uniquerows) == 1: + if len(self.uniquerows) == 1: return v else: # 'v' is a Struct pointer, read the corresponding field row = self.concretetable[shape, index] - cname = inputconst(lltype.Void, row.attrname) + cname = inputconst(Void, row.attrname) return self.get_specfunc_row(llop, v, cname, row.fntype) + +class FunctionRepr(FunctionReprBase): + """Repr for a constant function""" + + lowleveltype = Void + + def convert_desc(self, funcdesc): + return None + + def convert_const(self, value): + return None + + def convert_to_concrete_llfn(self, v, shape, index, llop): + """Convert the variable 'v' to a variable referring to a concrete + low-level function. In case the call table contains multiple rows, + 'index' and 'shape' tells which of its items we are interested in. + """ + assert v.concretetype == Void + funcdesc, = self.s_pbc.descriptions + row_of_one_graph = self.callfamily.calltables[shape][index] + graph = row_of_one_graph[funcdesc] + llfn = self.rtyper.getcallable(graph) + return inputconst(typeOf(llfn), llfn) + def get_unique_llfn(self): # try to build a unique low-level function. Avoid to use # whenever possible! Doesn't work with specialization, multiple # different call sites, etc. 
- if self.lowleveltype is not lltype.Void: - raise TyperError("cannot pass multiple functions here") - assert len(self.s_pbc.descriptions) == 1 - # lowleveltype wouldn't be Void otherwise funcdesc, = self.s_pbc.descriptions tables = [] # find the simple call in the calltable for shape, table in self.callfamily.calltables.items(): @@ -280,69 +353,229 @@ if not graphs: raise TyperError("cannot pass here a function that is not called") graph = graphs[0] - if graphs != [graph]*len(graphs): + if graphs != [graph] * len(graphs): raise TyperError("cannot pass a specialized function here") llfn = self.rtyper.getcallable(graph) - return inputconst(lltype.typeOf(llfn), llfn) + return inputconst(typeOf(llfn), llfn) def get_concrete_llfn(self, s_pbc, args_s, op): bk = self.rtyper.annotator.bookkeeper - descs = list(s_pbc.descriptions) - vfcs = description.FunctionDesc.variant_for_call_site + funcdesc, = s_pbc.descriptions args = simple_args(args_s) - shape, index = vfcs(bk, self.callfamily, descs, args, op) - funcdesc, = descs - row_of_one_graph = self.callfamily.calltables[shape][index] - graph = row_of_one_graph[funcdesc] + with bk.at_position(None): + graph = funcdesc.get_graph(args, op) llfn = self.rtyper.getcallable(graph) - return inputconst(lltype.typeOf(llfn), llfn) + return inputconst(typeOf(llfn), llfn) - def rtype_simple_call(self, hop): - return self.call(hop) - def rtype_call_args(self, hop): - return self.call(hop) + +class __extend__(pairtype(FunctionRepr, FunctionRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return v + +class __extend__(pairtype(FunctionRepr, FunctionsPBCRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return inputconst(r_fpbc2, r_fpbc1.s_pbc.const) + +class __extend__(pairtype(FunctionsPBCRepr, FunctionRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + return inputconst(Void, None) + +class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)): + def convert_from_to((r_fpbc1, r_fpbc2), v, llops): + 
# this check makes sense because both source and dest repr are FunctionsPBCRepr + if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype: + return v + return NotImplemented + + +class SmallFunctionSetPBCRepr(FunctionReprBase): + def __init__(self, rtyper, s_pbc): + FunctionReprBase.__init__(self, rtyper, s_pbc) + llct = get_concrete_calltable(self.rtyper, self.callfamily) + assert len(llct.uniquerows) == 1 + self.lowleveltype = Char + self.pointer_repr = FunctionsPBCRepr(rtyper, s_pbc) + self._conversion_tables = {} + self._compression_function = None + self._dispatch_cache = {} + + def _setup_repr(self): + if self.s_pbc.subset_of: + assert self.s_pbc.can_be_None == self.s_pbc.subset_of.can_be_None + r = self.rtyper.getrepr(self.s_pbc.subset_of) + if r is not self: + r.setup() + self.descriptions = r.descriptions + self.c_pointer_table = r.c_pointer_table + return + self.descriptions = list(self.s_pbc.descriptions) + if self.s_pbc.can_be_None: + self.descriptions.insert(0, None) + POINTER_TABLE = Array(self.pointer_repr.lowleveltype, + hints={'nolength': True}) + pointer_table = malloc(POINTER_TABLE, len(self.descriptions), + immortal=True) + for i, desc in enumerate(self.descriptions): + if desc is not None: + pointer_table[i] = self.pointer_repr.convert_desc(desc) + else: + pointer_table[i] = self.pointer_repr.convert_const(None) + self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table) + + def convert_desc(self, funcdesc): + return chr(self.descriptions.index(funcdesc)) + + def convert_const(self, value): + if isinstance(value, types.MethodType) and value.im_self is None: + value = value.im_func # unbound method -> bare function + if value is None: + return chr(0) + funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) + return self.convert_desc(funcdesc) + + def dispatcher(self, shape, index, argtypes, resulttype): + key = shape, index, tuple(argtypes), resulttype + if key in self._dispatch_cache: + return self._dispatch_cache[key] + graph = 
self.make_dispatcher(shape, index, argtypes, resulttype) + self.rtyper.annotator.translator.graphs.append(graph) + ll_ret = getfunctionptr(graph) + c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) + return c_ret + + def make_dispatcher(self, shape, index, argtypes, resulttype): + inputargs = [varoftype(t) for t in [Char] + argtypes] + startblock = Block(inputargs) + startblock.exitswitch = inputargs[0] + graph = FunctionGraph("dispatcher", startblock, varoftype(resulttype)) + row_of_graphs = self.callfamily.calltables[shape][index] + links = [] + descs = list(self.s_pbc.descriptions) + if self.s_pbc.can_be_None: + descs.insert(0, None) + for desc in descs: + if desc is None: + continue + args_v = [varoftype(t) for t in argtypes] + b = Block(args_v) + llfn = self.rtyper.getcallable(row_of_graphs[desc]) + v_fn = inputconst(typeOf(llfn), llfn) + v_result = varoftype(resulttype) + b.operations.append( + SpaceOperation("direct_call", [v_fn] + args_v, v_result)) + b.closeblock(Link([v_result], graph.returnblock)) + i = self.descriptions.index(desc) + links.append(Link(inputargs[1:], b, chr(i))) + links[-1].llexitcase = chr(i) + startblock.closeblock(*links) + return graph def call(self, hop): bk = self.rtyper.annotator.bookkeeper args = hop.spaceop.build_args(hop.args_s[1:]) s_pbc = hop.args_s[0] # possibly more precise than self.s_pbc descs = list(s_pbc.descriptions) - vfcs = description.FunctionDesc.variant_for_call_site - shape, index = vfcs(bk, self.callfamily, descs, args, hop.spaceop) + shape, index = self.callfamily.find_row(bk, descs, args, hop.spaceop) row_of_graphs = self.callfamily.calltables[shape][index] anygraph = row_of_graphs.itervalues().next() # pick any witness - vfn = hop.inputarg(self, arg=0) - vlist = [self.convert_to_concrete_llfn(vfn, shape, index, - hop.llops)] + vlist = [hop.inputarg(self, arg=0)] vlist += callparse.callparse(self.rtyper, anygraph, hop) rresult = callparse.getrresult(self.rtyper, anygraph) 
hop.exception_is_here() - if isinstance(vlist[0], Constant): - v = hop.genop('direct_call', vlist, resulttype = rresult) + v_dispatcher = self.dispatcher(shape, index, + [v.concretetype for v in vlist[1:]], rresult.lowleveltype) + v_result = hop.genop('direct_call', [v_dispatcher] + vlist, + resulttype=rresult) + return hop.llops.convertvar(v_result, rresult, hop.r_result) + + def rtype_bool(self, hop): + if not self.s_pbc.can_be_None: + return inputconst(Bool, True) else: - vlist.append(hop.inputconst(lltype.Void, row_of_graphs.values())) - v = hop.genop('indirect_call', vlist, resulttype = rresult) - if hop.r_result is impossible_repr: - return None # see test_always_raising_methods + v1, = hop.inputargs(self) + return hop.genop('char_ne', [v1, inputconst(Char, '\000')], + resulttype=Bool) + + +class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionRepr)): + def convert_from_to((r_set, r_ptr), v, llops): + return inputconst(Void, None) + +class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): + def convert_from_to((r_set, r_ptr), v, llops): + assert v.concretetype is Char + v_int = llops.genop('cast_char_to_int', [v], resulttype=Signed) + return llops.genop('getarrayitem', [r_set.c_pointer_table, v_int], + resulttype=r_ptr.lowleveltype) + + +def compression_function(r_set): + if r_set._compression_function is None: + table = [] + for i, p in enumerate(r_set.c_pointer_table.value): + table.append((chr(i), p)) + last_c, last_p = table[-1] + unroll_table = unrolling_iterable(table[:-1]) + + def ll_compress(fnptr): + for c, p in unroll_table: + if fnptr == p: + return c + else: + ll_assert(fnptr == last_p, "unexpected function pointer") + return last_c + r_set._compression_function = ll_compress + return r_set._compression_function + + +class __extend__(pairtype(FunctionRepr, SmallFunctionSetPBCRepr)): + def convert_from_to((r_ptr, r_set), v, llops): + desc, = r_ptr.s_pbc.descriptions + return inputconst(Char, r_set.convert_desc(desc)) + +class 
__extend__(pairtype(FunctionsPBCRepr, SmallFunctionSetPBCRepr)): + def convert_from_to((r_ptr, r_set), v, llops): + ll_compress = compression_function(r_set) + return llops.gendirectcall(ll_compress, v) + + +def conversion_table(r_from, r_to): + if r_to in r_from._conversion_tables: + return r_from._conversion_tables[r_to] + else: + t = malloc(Array(Char, hints={'nolength': True}), + len(r_from.descriptions), immortal=True) + l = [] + for i, d in enumerate(r_from.descriptions): + if d in r_to.descriptions: + j = r_to.descriptions.index(d) + l.append(j) + t[i] = chr(j) + else: + l.append(None) + if l == range(len(r_from.descriptions)): + r = None else: - return hop.llops.convertvar(v, rresult, hop.r_result) + r = inputconst(Ptr(Array(Char, hints={'nolength': True})), t) + r_from._conversion_tables[r_to] = r + return r -class __extend__(pairtype(AbstractFunctionsPBCRepr, AbstractFunctionsPBCRepr)): - def convert_from_to((r_fpbc1, r_fpbc2), v, llops): - # this check makes sense because both source and dest repr are FunctionsPBCRepr - if r_fpbc1.lowleveltype == r_fpbc2.lowleveltype: + +class __extend__(pairtype(SmallFunctionSetPBCRepr, SmallFunctionSetPBCRepr)): + def convert_from_to((r_from, r_to), v, llops): + c_table = conversion_table(r_from, r_to) + if c_table: + assert v.concretetype is Char + v_int = llops.genop('cast_char_to_int', [v], + resulttype=Signed) + return llops.genop('getarrayitem', [c_table, v_int], + resulttype=Char) + else: return v - if r_fpbc1.lowleveltype is lltype.Void: - return inputconst(r_fpbc2, r_fpbc1.s_pbc.const) - if r_fpbc2.lowleveltype is lltype.Void: - return inputconst(lltype.Void, None) - return NotImplemented def getFrozenPBCRepr(rtyper, s_pbc): - from rpython.rtyper.lltypesystem.rpbc import ( - MultipleUnrelatedFrozenPBCRepr, MultipleFrozenPBCRepr) descs = list(s_pbc.descriptions) assert len(descs) >= 1 if len(descs) == 1 and not s_pbc.can_be_None: @@ -369,7 +602,7 @@ class SingleFrozenPBCRepr(Repr): """Representation selected for 
a single non-callable pre-built constant.""" - lowleveltype = lltype.Void + lowleveltype = Void def __init__(self, frozendesc): self.frozendesc = frozendesc @@ -394,9 +627,18 @@ return self.getstr() -class AbstractMultipleUnrelatedFrozenPBCRepr(CanBeNull, Repr): +class MultipleFrozenPBCReprBase(CanBeNull, Repr): + def convert_const(self, pbc): + if pbc is None: + return self.null_instance() + frozendesc = self.rtyper.annotator.bookkeeper.getdesc(pbc) + return self.convert_desc(frozendesc) + +class MultipleUnrelatedFrozenPBCRepr(MultipleFrozenPBCReprBase): """For a SomePBC of frozen PBCs that have no common access set. The only possible operation on such a thing is comparison with 'is'.""" + lowleveltype = llmemory.Address + EMPTY = Struct('pbc', hints={'immutable': True}) def __init__(self, rtyper): self.rtyper = rtyper @@ -407,7 +649,7 @@ return self.converted_pbc_cache[frozendesc] except KeyError: r = self.rtyper.getrepr(annmodel.SomePBC([frozendesc])) - if r.lowleveltype is lltype.Void: + if r.lowleveltype is Void: # must create a new empty structure, as a placeholder pbc = self.create_instance() else: @@ -416,22 +658,62 @@ self.converted_pbc_cache[frozendesc] = convpbc return convpbc - def convert_const(self, pbc): - if pbc is None: - return self.null_instance() - if isinstance(pbc, types.MethodType) and pbc.im_self is None: - value = pbc.im_func # unbound method -> bare function - frozendesc = self.rtyper.annotator.bookkeeper.getdesc(pbc) - return self.convert_desc(frozendesc) + def convert_pbc(self, pbcptr): + return llmemory.fakeaddress(pbcptr) + + def create_instance(self): + return malloc(self.EMPTY, immortal=True) + + def null_instance(self): + return llmemory.Address._defl() def rtype_getattr(_, hop): if not hop.s_result.is_constant(): raise TyperError("getattr on a constant PBC returns a non-constant") return hop.inputconst(hop.r_result, hop.s_result.const) -class AbstractMultipleFrozenPBCRepr(AbstractMultipleUnrelatedFrozenPBCRepr): +class 
__extend__(pairtype(MultipleUnrelatedFrozenPBCRepr, + MultipleUnrelatedFrozenPBCRepr), + pairtype(MultipleUnrelatedFrozenPBCRepr, + SingleFrozenPBCRepr), + pairtype(SingleFrozenPBCRepr, + MultipleUnrelatedFrozenPBCRepr)): + def rtype_is_((robj1, robj2), hop): + if isinstance(robj1, MultipleUnrelatedFrozenPBCRepr): + r = robj1 + else: + r = robj2 + vlist = hop.inputargs(r, r) + return hop.genop('adr_eq', vlist, resulttype=Bool) + + +class MultipleFrozenPBCRepr(MultipleFrozenPBCReprBase): """For a SomePBC of frozen PBCs with a common attribute access set.""" + def __init__(self, rtyper, access_set): + self.rtyper = rtyper + self.access_set = access_set + self.pbc_type = ForwardReference() + self.lowleveltype = Ptr(self.pbc_type) + self.pbc_cache = {} + + def _setup_repr(self): + llfields = self._setup_repr_fields() + kwds = {'hints': {'immutable': True}} + self.pbc_type.become(Struct('pbc', *llfields, **kwds)) + + def create_instance(self): + return malloc(self.pbc_type, immortal=True) + + def null_instance(self): + return nullptr(self.pbc_type) + + def getfield(self, vpbc, attr, llops): + mangled_name, r_value = self.fieldmap[attr] + cmangledname = inputconst(Void, mangled_name) + return llops.genop('getfield', [vpbc, cmangledname], + resulttype=r_value) + def _setup_repr_fields(self): fields = [] self.fieldmap = {} @@ -448,7 +730,7 @@ def convert_desc(self, frozendesc): if (self.access_set is not None and - frozendesc not in self.access_set.descs): + frozendesc not in self.access_set.descs): raise TyperError("not found in PBC access set: %r" % (frozendesc,)) try: return self.pbc_cache[frozendesc] @@ -457,7 +739,7 @@ result = self.create_instance() self.pbc_cache[frozendesc] = result for attr, (mangled_name, r_value) in self.fieldmap.items(): - if r_value.lowleveltype is lltype.Void: + if r_value.lowleveltype is Void: continue try: thisattrvalue = frozendesc.attrcache[attr] @@ -474,18 +756,23 @@ return hop.inputconst(hop.r_result, hop.s_result.const) attr = 
hop.args_s[1].const - vpbc, vattr = hop.inputargs(self, lltype.Void) + vpbc, vattr = hop.inputargs(self, Void) v_res = self.getfield(vpbc, attr, hop.llops) mangled_name, r_res = self.fieldmap[attr] return hop.llops.convertvar(v_res, r_res, hop.r_result) -class __extend__(pairtype(AbstractMultipleFrozenPBCRepr, AbstractMultipleFrozenPBCRepr)): +class __extend__(pairtype(MultipleFrozenPBCRepr, + MultipleUnrelatedFrozenPBCRepr)): + def convert_from_to((robj1, robj2), v, llops): + return llops.genop('cast_ptr_to_adr', [v], resulttype=llmemory.Address) + +class __extend__(pairtype(MultipleFrozenPBCRepr, MultipleFrozenPBCRepr)): def convert_from_to((r_pbc1, r_pbc2), v, llops): if r_pbc1.access_set == r_pbc2.access_set: return v return NotImplemented -class __extend__(pairtype(SingleFrozenPBCRepr, AbstractMultipleFrozenPBCRepr)): +class __extend__(pairtype(SingleFrozenPBCRepr, MultipleFrozenPBCRepr)): def convert_from_to((r_pbc1, r_pbc2), v, llops): frozendesc1 = r_pbc1.frozendesc access = frozendesc1.queryattrfamily() @@ -495,10 +782,10 @@ return Constant(value, lltype) return NotImplemented -class __extend__(pairtype(AbstractMultipleUnrelatedFrozenPBCRepr, +class __extend__(pairtype(MultipleFrozenPBCReprBase, SingleFrozenPBCRepr)): def convert_from_to((r_pbc1, r_pbc2), v, llops): - return inputconst(lltype.Void, r_pbc2.frozendesc) + return inputconst(Void, r_pbc2.frozendesc) class MethodOfFrozenPBCRepr(Repr): @@ -523,7 +810,10 @@ im_selves = [] for desc in s_pbc.descriptions: - assert desc.funcdesc is self.funcdesc, "You can't mix a set of methods on a frozen PBC in RPython that are different underlaying functions" + if desc.funcdesc is not self.funcdesc: + raise TyperError( + "You can't mix a set of methods on a frozen PBC in " + "RPython that are different underlying functions") im_selves.append(desc.frozendesc) self.s_im_self = annmodel.SomePBC(im_selves) @@ -589,7 +879,7 @@ # raise TyperError("unsupported: variable of type " # "class-pointer or None") if 
s_pbc.is_constant(): - self.lowleveltype = lltype.Void + self.lowleveltype = Void else: self.lowleveltype = self.getlowleveltype() @@ -612,7 +902,7 @@ def convert_desc(self, desc): if desc not in self.s_pbc.descriptions: raise TyperError("%r not in %r" % (desc, self)) - if self.lowleveltype is lltype.Void: + if self.lowleveltype is Void: return None subclassdef = desc.getuniqueclassdef() r_subclass = rclass.getclassrepr(self.rtyper, subclassdef) @@ -620,7 +910,7 @@ def convert_const(self, cls): if cls is None: - if self.lowleveltype is lltype.Void: + if self.lowleveltype is Void: return None else: T = self.lowleveltype @@ -637,12 +927,12 @@ if attr == '__name__': from rpython.rtyper.lltypesystem import rstr class_repr = self.rtyper.rootclass_repr - vcls, vattr = hop.inputargs(class_repr, lltype.Void) - cname = inputconst(lltype.Void, 'name') + vcls, vattr = hop.inputargs(class_repr, Void) + cname = inputconst(Void, 'name') return hop.genop('getfield', [vcls, cname], - resulttype = lltype.Ptr(rstr.STR)) + resulttype = Ptr(rstr.STR)) access_set, class_repr = self.get_access_set(attr) - vcls, vattr = hop.inputargs(class_repr, lltype.Void) + vcls, vattr = hop.inputargs(class_repr, Void) v_res = class_repr.getpbcfield(vcls, access_set, attr, hop.llops) s_res = access_set.s_value r_res = self.rtyper.getrepr(s_res) @@ -671,7 +961,7 @@ if len(self.s_pbc.descriptions) == 1: # instantiating a single class - if self.lowleveltype is not lltype.Void: + if self.lowleveltype is not Void: assert 0, "XXX None-or-1-class instantation not implemented" assert isinstance(s_instance, annmodel.SomeInstance) classdef = s_instance.classdef @@ -728,10 +1018,10 @@ classdef = desc.getclassdef(None) assert hasattr(classdef, 'my_instantiate_graph') graphs.append(classdef.my_instantiate_graph) - c_graphs = hop.inputconst(lltype.Void, graphs) + c_graphs = hop.inputconst(Void, graphs) # # "my_instantiate = typeptr.instantiate" - c_name = hop.inputconst(lltype.Void, 'instantiate') + c_name = 
hop.inputconst(Void, 'instantiate') v_instantiate = hop.genop('getfield', [vtypeptr, c_name], resulttype=OBJECT_VTABLE.instantiate) # "my_instantiate()" @@ -751,7 +1041,7 @@ return None def ll_str(self, ptr): - cls = lltype.cast_pointer(CLASSTYPE, ptr) + cls = cast_pointer(CLASSTYPE, ptr) return cls.name @@ -766,7 +1056,7 @@ # turn a PBC of classes to a standard pointer-to-vtable class repr if r_clspbc.lowleveltype == r_cls.lowleveltype: return v - if r_clspbc.lowleveltype is lltype.Void: + if r_clspbc.lowleveltype is Void: return inputconst(r_cls, r_clspbc.s_pbc.const) # convert from ptr-to-object-vtable to ptr-to-more-precise-vtable return r_cls.fromclasstype(v, llops) @@ -776,10 +1066,10 @@ # this check makes sense because both source and dest repr are ClassesPBCRepr if r_clspbc1.lowleveltype == r_clspbc2.lowleveltype: return v - if r_clspbc1.lowleveltype is lltype.Void: + if r_clspbc1.lowleveltype is Void: return inputconst(r_clspbc2, r_clspbc1.s_pbc.const) - if r_clspbc2.lowleveltype is lltype.Void: - return inputconst(lltype.Void, r_clspbc2.s_pbc.const) + if r_clspbc2.lowleveltype is Void: + return inputconst(Void, r_clspbc2.s_pbc.const) return NotImplemented def adjust_shape(hop2, s_shape): @@ -856,12 +1146,9 @@ return self.redispatch_call(hop, call_args=True) def redispatch_call(self, hop, call_args): - from rpython.rtyper.lltypesystem.rpbc import ( - FunctionsPBCRepr, SmallFunctionSetPBCRepr) r_class = self.r_im_self.rclass mangled_name, r_func = r_class.clsfields[self.methodname] - assert isinstance(r_func, (FunctionsPBCRepr, - SmallFunctionSetPBCRepr)) + assert isinstance(r_func, FunctionReprBase) # s_func = r_func.s_pbc -- not precise enough, see # test_precise_method_call_1. Build a more precise one... funcdescs = [desc.funcdesc for desc in hop.args_s[0].descriptions] @@ -882,28 +1169,3 @@ # now hop2 looks like simple_call(function, self, args...) 
return hop2.dispatch() - -# ____________________________________________________________ - -def samesig(funcs): - import inspect - argspec = inspect.getargspec(funcs[0]) - for func in funcs: - if inspect.getargspec(func) != argspec: - return False - return True - -# ____________________________________________________________ - -def commonbase(classdefs): - result = classdefs[0] - for cdef in classdefs[1:]: - result = result.commonbase(cdef) - if result is None: - raise TyperError("no common base class in %r" % (classdefs,)) - return result - -def allattributenames(classdef): - for cdef1 in classdef.getmro(): - for attrname in cdef1.attrs: - yield cdef1, attrname From noreply at buildbot.pypy.org Thu Oct 8 18:16:09 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 8 Oct 2015 18:16:09 +0200 (CEST) Subject: [pypy-commit] pypy compress-numbering: fix until we start finding real problems Message-ID: <20151008161609.B16361C13BE@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: compress-numbering Changeset: r80052:dcf276f1076c Date: 2015-10-08 17:48 +0200 http://bitbucket.org/pypy/pypy/changeset/dcf276f1076c/ Log: fix until we start finding real problems diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -10,7 +10,7 @@ VArrayInfoNotClear, VStrPlainInfo, VStrConcatInfo, VStrSliceInfo,\ VUniPlainInfo, VUniConcatInfo, VUniSliceInfo, Snapshot, FrameInfo,\ capture_resumedata, ResumeDataLoopMemo, UNASSIGNEDVIRTUAL, INT,\ - annlowlevel, PENDINGFIELDSP + annlowlevel, PENDINGFIELDSP, unpack_uint from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.history import ConstInt, Const, AbstractDescr from rpython.jit.metainterp.history import ConstPtr, ConstFloat @@ -531,18 +531,25 @@ assert snap1.prev is snap assert snap1.boxes is l1 +class FakeJitCode(object): + def __init__(self, name, index): + 
self.name = name + self.index = index + def test_FrameInfo_create(): - jitcode = "JITCODE" + jitcode = FakeJitCode("jitcode", 13) fi = FrameInfo(None, jitcode, 1) assert fi.prev is None - assert fi.jitcode is jitcode - assert fi.pc == 1 + jitcode_pos, pc = unpack_uint(fi.packed_jitcode_pc) + assert jitcode_pos == 13 + assert pc == 1 - jitcode1 = "JITCODE1" + jitcode1 = FakeJitCode("JITCODE1", 42) fi1 = FrameInfo(fi, jitcode1, 3) assert fi1.prev is fi - assert fi1.jitcode is jitcode1 - assert fi1.pc == 3 + jitcode_pos, pc = unpack_uint(fi1.packed_jitcode_pc) + assert jitcode_pos == 42 + assert pc == 3 def test_Numbering_create(): l = [rffi.r_short(1), rffi.r_short(2)] @@ -558,7 +565,7 @@ def test_capture_resumedata(): b1, b2, b3 = [InputArgInt(), InputArgRef(), InputArgInt()] c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] - fs = [FakeFrame("code0", 0, b1, c1, b2)] + fs = [FakeFrame(FakeJitCode("code0", 13), 0, b1, c1, b2)] storage = Storage() capture_resumedata(fs, None, [], storage) @@ -567,22 +574,21 @@ assert fs[0].parent_resumedata_frame_info_list is None assert storage.rd_frame_info_list.prev is None - assert storage.rd_frame_info_list.jitcode == 'code0' + assert unpack_uint(storage.rd_frame_info_list.packed_jitcode_pc)[0] == 13 assert storage.rd_snapshot.boxes == [] # for virtualrefs snapshot = storage.rd_snapshot.prev assert snapshot.prev is None assert snapshot.boxes == fs[0]._env storage = Storage() - fs = [FakeFrame("code0", 0, b1, c1, b2), - FakeFrame("code1", 3, b3, c2, b1), - FakeFrame("code2", 9, c3, b2)] + fs = [FakeFrame(FakeJitCode("code0", 0), 0, b1, c1, b2), + FakeFrame(FakeJitCode("code1", 1), 3, b3, c2, b1), + FakeFrame(FakeJitCode("code2", 2), 9, c3, b2)] capture_resumedata(fs, None, [], storage) frame_info_list = storage.rd_frame_info_list assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code2' - assert frame_info_list.pc == 9 + assert 
unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 9) assert storage.rd_snapshot.boxes == [] # for virtualrefs snapshot = storage.rd_snapshot.prev @@ -591,14 +597,14 @@ frame_info_list = frame_info_list.prev assert frame_info_list.prev is fs[1].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code1' + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (1, 3) snapshot = snapshot.prev assert snapshot.prev is fs[1].parent_resumedata_snapshot assert snapshot.boxes == fs[1]._env frame_info_list = frame_info_list.prev assert frame_info_list.prev is None - assert frame_info_list.jitcode == 'code0' + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (0, 0) snapshot = snapshot.prev assert snapshot.prev is None assert snapshot.boxes == fs[0]._env @@ -611,9 +617,8 @@ frame_info_list = storage.rd_frame_info_list assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert frame_info_list.jitcode == 'code2' - assert frame_info_list.pc == 15 - + assert unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 15) + snapshot = storage.rd_snapshot assert snapshot.boxes == vrs + vbs # in the same list @@ -914,8 +919,9 @@ snap2 = Snapshot(snap, env2) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) + frameinfo = FrameInfo(None, FakeJitCode("jitcode", 0), 0) - numb, liveboxes, v = memo.number(FakeOptimizer(), snap1) + numb, liveboxes, v = memo.number(FakeOptimizer(), snap1, frameinfo) assert v == 0 assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), From noreply at buildbot.pypy.org Thu Oct 8 18:51:58 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 8 Oct 2015 18:51:58 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20151008165158.AB0171C069F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80053:0f69683ff9c2 Date: 2015-10-08 16:52 +0100 http://bitbucket.org/pypy/pypy/changeset/0f69683ff9c2/ Log: document merged branch diff --git 
a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -56,3 +56,7 @@ .. branch: cffi-stdcall Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. From noreply at buildbot.pypy.org Thu Oct 8 23:02:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 23:02:22 +0200 (CEST) Subject: [pypy-commit] cffi default: ffi.memmove() Message-ID: <20151008210222.40C311C21B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2330:055b350b5272 Date: 2015-10-08 22:54 +0200 http://bitbucket.org/cffi/cffi/changeset/055b350b5272/ Log: ffi.memmove() diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5651,7 +5651,8 @@ return x; } -static int _my_PyObject_GetContiguousBuffer(PyObject *x, Py_buffer *view) +static int _my_PyObject_GetContiguousBuffer(PyObject *x, Py_buffer *view, + int writable_only) { #if PY_MAJOR_VERSION < 3 /* Some objects only support the buffer interface and CPython doesn't @@ -5664,10 +5665,19 @@ /* we used to try all three in some vaguely sensible order, i.e. first the write. But trying to call the write on a read-only buffer fails with TypeError. So we use a less- - sensible order now. See test_from_buffer_more_cases. */ - readbufferproc proc = (readbufferproc)pb->bf_getreadbuffer; - if (!proc) proc = (readbufferproc)pb->bf_getcharbuffer; - if (!proc) proc = (readbufferproc)pb->bf_getwritebuffer; + sensible order now. See test_from_buffer_more_cases. + + If 'writable_only', we only try bf_getwritebuffer. 
+ */ + readbufferproc proc = NULL; + if (!writable_only) { + proc = (readbufferproc)pb->bf_getreadbuffer; + if (!proc) + proc = (readbufferproc)pb->bf_getcharbuffer; + } + if (!proc) + proc = (readbufferproc)pb->bf_getwritebuffer; + if (proc && pb->bf_getsegcount) { if ((*pb->bf_getsegcount)(x, NULL) != 1) { PyErr_SetString(PyExc_TypeError, @@ -5684,7 +5694,8 @@ } #endif - if (PyObject_GetBuffer(x, view, PyBUF_SIMPLE) < 0) + if (PyObject_GetBuffer(x, view, writable_only ? PyBUF_WRITABLE + : PyBUF_SIMPLE) < 0) return -1; if (!PyBuffer_IsContiguous(view, 'A')) { @@ -5743,7 +5754,7 @@ } view = PyObject_Malloc(sizeof(Py_buffer)); - if (_my_PyObject_GetContiguousBuffer(x, view) < 0) + if (_my_PyObject_GetContiguousBuffer(x, view, 0) < 0) goto error1; cd = (CDataObject *)PyObject_GC_New(CDataObject_owngc_frombuf, @@ -5782,6 +5793,52 @@ return direct_from_buffer(ct, x); } +static int _fetch_as_buffer(PyObject *x, Py_buffer *view, int writable_only) +{ + if (CData_Check(x)) { + CTypeDescrObject *ct = ((CDataObject *)x)->c_type; + if (!(ct->ct_flags & (CT_POINTER|CT_ARRAY))) { + PyErr_Format(PyExc_TypeError, + "expected a pointer or array ctype, got '%s'", + ct->ct_name); + return -1; + } + view->buf = ((CDataObject *)x)->c_data; + view->obj = NULL; + return 0; + } + else { + return _my_PyObject_GetContiguousBuffer(x, view, writable_only); + } +} + +static PyObject *b_memmove(PyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *dest_obj, *src_obj; + Py_buffer dest_view, src_view; + Py_ssize_t n; + static char *keywords[] = {"dest", "src", "n", NULL}; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOn", keywords, + &dest_obj, &src_obj, &n)) + return NULL; + + if (_fetch_as_buffer(src_obj, &src_view, 0) < 0) { + return NULL; + } + if (_fetch_as_buffer(dest_obj, &dest_view, 1) < 0) { + PyBuffer_Release(&src_view); + return NULL; + } + + memmove(dest_view.buf, src_view.buf, n); + + PyBuffer_Release(&dest_view); + PyBuffer_Release(&src_view); + Py_INCREF(Py_None); + 
return Py_None; +} + static PyObject *b__get_types(PyObject *self, PyObject *noarg) { return PyTuple_Pack(2, (PyObject *)&CData_Type, @@ -6108,6 +6165,7 @@ {"newp_handle", b_newp_handle, METH_VARARGS}, {"from_handle", b_from_handle, METH_O}, {"from_buffer", b_from_buffer, METH_VARARGS}, + {"memmove", (PyCFunction)b_memmove, METH_VARARGS | METH_KEYWORDS}, {"gcp", (PyCFunction)b_gcp, METH_VARARGS | METH_KEYWORDS}, #ifdef MS_WIN32 {"getwinerror", (PyCFunction)b_getwinerror, METH_VARARGS | METH_KEYWORDS}, diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -852,6 +852,24 @@ return x; } +PyDoc_STRVAR(ffi_memmove_doc, +"ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.\n" +"\n" +"Like the C function memmove(), the memory areas may overlap;\n" +"apart from that it behaves like the C function memcpy().\n" +"\n" +"'src' can be any cdata ptr or array, or any Python buffer object.\n" +"'dest' can be any cdata ptr or array, or a writable Python buffer\n" +"object. 
The size to copy, 'n', is always measured in bytes.\n" +"\n" +"Unlike other methods, this one supports all Python buffer including\n" +"byte strings and bytearrays---but it still does not support\n" +"non-contiguous buffers."); + +#define ffi_memmove b_memmove /* ffi_memmove() => b_memmove() + from _cffi_backend.c */ + + #define METH_VKW (METH_VARARGS | METH_KEYWORDS) static PyMethodDef ffi_methods[] = { {"addressof", (PyCFunction)ffi_addressof, METH_VARARGS, ffi_addressof_doc}, @@ -869,6 +887,7 @@ {"getwinerror",(PyCFunction)ffi_getwinerror,METH_VKW, ffi_getwinerror_doc}, #endif {"integer_const",(PyCFunction)ffi_int_const,METH_VKW, ffi_int_const_doc}, + {"memmove", (PyCFunction)ffi_memmove, METH_VKW, ffi_memmove_doc}, {"new", (PyCFunction)ffi_new, METH_VKW, ffi_new_doc}, {"new_allocator",(PyCFunction)ffi_new_allocator,METH_VKW,ffi_new_allocator_doc}, {"new_handle", (PyCFunction)ffi_new_handle, METH_O, ffi_new_handle_doc}, diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3404,6 +3404,65 @@ check(4 | 8, "CHB", "GTB") check(4 | 16, "CHB", "ROB") +def test_memmove(): + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + Char = new_primitive_type("char") + CharA = new_array_type(new_pointer_type(Char), None) + p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678]) + memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + memmove(p + 4, newp(CharA, b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + a = array.array('H', [10000, 20000, 30000]) + p = newp(ShortA, 5) + memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + 
memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + ffi = FFI() + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. 
+ """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -313,6 +313,59 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + def test_memmove(self): + ffi = FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + + def test_memmove_buffer(self): + import array + ffi = FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + + def test_memmove_readonly_readwrite(self): + ffi = FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), 
ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_all_primitives(self): ffi = FFI() for name in [ diff --git a/testing/cffi1/test_ffi_obj.py b/testing/cffi1/test_ffi_obj.py --- a/testing/cffi1/test_ffi_obj.py +++ b/testing/cffi1/test_ffi_obj.py @@ -236,6 +236,59 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] +def test_memmove(): + ffi = _cffi1_backend.FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + ffi = _cffi1_backend.FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + ffi = _cffi1_backend.FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert 
list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_ffi_types(): CData = _cffi1_backend.FFI.CData CType = _cffi1_backend.FFI.CType From noreply at buildbot.pypy.org Thu Oct 8 23:02:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 23:02:24 +0200 (CEST) Subject: [pypy-commit] cffi default: Comment out the prints (not py3) Message-ID: <20151008210224.4DD2C1C21B0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2331:7b8014e87682 Date: 2015-10-08 22:55 +0200 http://bitbucket.org/cffi/cffi/changeset/7b8014e87682/ Log: Comment out the prints (not py3) diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2275,16 +2275,16 @@ @ffi.callback("int __stdcall(int)") def cb2(x): return x * 3 - print 'cb1 =', cb1 + #print 'cb1 =', cb1 res = lib.call1(cb1) assert res == 500*999*2 - print 'cb2 =', cb2 - print ffi.typeof(lib.call2) - print 'call2 =', lib.call2 + #print 'cb2 =', cb2 + #print ffi.typeof(lib.call2) + #print 'call2 =', lib.call2 res = lib.call2(cb2) - print '...' + #print '...' 
assert res == -500*999*3 - print 'done' + #print 'done' if sys.platform == 'win32': assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1364,15 +1364,15 @@ return result; } """) - print '<<< cb1 =', ffi.addressof(lib, 'cb1') + #print '<<< cb1 =', ffi.addressof(lib, 'cb1') ptr_call1 = ffi.addressof(lib, 'call1') assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - print '<<< cb2 =', ffi.addressof(lib, 'cb2') + #print '<<< cb2 =', ffi.addressof(lib, 'cb2') ptr_call2 = ffi.addressof(lib, 'call2') assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - print '<<< done' + #print '<<< done' def test_win32_calling_convention_2(): # any mistake in the declaration of plain function (including the From noreply at buildbot.pypy.org Thu Oct 8 23:02:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 8 Oct 2015 23:02:26 +0200 (CEST) Subject: [pypy-commit] cffi default: write a "document me" Message-ID: <20151008210226.42E3A1C21B1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2332:bc4c1ecefcab Date: 2015-10-08 22:58 +0200 http://bitbucket.org/cffi/cffi/changeset/bc4c1ecefcab/ Log: write a "document me" diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -23,6 +23,8 @@ pointers; regular C functions don't need to have their `calling convention`_ declared. +* ffi.memmove XXX + .. 
_`calling convention`: using.html#windows-calling-conventions From noreply at buildbot.pypy.org Thu Oct 8 23:28:14 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 8 Oct 2015 23:28:14 +0200 (CEST) Subject: [pypy-commit] pypy remove-getfield-pure: fix remaining tests Message-ID: <20151008212814.18EAC1C1319@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: remove-getfield-pure Changeset: r80064:dcf869daced9 Date: 2015-10-08 23:28 +0200 http://bitbucket.org/pypy/pypy/changeset/dcf869daced9/ Log: fix remaining tests diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -2317,7 +2317,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2330,7 +2330,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2338,11 +2338,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2350,8 +2351,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, 
i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -8780,13 +8780,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_pure_r(p0, descr=valuedescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -217,6 +217,7 @@ floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) gcarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) From noreply at buildbot.pypy.org Fri Oct 9 05:31:17 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:17 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem.check_null Message-ID: <20151009033117.2DC341C13BE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80068:2349f1dae6a1 Date: 2015-10-08 23:40 +0100 http://bitbucket.org/pypy/pypy/changeset/2349f1dae6a1/ Log: Kill LowLevelTypeSystem.check_null diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -239,7 +239,8 
@@ if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) else: - return hop.rtyper.type_system.check_null(self, hop) + vlist = hop.inputargs(self) + return hop.genop('ptr_nonzero', vlist, resulttype=Bool) class IteratorRepr(Repr): diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -8,11 +8,6 @@ class LowLevelTypeSystem(object): name = "lltypesystem" - def check_null(self, repr, hop): - # None is a nullptr, which is false; everything else is true. - vlist = hop.inputargs(repr) - return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - def generic_is(self, robj1, robj2, hop): roriginal1 = robj1 roriginal2 = robj2 From noreply at buildbot.pypy.org Fri Oct 9 05:31:10 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:10 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem.deref Message-ID: <20151009033110.D912C1C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80065:df52cada2be9 Date: 2015-10-08 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/df52cada2be9/ Log: Kill LowLevelTypeSystem.deref diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1114,10 +1114,8 @@ if getattr(graph, 'func', None) is f] init_graph = t._graphof(Frame.__init__.im_func) - deref = t.rtyper.type_system.deref - def direct_calls(graph): - return [deref(op.args[0].value)._callable.func_name + return [op.args[0].value._obj._callable.func_name for block, op in graph.iterblockops() if op.opname == 'direct_call'] diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -466,7 +466,7 @@ raise LLException(etype, evalue, 
*extraargs) def invoke_callable_with_pyexceptions(self, fptr, *args): - obj = self.llinterpreter.typer.type_system.deref(fptr) + obj = fptr._obj try: return obj._callable(*args) except LLException, e: @@ -644,7 +644,7 @@ array[index] = item def perform_call(self, f, ARGS, args): - fobj = self.llinterpreter.typer.type_system.deref(f) + fobj = f._obj has_callable = getattr(fobj, '_callable', None) is not None if hasattr(fobj, 'graph'): graph = fobj.graph @@ -669,7 +669,7 @@ graphs = args[-1] args = args[:-1] if graphs is not None: - obj = self.llinterpreter.typer.type_system.deref(f) + obj = f._obj if hasattr(obj, 'graph'): assert obj.graph in graphs else: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -874,7 +874,7 @@ # build the 'direct_call' operation f = self.rtyper.getcallable(graph) c = inputconst(typeOf(f), f) - fobj = self.rtyper.type_system.deref(f) + fobj = f._obj return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -337,7 +337,6 @@ g(a) t, typer, graph = self.gengraph(f, []) - deref = typer.type_system.deref desc = typer.annotator.bookkeeper.getdesc(g) g_graphs = desc._cache.items() @@ -354,7 +353,7 @@ def get_direct_call_graph(graph): for block, op in graph.iterblockops(): if op.opname == 'direct_call': - return deref(op.args[0].value).graph + return op.args[0].value._obj.graph return None assert get_direct_call_graph(f_graph) is g_graph_directly diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -8,10 +8,6 @@ class LowLevelTypeSystem(object): name = "lltypesystem" - def deref(self, obj): - assert isinstance(lltype.typeOf(obj), lltype.Ptr) - return 
obj._obj - def check_null(self, repr, hop): # None is a nullptr, which is false; everything else is true. vlist = hop.inputargs(repr) @@ -71,8 +67,7 @@ # _callable is normally graph.func, but can be overridden: # see fakeimpl in extfunc.py _callable = fnobjattrs.pop('_callable', graph.func) - return lltype.functionptr(FT, name, graph = graph, - _callable = _callable, **fnobjattrs) + return lltype.functionptr(FT, name, graph=graph, + _callable=_callable, **fnobjattrs) else: - return lltype.functionptr(FT, name, graph = graph) - + return lltype.functionptr(FT, name, graph=graph) From noreply at buildbot.pypy.org Fri Oct 9 05:31:12 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:12 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem.null_callable Message-ID: <20151009033112.F15CD1C1319@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80066:f6ba7904d62c Date: 2015-10-08 20:46 +0100 http://bitbucket.org/pypy/pypy/changeset/f6ba7904d62c/ Log: Kill LowLevelTypeSystem.null_callable diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -266,7 +266,7 @@ else: # missing entry -- need a 'null' of the type that matches # this row - llfn = self.rtyper.type_system.null_callable(row.fntype) + llfn = nullptr(row.fntype.TO) llfns[row.attrname] = llfn if len(self.uniquerows) == 1: if found_anything: @@ -291,7 +291,7 @@ elif isinstance(value, staticmethod): value = value.__get__(42) # hackish, get the function wrapped by staticmethod if value is None: - null = self.rtyper.type_system.null_callable(self.lowleveltype) + null = nullptr(self.lowleveltype.TO) return null funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) @@ -914,7 +914,7 @@ return None else: T = self.lowleveltype - return self.rtyper.type_system.null_callable(T) + return nullptr(T.TO) bk = 
self.rtyper.annotator.bookkeeper classdesc = bk.getdesc(cls) return self.convert_desc(classdesc) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -13,9 +13,6 @@ vlist = hop.inputargs(repr) return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - def null_callable(self, T): - return lltype.nullptr(T.TO) - def getexternalcallable(self, ll_args, ll_result, name, **kwds): FT = lltype.FuncType(ll_args, ll_result) return lltype.functionptr(FT, name, **kwds) From noreply at buildbot.pypy.org Fri Oct 9 05:31:19 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:19 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem.generic_is Message-ID: <20151009033119.47AAE1C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80069:37e366591ab1 Date: 2015-10-09 00:07 +0100 http://bitbucket.org/pypy/pypy/changeset/37e366591ab1/ Log: Kill LowLevelTypeSystem.generic_is diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType, Ptr from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -283,7 +283,23 @@ def rtype_is_((robj1, robj2), hop): if hop.s_result.is_constant(): return inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.generic_is(robj1, robj2, hop) + roriginal1 = robj1 + roriginal2 = robj2 + if robj1.lowleveltype is Void: + robj1 = robj2 + elif robj2.lowleveltype is Void: + robj2 = robj1 + if (not isinstance(robj1.lowleveltype, Ptr) or + not 
isinstance(robj2.lowleveltype, Ptr)): + raise TyperError('is of instances of the non-pointers: %r, %r' % ( + roriginal1, roriginal2)) + if robj1.lowleveltype != robj2.lowleveltype: + raise TyperError('is of instances of different pointer types: %r, %r' % ( + roriginal1, roriginal2)) + + v_list = hop.inputargs(robj1, robj2) + return hop.genop('ptr_eq', v_list, resulttype=Bool) + # default implementation for checked getitems diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -8,25 +8,6 @@ class LowLevelTypeSystem(object): name = "lltypesystem" - def generic_is(self, robj1, robj2, hop): - roriginal1 = robj1 - roriginal2 = robj2 - if robj1.lowleveltype is lltype.Void: - robj1 = robj2 - elif robj2.lowleveltype is lltype.Void: - robj2 = robj1 - if (not isinstance(robj1.lowleveltype, lltype.Ptr) or - not isinstance(robj2.lowleveltype, lltype.Ptr)): - raise TyperError('is of instances of the non-pointers: %r, %r' % ( - roriginal1, roriginal2)) - if robj1.lowleveltype != robj2.lowleveltype: - raise TyperError('is of instances of different pointer types: %r, %r' % ( - roriginal1, roriginal2)) - - v_list = hop.inputargs(robj1, robj2) - return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) - - def _getconcretetype(v): return v.concretetype From noreply at buildbot.pypy.org Fri Oct 9 05:31:15 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:15 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem.getexternalcallable Message-ID: <20151009033115.186141C131C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80067:f025a9b30012 Date: 2015-10-08 23:06 +0100 http://bitbucket.org/pypy/pypy/changeset/f025a9b30012/ Log: Kill LowLevelTypeSystem.getexternalcallable diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ 
-1,6 +1,6 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.lltypesystem.lltype import typeOf +from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr from rpython.annotator import model as annmodel from rpython.annotator.signature import annotation @@ -200,13 +200,10 @@ obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) else: - #if not self.safe_not_sandboxed: - # print '>>>>>>>>>>>>>-----------------------------------' - # print name, self.name - # print '<<<<<<<<<<<<<-----------------------------------' - obj = rtyper.type_system.getexternalcallable(args_ll, ll_result, - name, _external_name=self.name, _callable=fakeimpl, - _safe_not_sandboxed=self.safe_not_sandboxed) + FT = FuncType(args_ll, ll_result) + obj = functionptr(FT, name, _external_name=self.name, + _callable=fakeimpl, + _safe_not_sandboxed=self.safe_not_sandboxed) vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) hop.exception_is_here() return hop.genop('direct_call', vlist, r_result) diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -13,10 +13,6 @@ vlist = hop.inputargs(repr) return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - def getexternalcallable(self, ll_args, ll_result, name, **kwds): - FT = lltype.FuncType(ll_args, ll_result) - return lltype.functionptr(FT, name, **kwds) - def generic_is(self, robj1, robj2, hop): roriginal1 = robj1 roriginal2 = robj2 From noreply at buildbot.pypy.org Fri Oct 9 05:31:21 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:21 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Kill LowLevelTypeSystem Message-ID: <20151009033121.5F4EE1C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80070:de1316b4c1c4 Date: 2015-10-09 00:51 +0100 
http://bitbucket.org/pypy/pypy/changeset/de1316b4c1c4/ Log: Kill LowLevelTypeSystem diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -178,7 +178,6 @@ """ fnptr = getfunctionptr(graph) FUNC = lltype.typeOf(fnptr).TO - assert self.rtyper.type_system.name == "lltypesystem" fnaddr = llmemory.cast_ptr_to_adr(fnptr) NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void] calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -159,8 +159,6 @@ class FakeRTyper: class annotator: translator = None - class type_system: - name = 'lltypesystem' def getfunctionptr(graph): F = lltype.FuncType([], lltype.Signed) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -30,7 +30,6 @@ return Constant(x, lltype.typeOf(x)) class FakeRTyper: - class type_system: name = 'lltypesystem' instance_reprs = {} class FakeCPU: diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -25,7 +25,7 @@ Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError -from rpython.rtyper.typesystem import LowLevelTypeSystem, getfunctionptr +from rpython.rtyper.typesystem import getfunctionptr from rpython.rtyper import rclass from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair @@ -38,7 +38,6 @@ def __init__(self, annotator): self.annotator = annotator self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - 
self.type_system = LowLevelTypeSystem() self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py --- a/rpython/rtyper/typesystem.py +++ b/rpython/rtyper/typesystem.py @@ -2,20 +2,13 @@ """typesystem.py -- Typesystem-specific operations for RTyper.""" from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.error import TyperError -class LowLevelTypeSystem(object): - name = "lltypesystem" - def _getconcretetype(v): return v.concretetype - -def getfunctionptr(graph, getconcretetype=None): +def getfunctionptr(graph, getconcretetype=_getconcretetype): """Return callable given a Python function.""" - if getconcretetype is None: - getconcretetype = _getconcretetype llinputs = [getconcretetype(v) for v in graph.getargs()] lloutput = getconcretetype(graph.getreturnvar()) From noreply at buildbot.pypy.org Fri Oct 9 05:31:23 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:23 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Remove rpython/rtyper/typesystem.py and move getfunctionptr() to lltype Message-ID: <20151009033123.7C5C01C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80071:68c2ec00cda8 Date: 2015-10-09 04:24 +0100 http://bitbucket.org/pypy/pypy/changeset/68c2ec00cda8/ Log: Remove rpython/rtyper/typesystem.py and move getfunctionptr() to lltype diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -9,7 +9,7 @@ QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rlib import rposix from rpython.translator.backendopt.canraise import 
RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -2265,6 +2265,35 @@ o = _func(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o) +def _getconcretetype(v): + return v.concretetype + +def getfunctionptr(graph, getconcretetype=_getconcretetype): + """Return callable given a Python function.""" + llinputs = [getconcretetype(v) for v in graph.getargs()] + lloutput = getconcretetype(graph.getreturnvar()) + + FT = FuncType(llinputs, lloutput) + name = graph.name + if hasattr(graph, 'func') and callable(graph.func): + # the Python function object can have _llfnobjattrs_, specifying + # attributes that are forced upon the functionptr(). The idea + # for not passing these extra attributes as arguments to + # getcallable() itself is that multiple calls to getcallable() + # for the same graph should return equal functionptr() objects. 
+ if hasattr(graph.func, '_llfnobjattrs_'): + fnobjattrs = graph.func._llfnobjattrs_.copy() + # can specify a '_name', but use graph.name by default + name = fnobjattrs.pop('_name', name) + else: + fnobjattrs = {} + # _callable is normally graph.func, but can be overridden: + # see fakeimpl in extfunc.py + _callable = fnobjattrs.pop('_callable', graph.func) + return functionptr(FT, name, graph=graph, _callable=_callable, + **fnobjattrs) + else: + return functionptr(FT, name, graph=graph) def nullptr(T): return Ptr(T)._defl() @@ -2444,3 +2473,5 @@ for item in v.items: for i in dissect_ll_instance(item, t.OF, memo): yield i + + diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -13,11 +13,11 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import llmemory -from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, - Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed, cast_pointer) +from rpython.rtyper.lltypesystem.lltype import ( + typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, + Array, Signed, cast_pointer, getfunctionptr) from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, warning, impossible_repr) -from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pair, pairtype from rpython.translator.unsimplify import varoftype diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -23,9 +23,8 @@ from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, - attachRuntimeTypeInfo, Primitive) + attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError 
-from rpython.rtyper.typesystem import getfunctionptr from rpython.rtyper import rclass from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py deleted file mode 100644 --- a/rpython/rtyper/typesystem.py +++ /dev/null @@ -1,35 +0,0 @@ - -"""typesystem.py -- Typesystem-specific operations for RTyper.""" - -from rpython.rtyper.lltypesystem import lltype - - -def _getconcretetype(v): - return v.concretetype - -def getfunctionptr(graph, getconcretetype=_getconcretetype): - """Return callable given a Python function.""" - llinputs = [getconcretetype(v) for v in graph.getargs()] - lloutput = getconcretetype(graph.getreturnvar()) - - FT = lltype.FuncType(llinputs, lloutput) - name = graph.name - if hasattr(graph, 'func') and callable(graph.func): - # the Python function object can have _llfnobjattrs_, specifying - # attributes that are forced upon the functionptr(). The idea - # for not passing these extra attributes as arguments to - # getcallable() itself is that multiple calls to getcallable() - # for the same graph should return equal functionptr() objects. 
- if hasattr(graph.func, '_llfnobjattrs_'): - fnobjattrs = graph.func._llfnobjattrs_.copy() - # can specify a '_name', but use graph.name by default - name = fnobjattrs.pop('_name', name) - else: - fnobjattrs = {} - # _callable is normally graph.func, but can be overridden: - # see fakeimpl in extfunc.py - _callable = fnobjattrs.pop('_callable', graph.func) - return lltype.functionptr(FT, name, graph=graph, - _callable=_callable, **fnobjattrs) - else: - return lltype.functionptr(FT, name, graph=graph) diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py --- a/rpython/translator/backendopt/mallocv.py +++ b/rpython/translator/backendopt/mallocv.py @@ -4,7 +4,7 @@ from rpython.translator.backendopt.support import log from rpython.translator.simplify import join_blocks from rpython.translator.unsimplify import varoftype -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -1,6 +1,6 @@ from rpython.translator.c.genc import CBuilder -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -3,7 +3,7 @@ import sys, os from rpython.rlib import exports from rpython.rlib.entrypoint import entrypoint -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool import runsubprocess from rpython.tool.nullpath import 
NullPyPathLocal @@ -468,7 +468,7 @@ '$(CC) -o $*.o -c $*.vmprof.lbl.s', 'mv $*.gctmp $*.gcmap', 'rm $*.vmprof.lbl.s']) - + # the rule to compute gcmaptable.s mk.rule('gcmaptable.s', '$(GCMAPFILES)', [ @@ -759,7 +759,7 @@ database, database.translator.rtyper) for line in preimplementationlines: print >> f, line - f.write('#endif /* _PY_PREIMPL_H */\n') + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function diff --git a/rpython/translator/c/test/test_database.py b/rpython/translator/c/test/test_database.py --- a/rpython/translator/c/test/test_database.py +++ b/rpython/translator/c/test/test_database.py @@ -4,7 +4,7 @@ from rpython.translator.c.database import LowLevelDatabase from rpython.flowspace.model import Constant, Variable, SpaceOperation from rpython.flowspace.model import Block, Link, FunctionGraph -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr @@ -43,7 +43,7 @@ def test_inlined_struct(): db = LowLevelDatabase() - pfx = db.namespace.global_prefix + 'g_' + pfx = db.namespace.global_prefix + 'g_' S = GcStruct('test', ('x', Struct('subtest', ('y', Signed)))) s = malloc(S) s.x.y = 42 @@ -56,7 +56,7 @@ def test_complete(): db = LowLevelDatabase() - pfx = db.namespace.global_prefix + 'g_' + pfx = db.namespace.global_prefix + 'g_' T = GcStruct('subtest', ('y', Signed)) S = GcStruct('test', ('x', Ptr(T))) s = malloc(S) @@ -136,7 +136,7 @@ block.closeblock(Link([result], graph.returnblock)) graph.getreturnvar().concretetype = Signed # -------------------- end -------------------- - + F = FuncType([Signed], Signed) f = functionptr(F, "f", graph=graph) db = LowLevelDatabase() @@ -206,7 +206,7 @@ s.ptr2 = ptr2 return s.ptr1.x * s.ptr2.x t, graph = makegraph(ll_f, [int]) - + db = LowLevelDatabase(t) db.get(getfunctionptr(graph)) db.complete() From 
noreply at buildbot.pypy.org Fri Oct 9 05:31:25 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 05:31:25 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: remove references to type_system from pypy Message-ID: <20151009033125.9A79C1C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80072:db34e9582e69 Date: 2015-10-09 04:31 +0100 http://bitbucket.org/pypy/pypy/changeset/db34e9582e69/ Log: remove references to type_system from pypy diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -118,7 +118,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES: + if 'st_blksize' in STAT_FIELD_TYPES: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -57,8 +57,7 @@ raise OperationError(space.w_ValueError, space.wrap("recursion limit must be positive")) space.sys.recursionlimit = new_limit - if space.config.translation.type_system == 'lltype': - _stack_set_length_fraction(new_limit * 0.001) + _stack_set_length_fraction(new_limit * 0.001) def getrecursionlimit(space): """Return the last value set by setrecursionlimit(). 
diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -93,7 +93,6 @@ try: interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, - type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) except Exception, e: print '%s: %s' % (e.__class__, e) From noreply at buildbot.pypy.org Fri Oct 9 09:57:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 09:57:26 +0200 (CEST) Subject: [pypy-commit] cffi default: Test and fix: a negative size was implicitly converted to a huge size_t Message-ID: <20151009075726.C9EBE1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2333:2a9a1726dae3 Date: 2015-10-09 09:58 +0200 http://bitbucket.org/cffi/cffi/changeset/2a9a1726dae3/ Log: Test and fix: a negative size was implicitly converted to a huge size_t number diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -5822,6 +5822,10 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOn", keywords, &dest_obj, &src_obj, &n)) return NULL; + if (n < 0) { + PyErr_SetString(PyExc_ValueError, "negative size"); + return NULL; + } if (_fetch_as_buffer(src_obj, &src_view, 0) < 0) { return NULL; diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3450,7 +3450,6 @@ assert b.tolist() == [-997, -996, 995] def test_memmove_readonly_readwrite(): - ffi = FFI() SignedChar = new_primitive_type("signed char") SignedCharA = new_array_type(new_pointer_type(SignedChar), None) p = newp(SignedCharA, 5) @@ -3463,6 +3462,12 @@ memmove(dest=ba, src=p, n=3) assert ba == bytearray(b"ABcxx") +def test_memmove_sign_check(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault + def test_dereference_null_ptr(): BInt = new_primitive_type("int") 
BIntPtr = new_pointer_type(BInt) From noreply at buildbot.pypy.org Fri Oct 9 10:18:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 10:18:14 +0200 (CEST) Subject: [pypy-commit] cffi default: Test yet another case (for pypy) Message-ID: <20151009081814.809D61C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2334:336b4e746d80 Date: 2015-10-09 10:19 +0200 http://bitbucket.org/cffi/cffi/changeset/336b4e746d80/ Log: Test yet another case (for pypy) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3461,6 +3461,8 @@ ba = bytearray(b"xxxxx") memmove(dest=ba, src=p, n=3) assert ba == bytearray(b"ABcxx") + memmove(ba, b"EFGH", 4) + assert ba == bytearray(b"EFGHx") def test_memmove_sign_check(): SignedChar = new_primitive_type("signed char") From noreply at buildbot.pypy.org Fri Oct 9 10:23:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 10:23:22 +0200 (CEST) Subject: [pypy-commit] pypy default: ffi.memmove() Message-ID: <20151009082322.E97AD1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80073:1adcd2cc4ebf Date: 2015-10-09 10:23 +0200 http://bitbucket.org/pypy/pypy/changeset/1adcd2cc4ebf/ Log: ffi.memmove() diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. 
+ + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -47,6 +47,7 @@ 'string': 'func.string', 'buffer': 'cbuffer.buffer', + 'memmove': 'func.memmove', 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -391,6 +391,25 @@ return cerrno.getwinerror(self.space, code) + @unwrap_spec(n=int) + def descr_memmove(self, w_dest, w_src, n): + """\ +ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + +Like the C function memmove(), the memory areas may overlap; +apart from that it behaves like the C function memcpy(). + +'src' can be any cdata ptr or array, or any Python buffer object. +'dest' can be any cdata ptr or array, or a writable Python buffer +object. The size to copy, 'n', is always measured in bytes. 
+ +Unlike other methods, this one supports all Python buffer including +byte strings and bytearrays---but it still does not support +non-contiguous buffers.""" + # + return func.memmove(self.space, w_dest, w_src, n) + + @unwrap_spec(w_init=WrappedDefault(None)) def descr_new(self, w_arg, w_init): """\ @@ -623,6 +642,7 @@ gc = interp2app(W_FFIObject.descr_gc), getctype = interp2app(W_FFIObject.descr_getctype), integer_const = interp2app(W_FFIObject.descr_integer_const), + memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -1,3 +1,8 @@ +from rpython.rtyper.annlowlevel import llstr +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw +from rpython.rlib.objectmodel import keepalive_until_here + from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.module._cffi_backend import ctypeobj, cdataobj, allocator @@ -79,6 +84,26 @@ # ____________________________________________________________ +def _fetch_as_read_buffer(space, w_x): + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? 
+ try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + return buf + +def _fetch_as_write_buffer(space, w_x): + try: + buf = space.writebuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_WRITABLE) + return buf + @unwrap_spec(w_ctype=ctypeobj.W_CType) def from_buffer(space, w_ctype, w_x): from pypy.module._cffi_backend import ctypearray, ctypeprim @@ -88,14 +113,7 @@ raise oefmt(space.w_TypeError, "needs 'char[]', got '%s'", w_ctype.name) # - # xxx do we really need to implement the same mess as in CPython 2.7 - # w.r.t. buffers and memoryviews?? - try: - buf = space.readbuf_w(w_x) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - buf = space.buffer_w(w_x, space.BUF_SIMPLE) + buf = _fetch_as_read_buffer(space, w_x) try: _cdata = buf.get_raw_address() except ValueError: @@ -106,6 +124,68 @@ # return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) +c_memmove = rffi.llexternal('memmove', [rffi.CCHARP, rffi.CCHARP, + rffi.SIZE_T], lltype.Void, + _nowrapper=True) + + at unwrap_spec(n=int) +def memmove(space, w_dest, w_src, n): + if n < 0: + raise oefmt(space.w_ValueError, "negative size") + + # cases... 
+ src_buf = None + src_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_src, cdataobj.W_CData): + src_data = w_src.unsafe_escaping_ptr() + src_is_ptr = True + else: + src_buf = _fetch_as_read_buffer(space, w_src) + try: + src_data = src_buf.get_raw_address() + src_is_ptr = True + except ValueError: + src_is_ptr = False + + if src_is_ptr: + src_string = None + else: + if n == src_buf.getlength(): + src_string = src_buf.as_str() + else: + src_string = src_buf.getslice(0, n, 1, n) + + dest_buf = None + dest_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_dest, cdataobj.W_CData): + dest_data = w_dest.unsafe_escaping_ptr() + dest_is_ptr = True + else: + dest_buf = _fetch_as_write_buffer(space, w_dest) + try: + dest_data = dest_buf.get_raw_address() + dest_is_ptr = True + except ValueError: + dest_is_ptr = False + + if dest_is_ptr: + if src_is_ptr: + c_memmove(dest_data, src_data, rffi.cast(rffi.SIZE_T, n)) + else: + copy_string_to_raw(llstr(src_string), dest_data, 0, n) + else: + if src_is_ptr: + for i in range(n): + dest_buf.setitem(i, src_data[i]) + else: + for i in range(n): + dest_buf.setitem(i, src_string[i]) + + keepalive_until_here(src_buf) + keepalive_until_here(dest_buf) + keepalive_until_here(w_src) + keepalive_until_here(w_dest) + # ____________________________________________________________ @unwrap_spec(w_cdata=cdataobj.W_CData) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3393,6 +3393,72 @@ check(4 | 8, "CHB", "GTB") check(4 | 16, "CHB", "ROB") +def test_memmove(): + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + Char = new_primitive_type("char") + CharA = new_array_type(new_pointer_type(Char), None) + p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678]) + memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, 
-3456, -4567, -5678] + p[2] = 999 + memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + memmove(p + 4, newp(CharA, b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + a = array.array('H', [10000, 20000, 30000]) + p = newp(ShortA, 5) + memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + memmove(ba, b"EFGH", 4) + assert ba == bytearray(b"EFGHx") + +def test_memmove_sign_check(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault + def test_dereference_null_ptr(): BInt = new_primitive_type("int") 
BIntPtr = new_pointer_type(BInt) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -247,6 +247,63 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + def test_memmove(self): + import sys + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + + def test_memmove_buffer(self): + import _cffi_backend as _cffi1_backend + import array + ffi = _cffi1_backend.FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + + def test_memmove_readonly_readwrite(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert 
list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_ffi_types(self): import _cffi_backend as _cffi1_backend CData = _cffi1_backend.FFI.CData diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -314,6 +314,59 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + def test_memmove(self): + ffi = FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + + def test_memmove_buffer(self): + import array + ffi = FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + + def test_memmove_readonly_readwrite(self): + ffi = FFI() + p = 
ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_all_primitives(self): ffi = FFI() for name in [ diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -2276,16 +2276,16 @@ @ffi.callback("int __stdcall(int)") def cb2(x): return x * 3 - print 'cb1 =', cb1 + #print 'cb1 =', cb1 res = lib.call1(cb1) assert res == 500*999*2 - print 'cb2 =', cb2 - print ffi.typeof(lib.call2) - print 'call2 =', lib.call2 + #print 'cb2 =', cb2 + #print ffi.typeof(lib.call2) + #print 'call2 =', lib.call2 res = lib.call2(cb2) - print '...' + #print '...' 
assert res == -500*999*3 - print 'done' + #print 'done' if sys.platform == 'win32': assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -237,6 +237,59 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] +def test_memmove(): + ffi = _cffi1_backend.FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + ffi = _cffi1_backend.FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + ffi = _cffi1_backend.FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + 
ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_ffi_types(): CData = _cffi1_backend.FFI.CData CType = _cffi1_backend.FFI.CType diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1365,15 +1365,15 @@ return result; } """) - print '<<< cb1 =', ffi.addressof(lib, 'cb1') + #print '<<< cb1 =', ffi.addressof(lib, 'cb1') ptr_call1 = ffi.addressof(lib, 'call1') assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 - print '<<< cb2 =', ffi.addressof(lib, 'cb2') + #print '<<< cb2 =', ffi.addressof(lib, 'cb2') ptr_call2 = ffi.addressof(lib, 'call2') assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 - print '<<< done' + #print '<<< done' def test_win32_calling_convention_2(): # any mistake in the declaration of plain function (including the From noreply at buildbot.pypy.org Fri Oct 9 10:49:03 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 9 Oct 2015 10:49:03 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed call to __class__ but inlined the object names Message-ID: <20151009084903.304641C0EFC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80074:ef6dda7a8060 Date: 2015-10-09 10:49 +0200 http://bitbucket.org/pypy/pypy/changeset/ef6dda7a8060/ Log: removed call to __class__ but inlined the object names diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ 
b/rpython/jit/metainterp/compile.py @@ -685,9 +685,7 @@ _attrs_ = () def clone(self): - cloned = self.__class__() - cloned.copy_all_attributes_from(self) - return cloned + return self def exits_early(self): return False @@ -847,6 +845,12 @@ assert isinstance(other, ResumeGuardCopiedDescr) self.prev = other.prev + def clone(self): + cloned = ResumeGuardCopiedDescr() + cloned.copy_all_attributes_from(self) + return cloned + + class ResumeGuardDescr(AbstractResumeGuardDescr): _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals', 'rd_frame_info_list', 'rd_pendingfields', 'status') @@ -879,6 +883,11 @@ self.rd_count = len(boxes) self.store_hash(metainterp_sd) + def clone(self): + cloned = ResumeGuardDescr() + cloned.copy_all_attributes_from(self) + return cloned + class ResumeGuardExcDescr(ResumeGuardDescr): pass @@ -1015,12 +1024,6 @@ self.original_greenkey, jitcell_token) metainterp_sd.stats.add_jitcell_token(jitcell_token) - def clone(self): - cloned = ResumeFromInterpDescr(self.original_greenkey) - cloned.copy_all_attributes_from(self) - return cloned - - def compile_trace(metainterp, resumekey): """Try to compile a new bridge leading from the beginning of the history From noreply at buildbot.pypy.org Fri Oct 9 11:24:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 11:24:20 +0200 (CEST) Subject: [pypy-commit] pypy default: I think it's actually better if we don't put @jit.jit_callback here (see Message-ID: <20151009092421.09A6F1C13C8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80075:112382340157 Date: 2015-10-09 11:24 +0200 http://bitbucket.org/pypy/pypy/changeset/112382340157/ Log: I think it's actually better if we don't put @jit.jit_callback here (see comment) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -200,7 +200,10 @@ extra_line="\nDuring the call to 'onerror', " 
"another exception occurred:\n\n") - at jit.jit_callback("CFFI") +# XXX fix me: with this line, we get a single compiled version, which +# is good for small examples but gets worse and worse as the number of +# callbacks grows: +# @jit.jit_callback("CFFI") def py_invoke_callback(callback, ll_res, ll_args): extra_line = '' try: From noreply at buildbot.pypy.org Fri Oct 9 12:26:34 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 9 Oct 2015 12:26:34 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: small refactoring towards unpacking vector elements after the guard has failed Message-ID: <20151009102634.6345E1C1192@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80076:cbbff5a45632 Date: 2015-10-09 12:26 +0200 http://bitbucket.org/pypy/pypy/changeset/cbbff5a45632/ Log: small refactoring towards unpacking vector elements after the guard has failed diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1061,20 +1061,20 @@ # ----------------------------------------------------- def _accumulate(self, descr, failargs, values): - accuminfo = descr.rd_accum_list - while accuminfo: - i = accuminfo.getpos_in_failargs() + info = descr.rd_vector_info + while info: + i = info.getpos_in_failargs() value = values[i] assert isinstance(value, list) - if accuminfo.accum_operation == '+': + if info.accum_operation == '+': value = sum(value) - elif accuminfo.accum_operation == '*': + elif info.accum_operation == '*': def prod(acc, x): return acc * x value = reduce(prod, value, 1) else: raise NotImplementedError("accum operator in fail guard") values[i] = value - accuminfo = accuminfo.next() + info = info.next() def fail_guard(self, descr, saved_data=None, extra_value=None): values = [] diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- 
a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -558,7 +558,7 @@ self.current_clt.frame_info) self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() - self._accum_update_at_exit(arglocs, inputargs, faildescr, regalloc) + self._update_at_exit(arglocs, inputargs, faildescr, regalloc) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() self.write_pending_failure_recoveries(regalloc) @@ -610,15 +610,15 @@ guard_locs = self.rebuild_faillocs_from_descr(faildescr, version.inputargs) bridge_locs = self.rebuild_faillocs_from_descr(bridge_faildescr, version.inputargs) #import pdb; pdb.set_trace() - guard_accum_info = faildescr.rd_accum_list + guard_accum_info = faildescr.rd_vector_info # O(n^2), but usually you only have at most 1 fail argument while guard_accum_info: - bridge_accum_info = bridge_faildescr.rd_accum_list + bridge_accum_info = bridge_faildescr.rd_vector_info while bridge_accum_info: - if bridge_accum_info.scalar_position == guard_accum_info.scalar_position: + if bridge_accum_info.failargs_pos == guard_accum_info.failargs_pos: # the mapping might be wrong! 
- if bridge_accum_info.vector_loc is not guard_accum_info.vector_loc: - self.mov(guard_accum_info.vector_loc, bridge_accum_info.vector_loc) + if bridge_accum_info.location is not guard_accum_info.location: + self.mov(guard_accum_info.location, bridge_accum_info.location) bridge_accum_info = bridge_accum_info.next() guard_accum_info = guard_accum_info.next() @@ -1876,8 +1876,8 @@ self.mc.force_frame_size(DEFAULT_FRAME_BYTES) startpos = self.mc.get_relative_pos() # - self._accum_update_at_exit(guardtok.fail_locs, guardtok.failargs, - guardtok.faildescr, regalloc) + self._update_at_exit(guardtok.fail_locs, guardtok.failargs, + guardtok.faildescr, regalloc) # fail_descr, target = self.store_info_on_descr(startpos, guardtok) self.mc.PUSH(imm(fail_descr)) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -322,10 +322,10 @@ if not descr: return faillocs assert isinstance(descr, AbstractFailDescr) - if descr.rd_accum_list: - accuminfo = descr.rd_accum_list + if descr.rd_vector_info: + accuminfo = descr.rd_vector_info while accuminfo: - accuminfo.vector_loc = faillocs[accuminfo.getpos_in_failargs()] + accuminfo.location = faillocs[accuminfo.getpos_in_failargs()] loc = self.loc(accuminfo.getoriginal()) faillocs[accuminfo.getpos_in_failargs()] = loc accuminfo = accuminfo.next() diff --git a/rpython/jit/backend/x86/vector_ext.py b/rpython/jit/backend/x86/vector_ext.py --- a/rpython/jit/backend/x86/vector_ext.py +++ b/rpython/jit/backend/x86/vector_ext.py @@ -74,20 +74,20 @@ index += 1 self.mc.PBLENDW_xxi(loc.value, temp.value, select) - def _accum_update_at_exit(self, fail_locs, fail_args, faildescr, regalloc): + def _update_at_exit(self, fail_locs, fail_args, faildescr, regalloc): """ If accumulation is done in this loop, at the guard exit some vector registers must be adjusted to yield the correct value """ if not isinstance(faildescr, 
ResumeGuardDescr): return assert regalloc is not None - accum_info = faildescr.rd_accum_list + accum_info = faildescr.rd_vector_info while accum_info: - pos = accum_info.scalar_position + pos = accum_info.getpos_in_failargs() scalar_loc = fail_locs[pos] - vector_loc = accum_info.vector_loc + vector_loc = accum_info.location # the upper elements will be lost if saved to the stack! - scalar_arg = accum_info.scalar_box + scalar_arg = accum_info.getoriginal() assert isinstance(vector_loc, RegLoc) if not isinstance(scalar_loc, RegLoc): scalar_loc = regalloc.force_allocate_reg(scalar_arg) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -687,17 +687,9 @@ def clone(self): return self - def exits_early(self): - return False - - def attach_accum_info(self, pos, operator, arg, loc): - self.rd_accum_list = \ - AccumInfo(self.rd_accum_list, pos, operator, arg, loc) - def copy_all_attributes_from(self, other): pass - class AbstractResumeGuardDescr(ResumeDescr): _attrs_ = ('status',) @@ -873,10 +865,10 @@ self.rd_virtuals = other.rd_virtuals self.rd_numb = other.rd_numb # we don't copy status - if other.rd_accum_list: - self.rd_accum_list = other.rd_accum_list.clone() + if other.rd_vector_info: + self.rd_vector_info = other.rd_vector_info.clone() else: - other.rd_accum_list = None + other.rd_vector_info = None def store_final_boxes(self, guard_op, boxes, metainterp_sd): guard_op.setfailargs(boxes) @@ -907,6 +899,11 @@ def loop_version(self): return True + def clone(self): + cloned = CompileLoopVersionDescr() + cloned.copy_all_attributes_from(self) + return cloned + class AllVirtuals: llopaque = True cache = None diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -146,9 +146,9 @@ index = -1 final_descr = False - _attrs_ = ('adr_jump_offset', 'rd_locs', 
'rd_loop_token', 'rd_accum_list') + _attrs_ = ('adr_jump_offset', 'rd_locs', 'rd_loop_token', 'rd_vector_info') - rd_accum_list = None + rd_vector_info = None def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): raise NotImplementedError @@ -164,6 +164,12 @@ # compile a loop version out of this guard? return False + def attach_vector_info(self, info): + from rpython.jit.metainterp.resume import VectorInfo + assert isinstance(info, VectorInfo) + info.prev = self.rd_vector_info + self.rd_vector_info = info + class BasicFinalDescr(AbstractFailDescr): final_descr = True diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -192,7 +192,7 @@ def exits_early(self): if self.op.is_guard(): descr = self.op.getdescr() - return isinstance(descr, compile.CompileLoopVersionDescr) + return descr.exits_early() return False def loads_from_complex_object(self): @@ -713,7 +713,7 @@ self.guard_argument_protection(guard_node, tracker) # descr = guard_op.getdescr() - if isinstance(descr, compile.CompileLoopVersionDescr): + if descr.exits_early(): return # handle fail args if guard_op.getfailargs(): diff --git a/rpython/jit/metainterp/optimizeopt/guard.py b/rpython/jit/metainterp/optimizeopt/guard.py --- a/rpython/jit/metainterp/optimizeopt/guard.py +++ b/rpython/jit/metainterp/optimizeopt/guard.py @@ -87,7 +87,7 @@ # guard descr = CompileLoopVersionDescr() descr.copy_all_attributes_from(self.op.getdescr()) - descr.rd_accum_list = None # do not copy the accum list + descr.rd_vector_info = None # do not copy the accum list assert isinstance(descr, ResumeGuardDescr) guard = ResOperation(self.op.getopnum(), [compare], descr=descr) guard.setfailargs(loop.label.getarglist_copy()) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- 
a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -694,7 +694,8 @@ from rpython.jit.metainterp.compile import AbstractResumeGuardDescr assert isinstance(accum, AccumPack) assert isinstance(descr, AbstractResumeGuardDescr) - descr.attach_accum_info(i, accum.operator, arg, None) + info = AccumInfo(i, arg, accum.operator) + descr.attach_vector_info(info) seed = accum.getleftmostseed() failargs[i] = self.renamer.rename_map.get(seed, seed) diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -10,7 +10,6 @@ from rpython.jit.metainterp.resume import Snapshot from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop -#from rpython.jit.metainterp.optimizeopt.unroll import optimize_unroll from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, ResumeDescr) from rpython.jit.metainterp.history import (INT, FLOAT, VECTOR, ConstInt, ConstFloat, TargetToken, JitCellToken, AbstractFailDescr) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -34,38 +34,67 @@ self.jitcode = jitcode self.pc = pc -class AccumInfo(object): - _attrs_ = ('prev', 'accum_operation', 'scalar_position', 'scalar_box', 'vector_loc') +class VectorInfo(object): + """ + prev: the previous VectorInfo or None + failargs_pos: the index where to find it in the fail arguments + location: the register location (an integer), specified by the backend + variable: the original variable that lived at failargs_pos + """ + _attrs_ = ('prev', 'failargs_pos', 'location', 'variable') + prev = None + failargs_pos = -1 + location = None + variable = None - def __init__(self, prev, position, operation, box, loc): - self.prev = prev - self.accum_operation = operation - 
self.scalar_position = position - self.scalar_box = box - self.vector_loc = loc - - def getoriginal(self): - return self.scalar_box + def __init__(self, position, variable): + self.failargs_pos = position + self.variable = variable def getpos_in_failargs(self): - return self.scalar_position + return self.failargs_pos def next(self): return self.prev + def getoriginal(self): + return self.variable + def clone(self): prev = None if self.prev: prev = self.prev.clone() - return AccumInfo(prev, self.scalar_position, self.accum_operation, - self.scalar_box, None) + return self.instance_clone(prev) + + def instance_clone(self, prev): + raise NotImplementedError + +class UnpackAtExitInfo(VectorInfo): + def instance_clone(self, prev): + info = UnpackAtExitInfo(self.failargs_pos, self.variable) + info.prev = prev + return info + +class AccumInfo(VectorInfo): + _attrs_ = ('accum_operation', 'scalar') + + def __init__(self, position, variable, operation): + VectorInfo.__init__(self, position, variable) + self.accum_operation = operation + + def instance_clone(self, prev): + info = AccumInfo(self.failargs_pos, self.variable, + self.accum_operation) + info.location = self.location + info.prev = prev + return info def __repr__(self): return 'AccumInfo(%s,%s,%s,%s,%s)' % (self.prev is None, self.accum_operation, - self.scalar_position, - self.scalar_box, - self.vector_loc) + self.failargs_pos, + self.variable, + self.location) def _ensure_parent_resumedata(framestack, n): target = framestack[n] From noreply at buildbot.pypy.org Fri Oct 9 13:51:30 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:30 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: test, fix 'A' for empty_like, zeros_like order Message-ID: <20151009115130.9E1161C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80077:cf73e9b4ad42 Date: 2015-10-07 00:18 +0300 http://bitbucket.org/pypy/pypy/changeset/cf73e9b4ad42/ Log: test, fix 'A' for empty_like, 
zeros_like order diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -297,9 +297,9 @@ space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') - if npy_order == NPY.KEEPORDER: + if npy_order in (NPY.KEEPORDER, NPY.ANYORDER): # Try to copy the stride pattern - impl = w_a.implementation.astype(space, dtype, npy_order) + impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER) if subok: w_type = space.type(w_a) else: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -445,6 +445,8 @@ b = np.empty_like(A((2, 3)), subok=False) assert b.shape == (2, 3) assert type(b) is np.ndarray + b = np.empty_like(np.array(3.0), order='A') + assert type(b) is np.ndarray def test_size(self): from numpy import array,arange,cos From noreply at buildbot.pypy.org Fri Oct 9 13:51:32 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:32 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: refactor order handling in nditer Message-ID: <20151009115132.D00591C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80078:5ccb2d126b87 Date: 2015-10-09 09:26 +0300 http://bitbucket.org/pypy/pypy/changeset/5ccb2d126b87/ Log: refactor order handling in nditer diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -349,7 +349,6 @@ def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, w_itershape, buffersize=0, order=NPY.KEEPORDER): - self.order = order self.external_loop = False self.buffered = False self.tracked_index = '' @@ 
-377,7 +376,25 @@ for w_elem in w_seq_as_list] else: self.seq = [convert_to_array(space, w_seq)] - + if order == NPY.ANYORDER: + # 'A' means "'F' order if all the arrays are Fortran contiguous, + # 'C' order otherwise" + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_flags() & NPY.ARRAY_F_CONTIGUOUS): + break + else: + order = NPY.FORTRANORDER + elif order == NPY.KEEPORDER: + # 'K' means "as close to the order the array elements appear in + # memory as possible", so match self.order to seq.order + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_order() == NPY.FORTRANORDER): + break + else: + order = NPY.FORTRANORDER + self.order = order parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, len(self.seq), parse_op_flag) @@ -488,7 +505,7 @@ space.str_w(self_d.descr_repr(space)), space.str_w(seq_d.descr_repr(space)), i, self.casting) - elif self.buffered: + elif self.buffered and not (self.external_loop and len(self.seq)<2): for i in range(len(self.seq)): if i not in outargs: self.seq[i] = self.seq[i].descr_copy(space, @@ -510,12 +527,19 @@ def get_iter(self, space, i): arr = self.seq[i] - dtype = self.dtypes[i] - shape = self.shape imp = arr.implementation - backward = is_backward(imp.order, self.order) if arr.is_scalar(): return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + if (self.external_loop and len(self.seq)<2 and self.buffered): + # Special case, always return a memory-ordered iterator + stride = imp.dtype.elsize + backstride = imp.size * stride - stride + return ConcreteIter(imp, imp.get_size(), + [support.product(shape)], [stride], [backstride], + self.op_flags[i], self) + backward = imp.order != self.order + # XXX cleanup needed if (abs(imp.strides[0]) < abs(imp.strides[-1]) and not backward) or \ (abs(imp.strides[0]) > abs(imp.strides[-1]) and backward): # flip the strides. Is this always true for multidimension? 
diff --git a/pypy/module/micronumpy/test/test_nditer.py b/pypy/module/micronumpy/test/test_nditer.py --- a/pypy/module/micronumpy/test/test_nditer.py +++ b/pypy/module/micronumpy/test/test_nditer.py @@ -114,10 +114,7 @@ from numpy import nditer, array a = array([[1, 2], [3, 4]], order="C") - try: - b = array([[1, 2], [3, 4]], order="F") - except (NotImplementedError, ValueError): - skip('Fortran order not implemented') + b = array([[1, 2], [3, 4]], order="F") it = nditer([a, b]) r = list(it) @@ -161,11 +158,7 @@ assert r[0][0] == 100 r = [] - try: - it = nditer(a, flags=['buffered'], order='F') - except NotImplementedError as e: - assert 'unsupported value for order' in str(e) - skip('buffered with order="F" requires fortran tmp array creation') + it = nditer(a, flags=['buffered'], order='F') for x in it: r.append(x) array_r = array(r) From noreply at buildbot.pypy.org Fri Oct 9 13:51:36 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:36 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: merge default into branch Message-ID: <20151009115136.C510C1C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80079:fb278777555d Date: 2015-10-09 09:28 +0300 http://bitbucket.org/pypy/pypy/changeset/fb278777555d/ Log: merge default into branch diff too long, truncating to 2000 out of 3790 lines diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or 
__stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -449,7 +460,14 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. 
+ abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,18 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, 
self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): 
self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -53,3 +53,10 @@ Fix performance regression on operations mixing numpy scalars and Python floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. 
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -44,8 +51,8 @@ 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +60,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,22 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] 
kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +47,7 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +79,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +87,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +103,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 
+114,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +189,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +263,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = llmemory.raw_malloc_usage(size) @@ -421,7 +425,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -143,7 +143,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,10 +4,11 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit, 
rweakref +from rpython.rlib import jit, rweakref, clibffi from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform +from pypy.module import _cffi_backend from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr, ctypearray, ctypestruct, ctypevoid, ctypeenum) @@ -592,8 +593,9 @@ # ____________________________________________________________ - at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int) -def new_function_type(space, w_fargs, w_fresult, ellipsis=0): + at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int) +def new_function_type(space, w_fargs, w_fresult, ellipsis=0, + abi=_cffi_backend.FFI_DEFAULT_ABI): fargs = [] for w_farg in space.fixedview(w_fargs): if not isinstance(w_farg, ctypeobj.W_CType): @@ -602,28 +604,28 @@ if isinstance(w_farg, ctypearray.W_CTypeArray): w_farg = w_farg.ctptr fargs.append(w_farg) - return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) + return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi) -def _func_key_hash(unique_cache, fargs, fresult, ellipsis): +def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi): x = compute_identity_hash(fresult) for w_arg in fargs: y = compute_identity_hash(w_arg) x = intmask((1000003 * x) ^ y) - x ^= ellipsis + x ^= (ellipsis - abi) if unique_cache.for_testing: # constant-folded to False in translation; x &= 3 # but for test, keep only 2 bits of hash return x # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis, abi): try: - return _get_function_type(space, fargs, fresult, ellipsis) + return _get_function_type(space, fargs, fresult, ellipsis, abi) except KeyError: - return _build_function_type(space, fargs, fresult, ellipsis) + return _build_function_type(space, fargs, fresult, ellipsis, abi) @jit.elidable -def 
_get_function_type(space, fargs, fresult, ellipsis): +def _get_function_type(space, fargs, fresult, ellipsis, abi): # This function is elidable because if called again with exactly the # same arguments (and if it didn't raise KeyError), it would give # the same result, at least as long as this result is still live. @@ -633,18 +635,19 @@ # one such dict, but in case of hash collision, there might be # more. unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: ctype = weakdict.get(func_hash) if (ctype is not None and ctype.ctitem is fresult and ctype.fargs == fargs and - ctype.ellipsis == ellipsis): + ctype.ellipsis == ellipsis and + ctype.abi == abi): return ctype raise KeyError @jit.dont_look_inside -def _build_function_type(space, fargs, fresult, ellipsis): +def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # if ((fresult.size < 0 and @@ -658,9 +661,9 @@ raise oefmt(space.w_TypeError, "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: if weakdict.get(func_hash) is None: weakdict.set(func_hash, fct) diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root +from pypy.module import 
_cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import parse_c_type @@ -164,16 +165,28 @@ OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) != OP_FUNCTION_END: num_args += 1 - ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0 + # + ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0 + abi = (getarg(opcodes[base_index + num_args]) & 0xFE) + if abi == 0: + abi = _cffi_backend.FFI_DEFAULT_ABI + elif abi == 2: + if _cffi_backend.has_stdcall: + abi = _cffi_backend.FFI_STDCALL + else: + abi = _cffi_backend.FFI_DEFAULT_ABI + else: + raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) + # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] - return fargs, fret, ellipsis + return fargs, fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): @@ -190,7 +203,7 @@ # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. 
if self.nostruct_ctype is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected @@ -207,7 +220,7 @@ locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: @@ -218,7 +231,7 @@ locs[0] == 'R') def unexpected_fn_type(self, ffi): - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c --- a/pypy/module/_cffi_backend/src/parse_c_type.c +++ b/pypy/module/_cffi_backend/src/parse_c_type.c @@ -51,6 +51,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -165,6 +168,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -236,7 +241,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". 
*/ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -253,6 +258,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -269,6 +280,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -286,7 +302,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -315,7 +338,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -339,8 +362,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -348,6 +370,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2316,9 +2316,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) 
- def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3427,3 +3424,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py --- a/pypy/module/_cffi_backend/test/test_parse_c_type.py +++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py @@ -338,3 +338,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(cffi_opcode.PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -10,7 +10,6 @@ repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times Iterators terminating on the shortest input sequence: - izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... 
ifilter(pred, seq) --> elements of seq where pred(elem) is True ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False islice(seq, [start,] stop [, step]) --> elements from @@ -22,6 +21,14 @@ takewhile(pred, seq) --> seq[0], seq[1], until pred fails dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) + izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + izip_longest(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + + Combinatoric generators: + product(p, q, ... [repeat=1]) --> cartesian product + permutations(p[, r]) + combinations(p, r) + combinations_with_replacement(p, r) """ interpleveldefs = { diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -649,33 +649,38 @@ class W_IZipLongest(W_IMap): _error_name = "izip_longest" + _immutable_fields_ = ["w_fillvalue"] + + def _fetch(self, index): + w_iter = self.iterators_w[index] + if w_iter is not None: + space = self.space + try: + return space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + self.active -= 1 + if self.active <= 0: + # It was the last active iterator + raise + self.iterators_w[index] = None + return self.w_fillvalue def next_w(self): - space = self.space + # common case: 2 arguments + if len(self.iterators_w) == 2: + objects = [self._fetch(0), self._fetch(1)] + else: + objects = self._get_objects() + return self.space.newtuple(objects) + + def _get_objects(self): + # the loop is out of the way of the JIT nb = len(self.iterators_w) - if nb == 0: - raise OperationError(space.w_StopIteration, space.w_None) - - objects_w = [None] * nb - for index in range(nb): - w_value = self.w_fillvalue - w_it = self.iterators_w[index] - if w_it is not None: - try: - w_value = space.next(w_it) - except OperationError, 
e: - if not e.match(space, space.w_StopIteration): - raise - - self.active -= 1 - if self.active == 0: - # It was the last active iterator - raise - self.iterators_w[index] = None - - objects_w[index] = w_value - return space.newtuple(objects_w) + raise OperationError(self.space.w_StopIteration, self.space.w_None) + return [self._fetch(index) for index in range(nb)] def W_IZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,6 +15,7 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', + 'get_stats_asmmemmgr': 'interp_resop.get_stats_asmmemmgr', # those things are disabled because they have bugs, but if # they're found to be useful, fix test_ztranslation_jit_stats # in the backend first. get_stats_snapshot still produces diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -333,6 +333,13 @@ return space.wrap(W_JitInfoSnapshot(space, w_times, w_counters, w_counter_times)) +def get_stats_asmmemmgr(space): + """Returns the raw memory currently used by the JIT backend, + as a pair (total_memory_allocated, memory_in_use).""" + m1 = jit_hooks.stats_asmmemmgr_allocated(None) + m2 = jit_hooks.stats_asmmemmgr_used(None) + return space.newtuple([space.wrap(m1), space.wrap(m2)]) + def enable_debug(space): """ Set the jit debugging - completely necessary for some stats to work, most notably assembler counters. 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -273,16 +273,16 @@ guard_not_invalidated(descr=...) f100 = float_mul(f98, 0.500000) i101 = int_add(i79, 1) - i102 = arraylen_gc(p85, descr=) + i102 = arraylen_gc(p85, descr=) i103 = int_lt(i102, i101) cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) guard_no_exception(descr=...) - p104 = getfield_gc_r(p76, descr=) - p105 = new_with_vtable(descr=) - setfield_gc(p105, f100, descr=) - setarrayitem_gc(p104, i79, p105, descr=) + p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) i106 = getfield_raw_i(#, descr=) - setfield_gc(p76, i101, descr=) + setfield_gc(p76, i101, descr=) i107 = int_lt(i106, 0) guard_false(i107, descr=...) jump(..., descr=...) 
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -1,6 +1,6 @@ # Generated by pypy/tool/import_cffi.py import py -from cffi import FFI +from cffi import FFI, CDefError import math, os, sys import ctypes.util from cffi.backend_ctypes import CTypesBackend @@ -428,3 +428,59 @@ res = m.QueryPerformanceFrequency(p_freq) assert res != 0 assert p_freq[0] != 0 + + def test_explicit_cdecl_stdcall(self): + if sys.platform != 'win32': + py.test.skip("Windows-only test") + if self.Backend is CTypesBackend: + py.test.skip("not with the ctypes backend") + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tp = ffi.typeof(m.QueryPerformanceFrequency) + assert str(tp) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL __cdecl QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tpc = ffi.typeof(m.QueryPerformanceFrequency) + assert tpc is tp + # + ffi = FFI(backend=self.Backend()) + ffi.cdef(""" + BOOL WINAPI QueryPerformanceFrequency(LONGLONG *lpFrequency); + """) + m = ffi.dlopen("Kernel32.dll") + tps = ffi.typeof(m.QueryPerformanceFrequency) + assert tps is not tpc + assert str(tps) == "" + # + ffi = FFI(backend=self.Backend()) + ffi.cdef("typedef int (__cdecl *fnc_t)(int);") + ffi.cdef("typedef int (__stdcall *fns_t)(int);") + tpc = ffi.typeof("fnc_t") + tps = ffi.typeof("fns_t") + assert str(tpc) == "" + assert str(tps) == "" + # + fnc = ffi.cast("fnc_t", 0) + fns = ffi.cast("fns_t", 0) + ffi.new("fnc_t[]", [fnc]) + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + ffi.new("fns_t[]", [fns]) + + def test_stdcall_only_on_windows(self): + if 
sys.platform == 'win32': + py.test.skip("not-Windows-only test") + ffi = FFI(backend=self.Backend()) + ffi.cdef("double __stdcall sin(double x);") # stdcall ignored + m = ffi.dlopen(lib_m) + assert "double(*)(double)" in str(ffi.typeof(m.sin)) + x = m.sin(1.23) + assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -365,3 +365,17 @@ assert C.TWO == 2 assert C.NIL == 0 assert C.NEG == -1 + +def test_stdcall(): + ffi = FFI() + tp = ffi.typeof("int(*)(int __stdcall x(int)," + " long (__cdecl*y)(void)," + " short(WINAPI *z)(short))") + if sys.platform == 'win32': + stdcall = '__stdcall ' + else: + stdcall = '' + assert str(tp) == ( + "" % (stdcall, stdcall)) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1221,25 +1221,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): @@ -2261,3 +2242,180 @@ assert foo_s.fields[0][1].type is ffi.typeof("int") assert foo_s.fields[1][0] == 'b' assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_win32_calling_convention_0(): + ffi = FFI() 
+ ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + print 'cb1 =', cb1 + res = lib.call1(cb1) + assert res == 500*999*2 + print 'cb2 =', cb2 + print ffi.typeof(lib.call2) + print 'call2 =', lib.call2 + res = lib.call2(cb2) + print '...' + assert res == -500*999*3 + print 'done' + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + 
//printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. + ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + assert lib.call1(lib.cb1) == 500*999*2 + assert lib.call2(lib.cb2) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = ffi.verify(r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, 
(void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, lib.cb2) + py.test.raises(TypeError, lib.call2, lib.cb1) + pt = lib.call1(lib.cb1) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(lib.cb2) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_parse_c_type.py @@ -342,3 +342,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(lib._CFFI_PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(lib._CFFI_PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(lib._CFFI_PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py @@ -1,5 +1,5 @@ # 
Generated by pypy/tool/import_cffi.py -import py +import py, sys from cffi import cffi_opcode @@ -47,3 +47,29 @@ def test_all_primitives(): for name in cffi_opcode.PRIMITIVE_TO_INDEX: check(name, name) + + +def check_func(input, expected_output=None): + import _cffi_backend + ffi = _cffi_backend.FFI() + ct = ffi.typeof(ffi.callback(input, lambda: None)) + assert isinstance(ct, ffi.CType) + if sys.platform != 'win32': + expected_output = expected_output.replace('__stdcall *', '*') + assert ct.cname == expected_output + +def test_funcptr_stdcall(): + check_func("int(int)", "int(*)(int)") + check_func("int foobar(int)", "int(*)(int)") + check_func("int __stdcall(int)", "int(__stdcall *)(int)") + check_func("int __stdcall foobar(int)", "int(__stdcall *)(int)") + check_func("void __cdecl(void)", "void(*)()") + check_func("void __cdecl foobar(void)", "void(*)()") + check_func("void __stdcall(void)", "void(__stdcall *)()") + check_func("void __stdcall foobar(long, short)", + "void(__stdcall *)(long, short)") + check_func("void(void __cdecl(void), void __stdcall(void))", + "void(*)(void(*)(), void(__stdcall *)())") + +def test_variadic_overrides_stdcall(): + check("void (__stdcall*)(int, ...)", "void(*)(int, ...)") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1281,3 +1281,200 @@ """) assert lib.aaa == 42 py.test.raises(AttributeError, "lib.aaa = 43") + +def test_win32_calling_convention_0(): + ffi = FFI() + ffi.cdef(""" + int call1(int(__cdecl *cb)(int)); + int (*const call2)(int(__stdcall *cb)(int)); + """) + lib = verify(ffi, 'test_win32_calling_convention_0', r""" + #ifndef _MSC_VER + # define __stdcall /* nothing */ + #endif + int call1(int(*cb)(int)) { + int i, result = 0; + //printf("call1: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + 
result += cb(i); + //printf("result = %d\n", result); + return result; + } + int call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("call2: cb = %p\n", cb); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + @ffi.callback("int(int)") + def cb1(x): + return x * 2 + @ffi.callback("int __stdcall(int)") + def cb2(x): + return x * 3 + res = lib.call1(cb1) + assert res == 500*999*2 + assert res == ffi.addressof(lib, 'call1')(cb1) + res = lib.call2(cb2) + assert res == -500*999*3 + assert res == ffi.addressof(lib, 'call2')(cb2) + if sys.platform == 'win32': + assert '__stdcall' in str(ffi.typeof(cb2)) + assert '__stdcall' not in str(ffi.typeof(cb1)) + py.test.raises(TypeError, lib.call1, cb2) + py.test.raises(TypeError, lib.call2, cb1) + else: + assert '__stdcall' not in str(ffi.typeof(cb2)) + assert ffi.typeof(cb2) is ffi.typeof(cb1) + +def test_win32_calling_convention_1(): + ffi = FFI() + ffi.cdef(""" + int __cdecl call1(int(__cdecl *cb)(int)); + int __stdcall call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_1', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) + result += cb(i); + //printf("result = %d\n", result); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + //printf("here1\n"); + //printf("cb = %p, cb2 = %p\n", cb, (void *)cb2); + for (i = 0; i < 1000; i++) + result += cb(-i); + //printf("result = %d\n", result); + return result; + } + """) + print '<<< cb1 =', ffi.addressof(lib, 'cb1') + ptr_call1 = ffi.addressof(lib, 'call1') + assert 
lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + print '<<< cb2 =', ffi.addressof(lib, 'cb2') + ptr_call2 = ffi.addressof(lib, 'call2') + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + print '<<< done' + +def test_win32_calling_convention_2(): + # any mistake in the declaration of plain function (including the + # precise argument types and, here, the calling convention) are + # automatically corrected. But this does not apply to the 'cb' + # function pointer argument. + ffi = FFI() + ffi.cdef(""" + int __stdcall call1(int(__cdecl *cb)(int)); + int __cdecl call2(int(__stdcall *cb)(int)); + int (__cdecl *const cb1)(int); + int (__stdcall *const cb2)(int); + """) + lib = verify(ffi, 'test_win32_calling_convention_2', """ + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + int __cdecl call1(int(__cdecl *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(i); + return result; + } + int __stdcall call2(int(__stdcall *cb)(int)) { + int i, result = 0; + for (i = 0; i < 1000; i++) + result += cb(-i); + return result; + } + int __cdecl cb1(int x) { return x * 2; } + int __stdcall cb2(int x) { return x * 3; } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + assert lib.call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert ptr_call1(ffi.addressof(lib, 'cb1')) == 500*999*2 + assert lib.call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + assert ptr_call2(ffi.addressof(lib, 'cb2')) == -500*999*3 + +def test_win32_calling_convention_3(): + ffi = FFI() + ffi.cdef(""" + struct 
point { int x, y; }; + + int (*const cb1)(struct point); + int (__stdcall *const cb2)(struct point); + + struct point __stdcall call1(int(*cb)(struct point)); + struct point call2(int(__stdcall *cb)(struct point)); + """) + lib = verify(ffi, 'test_win32_calling_convention_3', r""" + #ifndef _MSC_VER + # define __cdecl + # define __stdcall + #endif + struct point { int x, y; }; + int cb1(struct point pt) { return pt.x + 10 * pt.y; } + int __stdcall cb2(struct point pt) { return pt.x + 100 * pt.y; } + struct point __stdcall call1(int(__cdecl *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + //printf("here1\n"); + //printf("cb = %p, cb1 = %p\n", cb, (void *)cb1); + for (i = 0; i < 1000; i++) { + struct point p = { i, -i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + struct point __cdecl call2(int(__stdcall *cb)(struct point)) { + int i; + struct point result = { 0, 0 }; + for (i = 0; i < 1000; i++) { + struct point p = { -i, i }; + int r = cb(p); + result.x += r; + result.y -= r; + } + return result; + } + """) + ptr_call1 = ffi.addressof(lib, 'call1') + ptr_call2 = ffi.addressof(lib, 'call2') + if sys.platform == 'win32': + py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) + py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) + py.test.raises(TypeError, ptr_call2, ffi.addressof(lib, 'cb1')) + pt = lib.call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = ptr_call1(ffi.addressof(lib, 'cb1')) + assert (pt.x, pt.y) == (-9*500*999, 9*500*999) + pt = lib.call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) + pt = ptr_call2(ffi.addressof(lib, 'cb2')) + assert (pt.x, pt.y) == (99*500*999, -99*500*999) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1201,25 +1201,6 @@ lib = ffi.verify('#include ', libraries=lib_m) assert lib.sin(1.23) == math.sin(1.23) -def test_callback_calling_convention(): - py.test.skip("later") - if sys.platform != 'win32': - py.test.skip("Windows only") - ffi = FFI() - ffi.cdef(""" - int call1(int(*__cdecl cb)(int)); - int call2(int(*__stdcall cb)(int)); - """) - lib = ffi.verify(""" - int call1(int(*__cdecl cb)(int)) { - return cb(42) + 1; - } - int call2(int(*__stdcall cb)(int)) { - return cb(-42) - 6; - } - """) - xxx - def test_opaque_integer_as_function_result(): #import platform #if platform.machine().startswith('sparc'): diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -213,8 +213,6 @@ v = graph.getreturnvar() if v.annotation is None: self.setbinding(v, annmodel.s_ImpossibleValue) - # policy-dependent computation - self.bookkeeper.compute_at_fixpoint() def validate(self): """Check that the annotation results are valid""" @@ -292,6 +290,18 @@ graph, block, index = position_key self.reflowpendingblock(graph, block) + def call_sites(self): + newblocks = self.added_blocks + if newblocks is None: + newblocks = self.annotated # all of them + for block in newblocks: + for op in block.operations: + if op.opname in ('simple_call', 'call_args'): + yield op + + # some blocks are partially annotated + if op.result.annotation is None: + break # ignore the unannotated part #___ simplification (should be moved elsewhere?) 
_______ @@ -309,6 +319,7 @@ graphs[graph] = True for graph in graphs: simplify.eliminate_empty_blocks(graph) + self.bookkeeper.compute_at_fixpoint() if block_subset is None: perform_normalizations(self) @@ -396,8 +407,7 @@ i = 0 while i < len(block.operations): op = block.operations[i] - self.bookkeeper.enter((graph, block, i)) - try: + with self.bookkeeper.at_position((graph, block, i)): new_ops = op.transform(self) if new_ops is not None: block.operations[i:i+1] = new_ops @@ -406,8 +416,6 @@ new_ops[-1].result = op.result op = new_ops[0] self.consider_op(op) - finally: - self.bookkeeper.leave() i += 1 except BlockedInference as e: diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -5,6 +5,7 @@ from __future__ import absolute_import import sys, types, inspect, weakref +from contextlib import contextmanager from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, @@ -88,34 +89,29 @@ del TLS.bookkeeper del self.position_key + @contextmanager + def at_position(self, pos): + """A context manager calling `self.enter()` and `self.leave()`""" + if hasattr(self, 'position_key') and pos is None: + yield + return + self.enter(pos) + try: + yield + finally: + self.leave() + def compute_at_fixpoint(self): # getbookkeeper() needs to work during this function, so provide # one with a dummy position - self.enter(None) - try: - def call_sites(): - newblocks = self.annotator.added_blocks - if newblocks is None: - newblocks = self.annotator.annotated # all of them - annotation = self.annotator.annotation - for block in newblocks: - for op in block.operations: - if op.opname in ('simple_call', 'call_args'): - yield op - - # some blocks are partially annotated - if annotation(op.result) is None: - break # ignore the unannotated part - - for call_op in call_sites(): + with self.at_position(None): + for call_op in 
self.annotator.call_sites(): self.consider_call_site(call_op) for pbc, args_s in self.emulated_pbc_calls.itervalues(): args = simple_args(args_s) pbc.consider_call_site(args, s_ImpossibleValue, None) self.emulated_pbc_calls = {} - finally: - self.leave() def check_no_flags_on_instances(self): # sanity check: no flags attached to heap stored instances @@ -501,10 +497,6 @@ """Analyse a call to a SomePBC() with the given args (list of annotations). """ - descs = list(pbc.descriptions) - first = descs[0] - first.mergecallfamilies(*descs[1:]) - if emulated is None: whence = self.position_key # fish the existing annotation for the result variable, @@ -522,12 +514,9 @@ op = None s_previous_result = s_ImpossibleValue - def schedule(graph, inputcells): - return self.annotator.recursivecall(graph, whence, inputcells) - results = [] - for desc in descs: - results.append(desc.pycall(schedule, args, s_previous_result, op)) + for desc in pbc.descriptions: + results.append(desc.pycall(whence, args, s_previous_result, op)) s_result = unionof(*results) return s_result @@ -552,10 +541,7 @@ "replace" can be set to a list of old unique_key values to forget now, because the given "unique_key" replaces them. """ - emulate_enter = not hasattr(self, 'position_key') - if emulate_enter: - self.enter(None) - try: + with self.at_position(None): emulated_pbc_calls = self.emulated_pbc_calls prev = [unique_key] prev.extend(replace) @@ -570,9 +556,6 @@ else: emulated = callback return self.pbc_call(pbc, args, emulated=emulated) - finally: - if emulate_enter: - self.leave() def _find_current_op(self, opname=None, arity=None, pos=None, s_type=None): """ Find operation that is currently being annotated. 
Do some diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -53,6 +53,22 @@ table.append(row) self.total_calltable_size += 1 + def find_row(self, bookkeeper, descs, args, op): + shape = rawshape(args) + with bookkeeper.at_position(None): + row = build_calltable_row(descs, args, op) + index = self.calltable_lookup_row(shape, row) + return shape, index + +def build_calltable_row(descs, args, op): + # see comments in CallFamily + row = {} + for desc in descs: + graph = desc.get_graph(args, op) + assert isinstance(graph, FunctionGraph) + row[desc.rowkey()] = graph + return row + class FrozenAttrFamily(object): """A family of FrozenDesc objects that have any common 'getattr' sites. @@ -295,22 +311,23 @@ else: return self.specializer(self, inputcells) - def pycall(self, schedule, args, s_previous_result, op=None): + def pycall(self, whence, args, s_previous_result, op=None): inputcells = self.parse_arguments(args) result = self.specialize(inputcells, op) if isinstance(result, FunctionGraph): graph = result # common case + annotator = self.bookkeeper.annotator # if that graph has a different signature, we need to re-parse # the arguments. 
# recreate the args object because inputcells may have been changed new_args = args.unmatch_signature(self.signature, inputcells) inputcells = self.parse_arguments(new_args, graph) - result = schedule(graph, inputcells) + result = annotator.recursivecall(graph, whence, inputcells) signature = getattr(self.pyobj, '_signature_', None) if signature: sigresult = enforce_signature_return(self, signature[1], result) if sigresult is not None: - self.bookkeeper.annotator.addpendingblock( + annotator.addpendingblock( graph, graph.returnblock, [sigresult]) result = sigresult # Some specializations may break the invariant of returning @@ -320,6 +337,10 @@ result = unionof(result, s_previous_result) return result + def get_graph(self, args, op): + inputs_s = self.parse_arguments(args) + return self.specialize(inputs_s, op) + def get_call_parameters(self, args_s): args = simple_args(args_s) inputcells = self.parse_arguments(args) @@ -347,37 +368,15 @@ @staticmethod def consider_call_site(descs, args, s_result, op): + family = descs[0].getcallfamily() shape = rawshape(args) - row = FunctionDesc.row_to_consider(descs, args, op) - family = descs[0].getcallfamily() + row = build_calltable_row(descs, args, op) family.calltable_add_row(shape, row) - - @staticmethod - def variant_for_call_site(bookkeeper, family, descs, args, op): - shape = rawshape(args) - bookkeeper.enter(None) - try: - row = FunctionDesc.row_to_consider(descs, args, op) - finally: - bookkeeper.leave() - index = family.calltable_lookup_row(shape, row) - return shape, index + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): return self - @staticmethod - def row_to_consider(descs, args, op): - # see comments in CallFamily - row = {} - for desc in descs: - def enlist(graph, ignore): - row[desc.rowkey()] = graph - return s_ImpossibleValue # meaningless - desc.pycall(enlist, args, s_ImpossibleValue, op) - assert row - return row - def get_s_signatures(self, shape): family = self.getcallfamily() table = 
family.calltables.get(shape) @@ -624,7 +623,7 @@ "specialization" % (self.name,)) return self.getclassdef(None) - def pycall(self, schedule, args, s_previous_result, op=None): + def pycall(self, whence, args, s_previous_result, op=None): from rpython.annotator.model import SomeInstance, SomeImpossibleValue if self.specialize: if self.specialize == 'specialize:ctr_location': @@ -777,6 +776,8 @@ @staticmethod def consider_call_site(descs, args, s_result, op): + descs[0].getcallfamily() + descs[0].mergecallfamilies(*descs[1:]) from rpython.annotator.model import SomeInstance, SomePBC, s_None if len(descs) == 1: # call to a single class, look at the result annotation @@ -890,13 +891,20 @@ def getuniquegraph(self): return self.funcdesc.getuniquegraph() - def pycall(self, schedule, args, s_previous_result, op=None): + def func_args(self, args): from rpython.annotator.model import SomeInstance if self.selfclassdef is None: raise Exception("calling %r" % (self,)) s_instance = SomeInstance(self.selfclassdef, flags=self.flags) - args = args.prepend(s_instance) - return self.funcdesc.pycall(schedule, args, s_previous_result, op) + return args.prepend(s_instance) + + def pycall(self, whence, args, s_previous_result, op=None): + func_args = self.func_args(args) + return self.funcdesc.pycall(whence, func_args, s_previous_result, op) + + def get_graph(self, args, op): + func_args = self.func_args(args) + return self.funcdesc.get_graph(func_args, op) def bind_under(self, classdef, name): self.bookkeeper.warning("rebinding an already bound %r" % (self,)) @@ -913,9 +921,10 @@ def consider_call_site(descs, args, s_result, op): cnt, keys, star = rawshape(args) shape = cnt + 1, keys, star # account for the extra 'self' - row = FunctionDesc.row_to_consider(descs, args, op) + row = build_calltable_row(descs, args, op) family = descs[0].getcallfamily() family.calltable_add_row(shape, row) + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): # we are computing call families and call 
tables that always contain @@ -1064,19 +1073,28 @@ return '' % (self.funcdesc, self.frozendesc) - def pycall(self, schedule, args, s_previous_result, op=None): + def func_args(self, args): from rpython.annotator.model import SomePBC s_self = SomePBC([self.frozendesc]) - args = args.prepend(s_self) - return self.funcdesc.pycall(schedule, args, s_previous_result, op) + return args.prepend(s_self) + + def pycall(self, whence, args, s_previous_result, op=None): + func_args = self.func_args(args) + return self.funcdesc.pycall(whence, func_args, s_previous_result, op) + + def get_graph(self, args, op): + func_args = self.func_args(args) + return self.funcdesc.get_graph(func_args, op) + @staticmethod def consider_call_site(descs, args, s_result, op): cnt, keys, star = rawshape(args) shape = cnt + 1, keys, star # account for the extra 'self' - row = FunctionDesc.row_to_consider(descs, args, op) + row = build_calltable_row(descs, args, op) family = descs[0].getcallfamily() family.calltable_add_row(shape, row) + descs[0].mergecallfamilies(*descs[1:]) def rowkey(self): return self.funcdesc diff --git a/rpython/annotator/specialize.py b/rpython/annotator/specialize.py --- a/rpython/annotator/specialize.py +++ b/rpython/annotator/specialize.py @@ -364,12 +364,6 @@ def specialize_argtype(funcdesc, args_s, *argindices): key = tuple([args_s[i].knowntype for i in argindices]) - for cls in key: - try: - assert '_must_specialize_' not in cls.classdesc.pyobj.__dict__, ( - "%s has the tag _must_specialize_" % (cls,)) - except AttributeError: - pass return maybe_star_args(funcdesc, key, args_s) def specialize_arglistitemtype(funcdesc, args_s, i): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -1097,102 +1097,6 @@ assert acc1 is acc2 assert acc1.attrs.keys() == ['v1'] - def test_simple_pbc_call(self): - def f1(x,y=0): - pass - def f2(x): - 
pass - def f3(x): - pass - def g(f): - f(1) - def h(): - f1(1) - f1(1,2) - g(f2) - g(f3) - - a = self.RPythonAnnotator() - s = a.build_types(h, []) - - fdesc1 = a.bookkeeper.getdesc(f1) - fdesc2 = a.bookkeeper.getdesc(f2) - fdesc3 = a.bookkeeper.getdesc(f3) - - fam1 = fdesc1.getcallfamily() - fam2 = fdesc2.getcallfamily() - fam3 = fdesc3.getcallfamily() - - assert fam1 is not fam2 - assert fam1 is not fam3 - assert fam3 is fam2 - - gf1 = graphof(a, f1) - gf2 = graphof(a, f2) - gf3 = graphof(a, f3) - - assert fam1.calltables == {(2, (), False): [{fdesc1: gf1}], - (1, (), False): [{fdesc1: gf1}]} - assert fam2.calltables == {(1, (), False): [{fdesc2: gf2, fdesc3: gf3}]} - - def test_pbc_call_ins(self): - class A(object): - def m(self): - pass - class B(A): - def n(self): - pass - class C(A): - def __init__(self): - pass - def m(self): - pass - def f(x): - b = B() - c = C() - b.n() - if x: - a = b - else: - a = c - a.m() - - a = self.RPythonAnnotator() - s = a.build_types(f, [bool]) - - clsdef = a.bookkeeper.getuniqueclassdef - bookkeeper = a.bookkeeper - - def getmdesc(bmeth): - return bookkeeper.immutablevalue(bmeth).any_description() - - mdescA_m = getmdesc(A().m) - mdescC_m = getmdesc(C().m) - mdescB_n = getmdesc(B().n) - - assert mdescA_m.name == 'm' == mdescC_m.name - assert mdescB_n.name == 'n' - - famA_m = mdescA_m.getcallfamily() - famC_m = mdescC_m.getcallfamily() - famB_n = mdescB_n.getcallfamily() - - assert famA_m is famC_m - assert famB_n is not famA_m - - gfB_n = graphof(a, B.n.im_func) - gfA_m = graphof(a, A.m.im_func) - gfC_m = graphof(a, C.m.im_func) - - assert famB_n.calltables == {(1, (), False): [{mdescB_n.funcdesc: gfB_n}] } - assert famA_m.calltables == {(1, (), False): [{mdescA_m.funcdesc: gfA_m, mdescC_m.funcdesc: gfC_m }] } - - mdescCinit = getmdesc(C().__init__) - famCinit = mdescCinit.getcallfamily() - gfCinit = graphof(a, C.__init__.im_func) - - assert famCinit.calltables == {(1, (), False): [{mdescCinit.funcdesc: gfCinit}] } - def 
test_isinstance_unsigned_1(self): def f(x): return isinstance(x, r_uint) @@ -2153,6 +2057,7 @@ s_f = a.bookkeeper.immutablevalue(f) a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()]) a.complete() + a.simplify() assert a.binding(graphof(a, f).getreturnvar()).knowntype == int fdesc = a.bookkeeper.getdesc(f) @@ -3969,28 +3874,6 @@ e = py.test.raises(Exception, a.build_types, f, [int]) assert "field '_my_lst' was migrated" in str(e.value) - def test_call_classes_with_noarg_init(self): - class A: - foo = 21 - class B(A): - foo = 22 - class C(A): - def __init__(self): - self.foo = 42 - class D(A): - def __init__(self): - self.foo = 43 - def f(i): - if i == 1: - cls = B - elif i == 2: - cls = D - else: - cls = C - return cls().foo - a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int]) - def test_range_variable_step(self): def g(n): return range(0, 10, n) diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -1,5 +1,8 @@ -import rpython.annotator.test.test_annrpython -parent = rpython.annotator.test.test_annrpython.TestAnnotateTestCase +import py + + +from rpython.annotator.test.test_annrpython import graphof +from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent class TestAnnotateAndSimplifyTestCase(parent): @@ -12,3 +15,122 @@ parent.RPythonAnnotator.complete(self) if self.translator is not None: self.simplify() + + def test_simple_pbc_call(self): + def f1(x,y=0): + pass + def f2(x): + pass + def f3(x): + pass + def g(f): + f(1) + def h(): + f1(1) + f1(1,2) + g(f2) + g(f3) + + a = self.RPythonAnnotator() + s = a.build_types(h, []) + + fdesc1 = a.bookkeeper.getdesc(f1) + fdesc2 = a.bookkeeper.getdesc(f2) + fdesc3 = a.bookkeeper.getdesc(f3) + From noreply at buildbot.pypy.org Fri Oct 9 13:51:38 2015 From: 
noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:38 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: document branch Message-ID: <20151009115138.EE3E11C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80080:c205c030aa9c Date: 2015-10-09 14:50 +0300 http://bitbucket.org/pypy/pypy/changeset/c205c030aa9c/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -60,3 +60,7 @@ .. branch: callfamily Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays From noreply at buildbot.pypy.org Fri Oct 9 13:51:41 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:41 +0200 (CEST) Subject: [pypy-commit] pypy fortran-order: close branch to be merged Message-ID: <20151009115141.0B4D61C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: fortran-order Changeset: r80081:69af1190de8f Date: 2015-10-09 14:50 +0300 http://bitbucket.org/pypy/pypy/changeset/69af1190de8f/ Log: close branch to be merged From noreply at buildbot.pypy.org Fri Oct 9 13:51:43 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 9 Oct 2015 13:51:43 +0200 (CEST) Subject: [pypy-commit] pypy default: merge fortran-order into default Message-ID: <20151009115143.A3BCC1C133C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80082:26b886602ace Date: 2015-10-09 14:51 +0300 http://bitbucket.org/pypy/pypy/changeset/26b886602ace/ Log: merge fortran-order into default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -60,3 +60,7 @@ .. branch: callfamily Refactorings of annotation and rtyping of function calls. + +.. 
branch: fortran-order + +Allow creation of fortran-ordered ndarrays diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -12,6 +12,7 @@ from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy import ufuncs +import pypy.module.micronumpy.constants as NPY from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root @@ -203,12 +204,12 @@ return shape, dtype def simple_new(space, nd, dims, typenum, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) return W_NDimArray.from_shape(space, shape, dtype) def simple_new_from_data(space, nd, dims, typenum, data, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, @@ -238,7 +239,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("strides must be NULL")) - order = 'C' if flags & NPY_C_CONTIGUOUS else 'F' + order = NPY.CORDER if flags & NPY_C_CONTIGUOUS else NPY.FORTRANORDER owning = True if flags & NPY_OWNDATA else False w_subtype = None diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -4,16 +4,17 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.descriptor import get_dtype_cache +import pypy.module.micronumpy.constants as NPY def scalar(space): 
dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.new_scalar(space, dtype, space.wrap(10.)) -def array(space, shape, order='C'): +def array(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) -def iarray(space, shape, order='C'): +def iarray(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_int64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) @@ -32,8 +33,8 @@ def test_FLAGS(self, space, api): s = array(space, [10]) - c = array(space, [10, 5, 3], order='C') - f = array(space, [10, 5, 3], order='F') + c = array(space, [10, 5, 3], order=NPY.CORDER) + f = array(space, [10, 5, 3], order=NPY.FORTRANORDER) assert api._PyArray_FLAGS(s) & 0x0001 assert api._PyArray_FLAGS(s) & 0x0002 assert api._PyArray_FLAGS(c) & 0x0001 diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -108,7 +108,8 @@ w_axis = space.wrap(0) if space.is_none(w_axis): args_w = [w_arg.reshape(space, - space.newlist([w_arg.descr_get_size(space)])) + space.newlist([w_arg.descr_get_size(space)]), + w_arg.get_order()) for w_arg in args_w] w_axis = space.wrap(0) dtype = args_w[0].get_dtype() @@ -140,7 +141,7 @@ dtype = find_result_type(space, args_w, []) # concatenate does not handle ndarray subtypes, it always returns a ndarray - res = W_NDimArray.from_shape(space, shape, dtype, 'C') + res = W_NDimArray.from_shape(space, shape, dtype, NPY.CORDER) chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,8 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): + def from_shape(space, shape, dtype, 
order=NPY.CORDER, + w_instance=None, zero=True): from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides if len(shape) > NPY.MAXDIMS: @@ -59,8 +60,9 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1, - order='C', owning=False, w_subtype=None, - w_base=None, writable=True, strides=None, start=0): + order=NPY.CORDER, owning=False, w_subtype=None, + w_base=None, writable=True, strides=None, + start=0): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import (calc_strides, calc_backstrides) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -56,6 +56,9 @@ jit.hint(len(backstrides), promote=True) return backstrides + def get_flags(self): + return self.flags + def getitem(self, index): return self.dtype.read(self, index, 0) @@ -89,17 +92,18 @@ def get_storage_size(self): return self.size - def reshape(self, orig_array, new_shape): + def reshape(self, orig_array, new_shape, order=NPY.ANYORDER): # Since we got to here, prod(new_shape) == self.size + order = support.get_order_as_CF(self.order, order) new_strides = None if self.size == 0: - new_strides, _ = calc_strides(new_shape, self.dtype, self.order) + new_strides, _ = calc_strides(new_shape, self.dtype, order) else: if len(self.get_shape()) == 0: new_strides = [self.dtype.elsize] * len(new_shape) else: new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) + self.get_strides(), order) if new_strides is None or len(new_strides) != len(new_shape): return None if new_strides is not None: @@ -303,10 +307,11 @@ return SliceArray(self.start, strides, backstrides, shape, self, orig_array) - def copy(self, space): + def copy(self, space, order=NPY.ANYORDER): + order = support.get_order_as_CF(self.order, order) strides, backstrides = 
calc_strides(self.get_shape(), self.dtype, - self.order) - impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, + order) + impl = ConcreteArray(self.get_shape(), self.dtype, order, strides, backstrides) return loop.setslice(space, self.get_shape(), impl, self) @@ -360,12 +365,12 @@ # but make the array storage contiguous in memory shape = self.get_shape() strides = self.get_strides() - if order not in ('C', 'F'): - raise oefmt(space.w_ValueError, "Unknown order %s in astype", order) + if order not in (NPY.KEEPORDER, NPY.FORTRANORDER, NPY.CORDER): + raise oefmt(space.w_ValueError, "Unknown order %d in astype", order) if len(strides) == 0: t_strides = [] backstrides = [] - elif order != self.order: + elif order in (NPY.FORTRANORDER, NPY.CORDER): t_strides, backstrides = calc_strides(shape, dtype, order) else: indx_array = range(len(strides)) @@ -378,6 +383,7 @@ t_strides[i] = base base *= shape[i] backstrides = calc_backstrides(t_strides, shape) + order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl @@ -429,6 +435,8 @@ self.shape = shape # already tested for overflow in from_shape_and_storage self.size = support.product(shape) * dtype.elsize + if order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "ConcreteArrayNotOwning but order is not 0,1 rather %d", order) self.order = order self.dtype = dtype self.strides = strides @@ -562,6 +570,8 @@ self.parent = parent self.storage = parent.storage self.gcstruct = parent.gcstruct + if parent.order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "SliceArray but parent order is not 0,1 rather %d", parent.order) self.order = parent.order self.dtype = dtype try: @@ -602,13 +612,13 @@ s = self.get_strides()[0] // dtype.elsize except IndexError: s = 1 - if self.order == 'C': + if self.order != 
NPY.FORTRANORDER: new_shape.reverse() for sh in new_shape: strides.append(s * dtype.elsize) backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) - if self.order == 'C': + if self.order != NPY.FORTRANORDER: strides.reverse() backstrides.reverse() new_shape.reverse() diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -77,9 +77,8 @@ elif order.startswith('K') or order.startswith('k'): return NPY.KEEPORDER else: - raise OperationError(space.w_TypeError, space.wrap( - "order not understood")) - + raise oefmt(space.w_TypeError, "Unknown order: '%s'", order) + return -1 def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -5,10 +5,10 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop, support -from pypy.module.micronumpy.base import ( +from pypy.module.micronumpy.base import (wrap_impl, W_NDimArray, convert_to_array, W_NumpyObject) -from pypy.module.micronumpy.converters import shape_converter -from . 
import constants as NPY +from pypy.module.micronumpy.converters import shape_converter, order_converter +import pypy.module.micronumpy.constants as NPY from .casting import scalar2dtype @@ -101,13 +101,8 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order == 'K': - order = 'C' - if order != 'C': # or order != 'F': - raise oefmt(space.w_ValueError, "Unknown order: %s", order) + w_order = space.wrap('C') + npy_order = order_converter(space, w_order, NPY.CORDER) if isinstance(w_object, W_NDimArray): if (dtype is None or w_object.get_dtype() is dtype): @@ -126,7 +121,7 @@ copy = True if copy: shape = w_object.get_shape() - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, w_object.implementation.getitem(0))) @@ -151,7 +146,7 @@ if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: # safe from overflow since from_shape checks w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) else: @@ -268,6 +263,7 @@ def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): + order = order_converter(space, w_order, NPY.CORDER) dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -281,7 +277,7 @@ support.product_check(shape) except OverflowError: raise oefmt(space.w_ValueError, "array is too big.") - return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) + return W_NDimArray.from_shape(space, shape, dtype, order, zero=zero) def empty(space, w_shape, w_dtype=None, 
w_order=None): return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=False) @@ -293,6 +289,7 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) + npy_order = order_converter(space, w_order, w_a.get_order()) if space.is_none(w_dtype): dtype = w_a.get_dtype() else: @@ -300,7 +297,16 @@ space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') + if npy_order in (NPY.KEEPORDER, NPY.ANYORDER): + # Try to copy the stride pattern + impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER) + if subok: + w_type = space.type(w_a) + else: + w_type = None + return wrap_impl(space, w_type, w_a, impl) return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + order=npy_order, w_instance=w_a if subok else None, zero=False) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -680,7 +680,7 @@ def tostring(space, arr): builder = StringBuilder() iter, state = arr.create_iter() - w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype()) itemsize = arr.get_dtype().elsize with w_res_str.implementation as storage: res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -97,11 +97,15 @@ self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): - order = order_converter(space, w_order, NPY.CORDER) - if order == NPY.FORTRANORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) - return space.wrap(loop.tostring(space, self)) + try: + 
order = order_converter(space, w_order, NPY.CORDER) + except: + raise oefmt(space.w_TypeError, "order not understood") + order = support.get_order_as_CF(self.get_order(), order) + arr = self + if order != arr.get_order(): + arr = W_NDimArray(self.implementation.transpose(self, None)) + return space.wrap(loop.tostring(space, arr)) def getitem_filter(self, space, arr): if arr.ndims() > 1 and arr.get_shape() != self.get_shape(): @@ -365,11 +369,13 @@ return self.implementation.getitem(self.implementation.start) def descr_copy(self, space, w_order=None): - order = order_converter(space, w_order, NPY.KEEPORDER) - if order == NPY.FORTRANORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) - copy = self.implementation.copy(space) + if w_order is None: + order = NPY.KEEPORDER + elif space.isinstance_w(w_order, space.w_int): + order = space.int_w(w_order) + else: + order = order_converter(space, w_order, NPY.KEEPORDER) + copy = self.implementation.copy(space, order) w_subtype = space.type(self) return wrap_impl(space, w_subtype, self, copy) @@ -392,15 +398,15 @@ 'array does not have imaginary part to set') self.implementation.set_imag(space, self, w_value) - def reshape(self, space, w_shape): + def reshape(self, space, w_shape, order): new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) - new_impl = self.implementation.reshape(self, new_shape) + new_impl = self.implementation.reshape(self, new_shape, order) if new_impl is not None: return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data - arr = self.descr_copy(space) + arr = self.descr_copy(space, space.wrap(order)) if arr.get_size() > 0: - new_implementation = arr.implementation.reshape(self, new_shape) + new_implementation = arr.implementation.reshape(self, new_shape, order) if new_implementation is None: raise oefmt(space.w_ValueError, 'could not reshape array of size %d to shape %s', @@ -434,16 +440,13 @@ if order 
== NPY.KEEPORDER: raise OperationError(space.w_ValueError, space.wrap( "order 'K' is not permitted for reshaping")) - if order != NPY.CORDER and order != NPY.ANYORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) if len(args_w) == 1: if space.is_none(args_w[0]): return self.descr_view(space) w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - return self.reshape(space, w_shape) + return self.reshape(space, w_shape, order) def descr_get_transpose(self, space, axes=None): return W_NDimArray(self.implementation.transpose(self, axes)) @@ -514,20 +517,8 @@ return space.newlist(l_w) def descr_ravel(self, space, w_order=None): - if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order == 'K' and is_c_contiguous(self.implementation): - for s in self.implementation.get_strides(): - if s < 0: - break - else: - order = 'C' - if order != 'C': - raise OperationError(space.w_NotImplementedError, space.wrap( - "order != 'C' only partially implemented")) - return self.reshape(space, space.wrap(-1)) + order = order_converter(space, w_order, self.get_order()) + return self.reshape(space, space.wrap(-1), order) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None), @@ -541,14 +532,15 @@ space.wrap("axis unsupported for compress")) arr = self else: - arr = self.reshape(space, space.wrap(-1)) + arr = self.reshape(space, space.wrap(-1), self.get_order()) index = convert_to_array(space, w_obj) return arr.getitem_filter(space, index) def descr_flatten(self, space, w_order=None): + order = order_converter(space, w_order, self.get_order()) if self.is_scalar(): # scalars have no storage - return self.reshape(space, space.wrap(1)) + return self.reshape(space, space.wrap(1), order) w_res = self.descr_ravel(space, w_order) if w_res.implementation.storage == self.implementation.storage: return w_res.descr_copy(space) @@ -631,7 +623,7 @@ space.newtuple([space.wrap(addr), space.w_False])) 
space.setitem_str(w_d, 'shape', self.descr_get_shape(space)) space.setitem_str(w_d, 'typestr', self.get_dtype().descr_get_str(space)) - if self.implementation.order == 'C': + if self.implementation.order == NPY.CORDER: # Array is contiguous, no strides in the interface. strides = space.w_None else: @@ -690,8 +682,9 @@ "according to the rule %s", space.str_w(self.get_dtype().descr_repr(space)), space.str_w(new_dtype.descr_repr(space)), casting) - order = support.get_order_as_CF(self.get_order(), order) - if (not copy and new_dtype == self.get_dtype() and order == self.get_order() + order = order_converter(space, space.wrap(order), self.get_order()) + if (not copy and new_dtype == self.get_dtype() + and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order()) and (subok or type(self) is W_NDimArray)): return self impl = self.implementation @@ -972,7 +965,7 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) # Adapt the smallest dim to the new itemsize - if self.get_order() == 'F': + if self.get_order() == NPY.FORTRANORDER: minstride = strides[0] mini = 0 else: @@ -1136,7 +1129,7 @@ matches = True if dtype != out.get_dtype(): matches = False - elif not out.implementation.order == "C": + elif not out.implementation.order == NPY.CORDER: matches = False elif out.ndims() != len(out_shape): matches = False @@ -1195,7 +1188,7 @@ out = out_converter(space, w_out) if space.is_none(w_axis): w_axis = space.wrap(0) - arr = self.reshape(space, space.wrap(-1)) + arr = self.reshape(space, space.wrap(-1), self.get_order()) else: arr = self ufunc = getattr(ufuncs.get(space), ufunc_name) @@ -1408,10 +1401,6 @@ strides=strides) order = order_converter(space, w_order, NPY.CORDER) - if order == NPY.CORDER: - order = 'C' - else: - order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) @@ -1448,7 
+1437,7 @@ raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - buf_len, 'C', False, w_subtype, + buf_len, NPY.CORDER, False, w_subtype, strides=strides) else: return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -11,6 +11,8 @@ shape_agreement, shape_agreement_multiple) from pypy.module.micronumpy.casting import (find_binop_result_dtype, can_cast_array, can_cast_type) +import pypy.module.micronumpy.constants as NPY +from pypy.module.micronumpy.converters import order_converter def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): @@ -142,14 +144,13 @@ 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' 'multi-index is being tracked') - -def is_backward(imp, order): - if order == 'K' or (order == 'C' and imp.order == 'C'): +def is_backward(imp_order, order): + if imp_order == order: return False - elif order == 'F' and imp.order == 'C': + if order == NPY.KEEPORDER: + return False + else: return True - else: - raise NotImplementedError('not implemented yet') class OperandIter(ArrayIter): @@ -234,7 +235,7 @@ continue assert isinstance(op_it, ArrayIter) indx = len(op_it.strides) - if it.order == 'F': + if it.order == NPY.FORTRANORDER: indx = len(op_it.array.strides) - indx assert indx >=0 astrides = op_it.array.strides[indx:] @@ -250,7 +251,7 @@ it.order) it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: - if it.order == 'F': + if it.order == NPY.FORTRANORDER: it.shape = it.shape[1:] else: it.shape = it.shape[:-1] @@ -261,10 +262,10 @@ break # Always coalesce at least one for i in range(len(it.iters)): - new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C') + new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], 
it, NPY.CORDER) it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: - if it.order == 'F': + if it.order == NPY.FORTRANORDER: it.shape = it.shape[1:] else: it.shape = it.shape[:-1] @@ -287,7 +288,7 @@ return old_iter strides = old_iter.strides backstrides = old_iter.backstrides - if order == 'F': + if order == NPY.FORTRANORDER: new_shape = shape[1:] new_strides = strides[1:] new_backstrides = backstrides[1:] @@ -346,8 +347,8 @@ class W_NDIter(W_NumpyObject): _immutable_fields_ = ['ndim', ] def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, - w_casting, w_op_axes, w_itershape, buffersize=0, order='K'): - self.order = order + w_casting, w_op_axes, w_itershape, buffersize=0, + order=NPY.KEEPORDER): self.external_loop = False self.buffered = False self.tracked_index = '' @@ -375,7 +376,25 @@ for w_elem in w_seq_as_list] else: self.seq = [convert_to_array(space, w_seq)] - + if order == NPY.ANYORDER: + # 'A' means "'F' order if all the arrays are Fortran contiguous, + # 'C' order otherwise" + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_flags() & NPY.ARRAY_F_CONTIGUOUS): + break + else: + order = NPY.FORTRANORDER + elif order == NPY.KEEPORDER: + # 'K' means "as close to the order the array elements appear in + # memory as possible", so match self.order to seq.order + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_order() == NPY.FORTRANORDER): + break + else: + order = NPY.FORTRANORDER + self.order = order parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, len(self.seq), parse_op_flag) @@ -439,12 +458,15 @@ str(self.shape)) if self.tracked_index != "": - if self.order == "K": - self.order = self.seq[0].implementation.order + order = self.order + if order == NPY.KEEPORDER: + order = self.seq[0].implementation.order if self.tracked_index == "multi": backward = False else: - backward = self.order != self.tracked_index + backward = (( + order == NPY.CORDER and 
self.tracked_index != 'C') or ( + order == NPY.FORTRANORDER and self.tracked_index != 'F')) self.index_iter = IndexIterator(self.shape, backward=backward) # handle w_op_dtypes part 2: copy where needed if possible @@ -456,7 +478,6 @@ self.dtypes[i] = seq_d elif self_d != seq_d: impl = self.seq[i].implementation - order = support.get_order_as_CF(impl.order, self.order) if self.buffered or 'r' in self.op_flags[i].tmp_copy: if not can_cast_array( space, self.seq[i], self_d, self.casting): @@ -466,7 +487,7 @@ space.str_w(seq_d.descr_repr(space)), space.str_w(self_d.descr_repr(space)), self.casting) - + order = support.get_order_as_CF(impl.order, self.order) new_impl = impl.astype(space, self_d, order).copy(space) self.seq[i] = W_NDimArray(new_impl) else: @@ -484,7 +505,7 @@ space.str_w(self_d.descr_repr(space)), space.str_w(seq_d.descr_repr(space)), i, self.casting) - elif self.buffered: + elif self.buffered and not (self.external_loop and len(self.seq)<2): for i in range(len(self.seq)): if i not in outargs: self.seq[i] = self.seq[i].descr_copy(space, @@ -506,12 +527,19 @@ def get_iter(self, space, i): arr = self.seq[i] - dtype = self.dtypes[i] - shape = self.shape imp = arr.implementation - backward = is_backward(imp, self.order) if arr.is_scalar(): return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + if (self.external_loop and len(self.seq)<2 and self.buffered): + # Special case, always return a memory-ordered iterator + stride = imp.dtype.elsize + backstride = imp.size * stride - stride + return ConcreteIter(imp, imp.get_size(), + [support.product(shape)], [stride], [backstride], + self.op_flags[i], self) + backward = imp.order != self.order + # XXX cleanup needed if (abs(imp.strides[0]) < abs(imp.strides[-1]) and not backward) or \ (abs(imp.strides[0]) > abs(imp.strides[-1]) and backward): # flip the strides. Is this always true for multidimension? 
@@ -704,13 +732,15 @@ @unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes=WrappedDefault(None), order=str, + w_op_dtypes=WrappedDefault(None), w_order=WrappedDefault(None), w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), - w_itershape=WrappedDefault(None), buffersize=int) + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(0)) def descr_new_nditer(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, - w_casting, w_op_axes, w_itershape, buffersize=0, order='K'): + w_casting, w_op_axes, w_itershape, w_buffersize, w_order): + npy_order = order_converter(space, w_order, NPY.KEEPORDER) + buffersize = space.int_w(w_buffersize) return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, buffersize, order) + w_itershape, buffersize, npy_order) W_NDIter.typedef = TypeDef('numpy.nditer', __new__ = interp2app(descr_new_nditer), diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -310,14 +310,14 @@ backstrides = [] s = 1 shape_rev = shape[:] - if order == 'C': + if order in [NPY.CORDER, NPY.ANYORDER]: shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) strides.append(s * dtype.elsize) backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit - if order == 'C': + if order in [NPY.CORDER, NPY.ANYORDER]: strides.reverse() backstrides.reverse() return strides, backstrides @@ -345,7 +345,7 @@ last_step = 1 oldI = 0 new_strides = [] - if order == 'F': + if order == NPY.FORTRANORDER: for i in range(len(old_shape)): steps.append(old_strides[i] / last_step) last_step *= old_shape[i] @@ -365,7 +365,7 @@ if oldI < len(old_shape): cur_step = steps[oldI] n_old_elems_to_use *= old_shape[oldI] - elif order == 'C': + else: for i in range(len(old_shape) - 1, -1, -1): steps.insert(0, old_strides[i] / last_step) last_step *= old_shape[i] diff --git 
a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -7,6 +7,7 @@ from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objspace import StdObjSpace +from pypy.module.micronumpy import constants as NPY def issequence_w(space, w_obj): from pypy.module.micronumpy.base import W_NDimArray @@ -176,15 +177,11 @@ return space.is_true(space.gt(w_priority_r, w_priority_l)) def get_order_as_CF(proto_order, req_order): - if req_order == 'C': - return 'C' - elif req_order == 'F': - return 'F' - elif req_order == 'K': - return proto_order - elif req_order == 'A': - return proto_order - + if req_order == NPY.CORDER: + return NPY.CORDER + elif req_order == NPY.FORTRANORDER: + return NPY.FORTRANORDER + return proto_order def descr_set_docstring(space, w_obj, w_docstring): if not isinstance(space, StdObjSpace): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -6,6 +6,7 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.strides import Chunk, new_view, EllipsisChunk from pypy.module.micronumpy.ndarray import W_NDimArray +import pypy.module.micronumpy.constants as NPY from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest @@ -45,20 +46,20 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) assert a.strides == [15, 
3, 1] assert a.backstrides == [135, 12, 2] - a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order=NPY.CORDER) assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -77,7 +78,7 @@ assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -97,7 +98,7 @@ assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) @@ -114,7 +115,7 @@ assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) @@ -131,14 +132,14 @@ assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array(self.space, 
[10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] @@ -155,17 +156,17 @@ def test_calc_new_strides(self): from pypy.module.micronumpy.strides import calc_new_strides - assert calc_new_strides([2, 4], [4, 2], [4, 2], "C") == [8, 2] - assert calc_new_strides([2, 4, 3], [8, 3], [1, 16], 'F') == [1, 2, 16] - assert calc_new_strides([2, 3, 4], [8, 3], [1, 16], 'F') is None - assert calc_new_strides([24], [2, 4, 3], [48, 6, 1], 'C') is None - assert calc_new_strides([24], [2, 4, 3], [24, 6, 2], 'C') == [2] - assert calc_new_strides([105, 1], [3, 5, 7], [35, 7, 1],'C') == [1, 1] - assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],'C') == [105, 1] - assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],'F') is None - assert calc_new_strides([1, 1, 1, 105, 1], [15, 7], [7, 1],'C') == \ + assert calc_new_strides([2, 4], [4, 2], [4, 2], NPY.CORDER) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16], NPY.FORTRANORDER) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16], NPY.FORTRANORDER) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1], NPY.CORDER) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2], NPY.CORDER) == [2] + assert calc_new_strides([105, 1], [3, 5, 7], [35, 7, 1],NPY.CORDER) == [1, 1] + assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],NPY.CORDER) == [105, 1] + assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],NPY.FORTRANORDER) is None + assert calc_new_strides([1, 1, 1, 105, 1], [15, 7], [7, 1],NPY.CORDER) == \ [105, 105, 105, 1, 1] - assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],'F') == \ + assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],NPY.FORTRANORDER) == \ [1, 1, 1, 105, 105] def test_find_shape(self): @@ -444,6 +445,8 @@ b = np.empty_like(A((2, 3)), subok=False) assert b.shape == (2, 3) assert 
type(b) is np.ndarray + b = np.empty_like(np.array(3.0), order='A') + assert type(b) is np.ndarray def test_size(self): from numpy import array,arange,cos @@ -534,10 +537,10 @@ assert (b == a).all() b = a.copy(order='A') assert (b == a).all() - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.copy, order='F') - raises(NotImplementedError, a.copy, order=True) + b = a.copy(order='F') + assert (b == a).all() + b = a.copy(order=True) + assert (b == a).all() def test_iterator_init(self): from numpy import array @@ -918,9 +921,11 @@ assert a.reshape((0,), order='A').shape == (0,) raises(TypeError, a.reshape, (0,), badarg="C") raises(ValueError, a.reshape, (0,), order="K") - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.reshape, (0,), order='F') + b = a.reshape((0,), order='F') + assert b.shape == (0,) + a = array(range(24), 'uint8') + assert a.reshape([2, 3, 4], order=True).strides ==(1, 2, 6) + assert a.reshape([2, 3, 4], order=False).strides ==(12, 4, 1) def test_slice_reshape(self): from numpy import zeros, arange @@ -2676,11 +2681,11 @@ assert a[1][2][1] == 15 def test_create_order(self): - import sys, numpy as np + import numpy as np for order in [False, True, 'C', 'F']: a = np.empty((2, 3), float, order=order) assert a.shape == (2, 3) - if order in [True, 'F'] and '__pypy__' not in sys.builtin_module_names: + if order in [True, 'F']: assert a.flags['F'] assert not a.flags['C'] else: @@ -3577,10 +3582,7 @@ assert a.tostring(order) == '\x01\x02\x03\x04' import sys for order in (True, 'F'): - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.tostring, order) - else: - assert a.tostring(order) == '\x01\x03\x02\x04' + assert a.tostring(order) == '\x01\x03\x02\x04' assert array(2.2-1.1j, dtype='>c16').tostring() == \ '@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a' assert array(2.2-1.1j, dtype=' Author: Armin Rigo Branch: ppc-updated-backend Changeset: 
r80083:69f522aef55b Date: 2015-10-09 15:01 +0200 http://bitbucket.org/pypy/pypy/changeset/69f522aef55b/ Log: PPC Backend #7: PyPy Translation All tests pass, and a full PyPy translation works. Some hard gdb- ing but not too bad. diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -913,7 +913,7 @@ descr.adr_jump_offset = failure_recovery_pos relative_offset = tok.pos_recovery_stub - tok.offset guard_pos = block_start + tok.offset - if not tok.is_guard_not_invalidated: + if not tok.guard_not_invalidated(): # patch the guard jump to the stub # overwrite the generate NOP with a B_offs to the pos of the # stub diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -36,11 +36,9 @@ class ArmGuardToken(GuardToken): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - offset, exc, frame_depth, is_guard_not_invalidated=False, - is_guard_not_forced=False, fcond=c.AL): + offset, guard_opnum, frame_depth, fcond=c.AL): GuardToken.__init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - exc, frame_depth, is_guard_not_invalidated, - is_guard_not_forced) + guard_opnum, frame_depth) self.fcond = fcond self.offset = offset @@ -175,10 +173,7 @@ self.mc.RSB_ri(resloc.value, l0.value, imm=0) return fcond - def build_guard_token(self, op, frame_depth, arglocs, offset, fcond, save_exc, - is_guard_not_invalidated=False, - is_guard_not_forced=False): - assert isinstance(save_exc, bool) + def build_guard_token(self, op, frame_depth, arglocs, offset, fcond): assert isinstance(fcond, int) descr = op.getdescr() assert isinstance(descr, AbstractFailDescr) @@ -189,16 +184,12 @@ failargs=op.getfailargs(), fail_locs=arglocs, offset=offset, - exc=save_exc, + guard_opnum=op.getopnum(), frame_depth=frame_depth, - 
is_guard_not_invalidated=is_guard_not_invalidated, - is_guard_not_forced=is_guard_not_forced, fcond=fcond) return token - def _emit_guard(self, op, arglocs, save_exc, - is_guard_not_invalidated=False, - is_guard_not_forced=False): + def _emit_guard(self, op, arglocs, is_guard_not_invalidated=False): if is_guard_not_invalidated: fcond = c.cond_none else: @@ -206,10 +197,9 @@ self.guard_success_cc = c.cond_none assert fcond != c.cond_none pos = self.mc.currpos() - token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond, save_exc, - is_guard_not_invalidated, - is_guard_not_forced) + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], pos, fcond) self.pending_guards.append(token) + assert token.guard_not_invalidated() == is_guard_not_invalidated # For all guards that are not GUARD_NOT_INVALIDATED we emit a # breakpoint to ensure the location is patched correctly. In the case # of GUARD_NOT_INVALIDATED we use just a NOP, because it is only @@ -221,12 +211,12 @@ return c.AL def emit_op_guard_true(self, op, arglocs, regalloc, fcond): - fcond = self._emit_guard(op, arglocs, save_exc=False) + fcond = self._emit_guard(op, arglocs) return fcond def emit_op_guard_false(self, op, arglocs, regalloc, fcond): self.guard_success_cc = c.get_opposite_of(self.guard_success_cc) - fcond = self._emit_guard(op, arglocs, save_exc=False) + fcond = self._emit_guard(op, arglocs) return fcond def emit_op_guard_value(self, op, arglocs, regalloc, fcond): @@ -244,7 +234,7 @@ self.mc.VCMP(l0.value, l1.value) self.mc.VMRS(cond=fcond) self.guard_success_cc = c.EQ - fcond = self._emit_guard(op, failargs, save_exc=False) + fcond = self._emit_guard(op, failargs) return fcond emit_op_guard_nonnull = emit_op_guard_true @@ -256,14 +246,14 @@ def emit_op_guard_class(self, op, arglocs, regalloc, fcond): self._cmp_guard_class(op, arglocs, regalloc, fcond) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs[2:], save_exc=False) + self._emit_guard(op, arglocs[2:]) 
return fcond def emit_op_guard_nonnull_class(self, op, arglocs, regalloc, fcond): self.mc.CMP_ri(arglocs[0].value, 1) self._cmp_guard_class(op, arglocs, regalloc, c.HS) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs[2:], save_exc=False) + self._emit_guard(op, arglocs[2:]) return fcond def _cmp_guard_class(self, op, locs, regalloc, fcond): @@ -288,7 +278,7 @@ def emit_op_guard_gc_type(self, op, arglocs, regalloc, fcond): self._cmp_guard_gc_type(arglocs[0], arglocs[1].value, fcond) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs[2:], save_exc=False) + self._emit_guard(op, arglocs[2:]) return fcond def emit_op_guard_is_object(self, op, arglocs, regalloc, fcond): @@ -309,7 +299,7 @@ self.mc.LDRB_rr(r.ip.value, r.ip.value, r.lr.value) self.mc.TST_ri(r.ip.value, imm=(IS_OBJECT_FLAG & 0xff)) self.guard_success_cc = c.NE - self._emit_guard(op, arglocs[1:], save_exc=False) + self._emit_guard(op, arglocs[1:]) return fcond def emit_op_guard_subclass(self, op, arglocs, regalloc, fcond): @@ -353,12 +343,11 @@ self.mc.CMP_rr(r.ip.value, r.lr.value) # the guard passes if we get a result of "below or equal" self.guard_success_cc = c.LS - self._emit_guard(op, arglocs[2:], save_exc=False) + self._emit_guard(op, arglocs[2:]) return fcond def emit_op_guard_not_invalidated(self, op, locs, regalloc, fcond): - return self._emit_guard(op, locs, save_exc=False, - is_guard_not_invalidated=True) + return self._emit_guard(op, locs, is_guard_not_invalidated=True) def emit_op_label(self, op, arglocs, regalloc, fcond): self._check_frame_depth_debug(self.mc) @@ -498,7 +487,7 @@ self.mc.LDR_ri(loc.value, loc.value) self.mc.CMP_ri(loc.value, 0) self.guard_success_cc = c.EQ - fcond = self._emit_guard(op, failargs, save_exc=True) + fcond = self._emit_guard(op, failargs) # If the previous operation was a COND_CALL, overwrite its conditional # jump to jump over this GUARD_NO_EXCEPTION as well, if we can if self._find_nearby_operation(-1).getopnum() == rop.COND_CALL: @@ -515,7 
+504,7 @@ self.mc.CMP_rr(r.ip.value, loc.value) self.guard_success_cc = c.EQ - self._emit_guard(op, failargs, save_exc=True) + self._emit_guard(op, failargs) self._store_and_reset_exception(self.mc, resloc) return fcond @@ -1047,7 +1036,7 @@ def store_force_descr(self, op, fail_locs, frame_depth): pos = self.mc.currpos() - guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL, True, False, True) + guard_token = self.build_guard_token(op, frame_depth, fail_locs, pos, c.AL) #self.pending_guards.append(guard_token) self._finish_gcmap = guard_token.gcmap self._store_force_index(op) @@ -1152,7 +1141,7 @@ self.mc.LDR_ri(r.ip.value, r.fp.value, imm=ofs) self.mc.CMP_ri(r.ip.value, 0) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs, save_exc=True, is_guard_not_forced=True) + self._emit_guard(op, arglocs) return fcond def _genop_call_may_force(self, op, arglocs, regalloc, fcond): diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -23,8 +23,8 @@ class GuardToken(object): - def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, exc, - frame_depth, is_guard_not_invalidated, is_guard_not_forced): + def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, + guard_opnum, frame_depth): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr @@ -32,9 +32,16 @@ self.fail_locs = fail_locs self.gcmap = self.compute_gcmap(gcmap, failargs, fail_locs, frame_depth) - self.exc = exc - self.is_guard_not_invalidated = is_guard_not_invalidated - self.is_guard_not_forced = is_guard_not_forced + self.guard_opnum = guard_opnum + + def guard_not_invalidated(self): + return self.guard_opnum == rop.GUARD_NOT_INVALIDATED + + def must_save_exception(self): + guard_opnum = self.guard_opnum + return (guard_opnum == rop.GUARD_EXCEPTION or + guard_opnum == 
rop.GUARD_NO_EXCEPTION or + guard_opnum == rop.GUARD_NOT_FORCED) def compute_gcmap(self, gcmap, failargs, fail_locs, frame_depth): # note that regalloc has a very similar compute, but @@ -172,7 +179,7 @@ if box is not None and box.type == FLOAT: withfloats = True break - exc = guardtok.exc + exc = guardtok.must_save_exception() target = self.failure_recovery_code[exc + 2 * withfloats] fail_descr = cast_instance_to_gcref(guardtok.faildescr) fail_descr = rffi.cast(lltype.Signed, fail_descr) diff --git a/rpython/jit/backend/ppc/arch.py b/rpython/jit/backend/ppc/arch.py --- a/rpython/jit/backend/ppc/arch.py +++ b/rpython/jit/backend/ppc/arch.py @@ -70,7 +70,8 @@ LR_BC_OFFSET = 16 _GAP = 0 if IS_BIG_ENDIAN else 16 PARAM_SAVE_AREA_OFFSET = 48 - _GAP -THREADLOCAL_ADDR_OFFSET = 112 - _GAP +LOCAL_VARS_OFFSET = 112 - _GAP +THREADLOCAL_ADDR_OFFSET = LOCAL_VARS_OFFSET GPR_SAVE_AREA_OFFSET = 120 - _GAP REGISTERS_SAVED = [r.r25, r.r26, r.r27, r.r28, r.r29, r.r30, r.r31] diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py --- a/rpython/jit/backend/ppc/callbuilder.py +++ b/rpython/jit/backend/ppc/callbuilder.py @@ -124,9 +124,9 @@ gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack and self.is_call_release_gil: - # in this mode, 'ebx' happens to contain the shadowstack + # in this mode, RSHADOWOLD happens to contain the shadowstack # top at this point, so reuse it instead of loading it again - ssreg = self.RSHADOWPTR + ssreg = self.RSHADOWOLD self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) def emit_raw_call(self): diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py --- a/rpython/jit/backend/ppc/codebuilder.py +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -936,11 +936,9 @@ class PPCGuardToken(GuardToken): def __init__(self, cpu, gcmap, descr, failargs, faillocs, - exc, frame_depth, is_guard_not_invalidated=False, - is_guard_not_forced=False, 
fcond=c.cond_none): - GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, exc, - frame_depth, is_guard_not_invalidated, - is_guard_not_forced) + guard_opnum, frame_depth, fcond=c.cond_none): + GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, + guard_opnum, frame_depth) self.fcond = fcond diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -254,9 +254,7 @@ _mixin_ = True - def _emit_guard(self, op, arglocs, save_exc=False, - is_guard_not_invalidated=False, - is_guard_not_forced=False): + def _emit_guard(self, op, arglocs, is_guard_not_invalidated=False): if is_guard_not_invalidated: fcond = c.cond_none else: @@ -264,22 +262,18 @@ self.guard_success_cc = c.cond_none assert fcond != c.cond_none fcond = c.negate(fcond) - token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], - fcond, save_exc, is_guard_not_invalidated, - is_guard_not_forced) + token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], fcond) token.pos_jump_offset = self.mc.currpos() + assert token.guard_not_invalidated() == is_guard_not_invalidated if not is_guard_not_invalidated: self.mc.trap() # has to be patched later on self.pending_guard_tokens.append(token) - def build_guard_token(self, op, frame_depth, arglocs, fcond, save_exc, - is_guard_not_invalidated=False, - is_guard_not_forced=False): + def build_guard_token(self, op, frame_depth, arglocs, fcond): descr = op.getdescr() gcmap = allocate_gcmap(self, frame_depth, r.JITFRAME_FIXED_SIZE) token = PPCGuardToken(self.cpu, gcmap, descr, op.getfailargs(), - arglocs, save_exc, frame_depth, - is_guard_not_invalidated, is_guard_not_forced, + arglocs, op.getopnum(), frame_depth, fcond) return token @@ -440,7 +434,7 @@ def emit_guard_not_forced_2(self, op, arglocs, regalloc): guard_token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], - c.cond_none, save_exc=False) 
+ c.cond_none) self._finish_gcmap = guard_token.gcmap self._store_force_index(op) self.store_info_on_descr(0, guard_token) @@ -531,7 +525,7 @@ self.mc.load_from_addr(r.SCRATCH2, self.cpu.pos_exception()) self.mc.cmp_op(0, r.SCRATCH2.value, 0, imm=True) self.guard_success_cc = c.EQ - self._emit_guard(op, arglocs, save_exc=True) + self._emit_guard(op, arglocs) # If the previous operation was a COND_CALL, overwrite its conditional # jump to jump over this GUARD_NO_EXCEPTION as well, if we can if self._find_nearby_operation(regalloc,-1).getopnum() == rop.COND_CALL: @@ -553,7 +547,7 @@ mc.load(r.SCRATCH.value, r.SCRATCH2.value, diff) mc.cmp_op(0, r.SCRATCH.value, loc.value) self.guard_success_cc = c.EQ - self._emit_guard(op, failargs, save_exc=True) + self._emit_guard(op, failargs) if resloc: mc.load(resloc.value, r.SCRATCH2.value, 0) @@ -1281,12 +1275,12 @@ self.mc.load_imm(r.r4, value) self.mc.cmp_op(0, r.r5.value, r.r4.value, imm=False) jump_if_eq = self.mc.currpos() - self.mc.nop() # patched later + self.mc.trap() # patched later return jump_if_eq def _call_assembler_patch_je(self, result_loc, je_location): jump_to_done = self.mc.currpos() - self.mc.nop() # patched later + self.mc.trap() # patched later # currpos = self.mc.currpos() pmc = OverwritingBuilder(self.mc, je_location, 1) @@ -1325,23 +1319,26 @@ baseofs = self.cpu.get_baseofs_of_frame_field() newlooptoken.compiled_loop_token.update_frame_info( oldlooptoken.compiled_loop_token, baseofs) - if IS_PPC_32 or not IS_BIG_ENDIAN: - # we overwrite the instructions at the old _ll_function_addr - # to start with a JMP to the new _ll_function_addr. - # Ideally we should rather patch all existing CALLs, but well. - mc = PPCBuilder() - mc.b_abs(target) - mc.copy_to_raw_memory(oldadr) - else: + if IS_PPC_64 and IS_BIG_ENDIAN: # PPC64 big-endian trampolines are data so overwrite the code # address in the function descriptor at the old address. 
# Copy the whole 3-word trampoline, even though the other - # words are always zero so far. + # words are always zero so far. That's not enough in all + # cases: if the "target" trampoline is itself redirected + # later, then the "old" trampoline won't be updated; so + # we still need the jump below to be safe. odata = rffi.cast(rffi.CArrayPtr(lltype.Signed), oldadr) tdata = rffi.cast(rffi.CArrayPtr(lltype.Signed), target) odata[0] = tdata[0] odata[1] = tdata[1] odata[2] = tdata[2] + oldadr += 3 * WORD + target += 3 * WORD + # we overwrite the instructions at the old _ll_function_addr + # to start with a JMP to the new _ll_function_addr. + mc = PPCBuilder() + mc.b_abs(target) + mc.copy_to_raw_memory(oldadr) class OpAssembler(IntOpAssembler, GuardOpAssembler, diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -8,7 +8,8 @@ GPR_SAVE_AREA_OFFSET, THREADLOCAL_ADDR_OFFSET, STD_FRAME_SIZE_IN_BYTES, - IS_BIG_ENDIAN) + IS_BIG_ENDIAN, + LOCAL_VARS_OFFSET) from rpython.jit.backend.ppc.helper.assembler import Saved_Volatiles from rpython.jit.backend.ppc.helper.regalloc import _check_imm_arg import rpython.jit.backend.ppc.register as r @@ -233,6 +234,7 @@ # Second argument is the new size, which is still in r0 here mc.mr(r.r4.value, r.r0.value) + # This trashes r0 and r2 self._store_and_reset_exception(mc, r.RCS2, r.RCS3) # Do the call @@ -283,6 +285,7 @@ mc.store(exctploc.value, r.r2.value, diff) def _reload_frame_if_necessary(self, mc, shadowstack_reg=None): + # might trash the VOLATILE registers different from r3 and f1 gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap: if gcrootmap.is_shadow_stack: @@ -492,6 +495,8 @@ old_mc = self.mc self.mc = mc + extra_stack_size = LOCAL_VARS_OFFSET + 4 * WORD + 8 + extra_stack_size = (extra_stack_size + 15) & ~15 if for_frame: # NOTE: don't save registers on the jitframe here! 
It might # override already-saved values that will be restored @@ -508,12 +513,12 @@ # We need to increase our stack frame size a bit to store them. # self.mc.load(r.SCRATCH.value, r.SP.value, 0) # SP back chain - self.mc.store_update(r.SCRATCH.value, r.SP.value, -6 * WORD) - self.mc.std(r.RCS1.value, r.SP.value, 1 * WORD) - self.mc.std(r.RCS2.value, r.SP.value, 2 * WORD) - self.mc.std(r.RCS3.value, r.SP.value, 3 * WORD) - self.mc.std(r.r3.value, r.SP.value, 4 * WORD) - self.mc.stfd(r.f1.value, r.SP.value, 5 * WORD) + self.mc.store_update(r.SCRATCH.value, r.SP.value, -extra_stack_size) + self.mc.std(r.RCS1.value, r.SP.value, LOCAL_VARS_OFFSET + 0 * WORD) + self.mc.std(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + self.mc.std(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + self.mc.std(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + self.mc.stfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) saved_regs = None saved_fp_regs = None @@ -536,6 +541,8 @@ # since the call to write barrier can't collect # (and this is assumed a bit left and right here, like lack # of _reload_frame_if_necessary) + # This trashes r0 and r2, which is fine in this case + assert argument_loc is not r.r0 self._store_and_reset_exception(mc, r.RCS2, r.RCS3) if withcards: @@ -545,6 +552,8 @@ mc.mflr(r.RCS1.value) # func = rffi.cast(lltype.Signed, func) + # Note: if not 'for_frame', argument_loc is r0, which must carefully + # not be overwritten above mc.mr(r.r3.value, argument_loc.value) mc.load_imm(mc.RAW_CALL_REG, func) mc.raw_call() @@ -564,12 +573,12 @@ mc.andix(r.RCS2.value, r.RCS2.value, card_marking_mask & 0xFF) if for_frame: - self.mc.ld(r.RCS1.value, r.SP.value, 1 * WORD) - self.mc.ld(r.RCS2.value, r.SP.value, 2 * WORD) - self.mc.ld(r.RCS3.value, r.SP.value, 3 * WORD) - self.mc.ld(r.r3.value, r.SP.value, 4 * WORD) - self.mc.lfd(r.f1.value, r.SP.value, 5 * WORD) - self.mc.addi(r.SP.value, r.SP.value, 6 * WORD) + self.mc.ld(r.RCS1.value, r.SP.value, 
LOCAL_VARS_OFFSET + 0 * WORD) + self.mc.ld(r.RCS2.value, r.SP.value, LOCAL_VARS_OFFSET + 1 * WORD) + self.mc.ld(r.RCS3.value, r.SP.value, LOCAL_VARS_OFFSET + 2 * WORD) + self.mc.ld(r.r3.value, r.SP.value, LOCAL_VARS_OFFSET + 3 * WORD) + self.mc.lfd(r.f1.value, r.SP.value, LOCAL_VARS_OFFSET + 4 * WORD) + self.mc.addi(r.SP.value, r.SP.value, extra_stack_size) else: self._pop_core_regs_from_jitframe(mc, saved_regs) @@ -875,13 +884,12 @@ def target_arglocs(self, looptoken): return looptoken._ppc_arglocs - def materialize_loop(self, looptoken, show=False): + def materialize_loop(self, looptoken): self.datablockwrapper.done() self.datablockwrapper = None allblocks = self.get_asmmemmgr_blocks(looptoken) start = self.mc.materialize(self.cpu, allblocks, self.cpu.gc_ll_descr.gcrootmap) - #print "=== Loop start is at %s ===" % hex(r_uint(start)) return start def load_gcmap(self, mc, reg, gcmap): @@ -946,7 +954,7 @@ # relative_target = tok.pos_recovery_stub - tok.pos_jump_offset # - if not tok.is_guard_not_invalidated: + if not tok.guard_not_invalidated(): mc = PPCBuilder() mc.b_cond_offset(relative_target, tok.fcond) mc.copy_to_raw_memory(addr) @@ -1219,7 +1227,7 @@ force_realignment = (itemsize % WORD) != 0 if force_realignment: constsize += WORD - 1 - mc.addi(r.RSZ.value, r.RSZ.value, constsize) + mc.addi(r.RSZ.value, varsizeloc.value, constsize) if force_realignment: # "& ~(WORD-1)" bit_limit = 60 if WORD == 8 else 61 @@ -1324,8 +1332,10 @@ def notimplemented_op(self, op, arglocs, regalloc): - print "[PPC/asm] %s not implemented" % op.getopname() - raise NotImplementedError(op) + msg = '[PPC/asm] %s not implemented\n' % op.getopname() + if we_are_translated(): + llop.debug_print(lltype.Void, msg) + raise NotImplementedError(msg) operations = [notimplemented_op] * (rop._LAST + 1) diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -25,6 +25,7 @@ from 
rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr from rpython.jit.backend.llsupport.gcmap import allocate_gcmap from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.debug import debug_print from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.rlib import rgc from rpython.rlib.rarithmetic import r_uint @@ -1115,8 +1116,10 @@ return locs def notimplemented(self, op): - print "[PPC/regalloc] %s not implemented" % op.getopname() - raise NotImplementedError(op) + msg = '[PPC/regalloc] %s not implemented\n' % op.getopname() + if we_are_translated(): + llop.debug_print(lltype.Void, msg) + raise NotImplementedError(msg) def force_int(intvalue): # a hack before transaction: force the intvalue argument through diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -3903,6 +3903,30 @@ x = self.cpu.get_float_value(deadframe, 0) assert longlong.getrealfloat(x) == 13.5 assert called == [finish_descr2] + del called[:] + + # compile a second replacement + ops = ''' + [f0, f1] + f2 = float_mul(f0, f1) + finish(f2)''' + loop3 = parse(ops) + looptoken3 = JitCellToken() + looptoken3.outermost_jitdriver_sd = FakeJitDriverSD() + self.cpu.compile_loop(loop3.inputargs, loop3.operations, looptoken3) + finish_descr3 = loop3.operations[-1].getdescr() + + # install it + self.cpu.redirect_call_assembler(looptoken2, looptoken3) + + # now, our call_assembler should go to looptoken3 + args = [longlong.getfloatstorage(0.5), + longlong.getfloatstorage(9.0)] # 0.5*9.0 == 1.25+3.25 + deadframe = self.cpu.execute_token(othertoken, *args) + x = self.cpu.get_float_value(deadframe, 0) + assert longlong.getrealfloat(x) == 13.5 + assert called == [finish_descr3] + del called[:] def test_short_result_of_getfield_direct(self): # Test that a getfield that returns a CHAR, SHORT or INT, signed diff --git 
a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -602,7 +602,7 @@ relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4) assert rx86.fits_in_32bits(relative_target) # - if not tok.is_guard_not_invalidated: + if not tok.guard_not_invalidated(): mc = codebuf.MachineCodeBlockWrapper() mc.writeimm32(relative_target) mc.copy_to_raw_memory(addr) @@ -1774,15 +1774,9 @@ def implement_guard_recovery(self, guard_opnum, faildescr, failargs, fail_locs, frame_depth): - exc = (guard_opnum == rop.GUARD_EXCEPTION or - guard_opnum == rop.GUARD_NO_EXCEPTION or - guard_opnum == rop.GUARD_NOT_FORCED) - is_guard_not_invalidated = guard_opnum == rop.GUARD_NOT_INVALIDATED - is_guard_not_forced = guard_opnum == rop.GUARD_NOT_FORCED gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) - return GuardToken(self.cpu, gcmap, faildescr, failargs, - fail_locs, exc, frame_depth, - is_guard_not_invalidated, is_guard_not_forced) + return GuardToken(self.cpu, gcmap, faildescr, failargs, fail_locs, + guard_opnum, frame_depth) def generate_propagate_error_64(self): assert WORD == 8 From noreply at buildbot.pypy.org Fri Oct 9 15:14:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 15:14:34 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: More tests Message-ID: <20151009131434.C17D91C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80084:b91501e7ca46 Date: 2015-10-09 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/b91501e7ca46/ Log: More tests diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -75,6 +75,11 @@ if "cppyy" in working_modules: working_modules.remove("cppyy") # depends on ctypes +if sys.platform.startswith("linux"): + _mach = os.popen('uname -m', 'r').read().strip() + if 
_mach.startswith('ppc'): + working_modules.remove("_continuation") + module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), diff --git a/rpython/jit/backend/ppc/test/test_exception.py b/rpython/jit/backend/ppc/test/test_exception.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_exception.py @@ -0,0 +1,11 @@ + +import py +from rpython.jit.backend.ppc.test.support import JitPPCMixin +from rpython.jit.metainterp.test.test_exception import ExceptionTests + +class TestExceptions(JitPPCMixin, ExceptionTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_exception.py + + def test_bridge_from_interpreter_exc(self): + py.test.skip("Widening to trash") diff --git a/rpython/jit/backend/ppc/test/test_fficall.py b/rpython/jit/backend/ppc/test/test_fficall.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_fficall.py @@ -0,0 +1,23 @@ +import py +from rpython.jit.metainterp.test import test_fficall +from rpython.jit.backend.ppc.test.support import JitPPCMixin + +class TestFfiCall(JitPPCMixin, test_fficall.FfiCallTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_fficall.py + + def _add_libffi_types_to_ll2types_maybe(self): + # this is needed by test_guard_not_forced_fails, because it produces a + # loop which reads the value of types.* in a variable, then a guard + # fail and we switch to blackhole: the problem is that at this point + # the blackhole interp has a real integer, but it needs to convert it + # back to a lltype pointer (which is handled by ll2ctypes, deeply in + # the logic). The workaround is to teach ll2ctypes in advance which + # are the addresses of the various types.* structures. 
+ # Try to comment this code out and run the test to see how it fails :) + from rpython.rtyper.lltypesystem import rffi, lltype, ll2ctypes + from rpython.rlib.jit_libffi import types + for key, value in types.__dict__.iteritems(): + if isinstance(value, lltype._ptr): + addr = rffi.cast(lltype.Signed, value) + ll2ctypes._int2obj[addr] = value diff --git a/rpython/jit/backend/ppc/test/test_quasiimmut.py b/rpython/jit/backend/ppc/test/test_quasiimmut.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_quasiimmut.py @@ -0,0 +1,9 @@ + +import py +from rpython.jit.backend.ppc.test.support import JitPPCMixin +from rpython.jit.metainterp.test import test_quasiimmut + +class TestLoopSpec(JitPPCMixin, test_quasiimmut.QuasiImmutTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_loop.py + pass diff --git a/rpython/jit/backend/ppc/test/test_rawmem.py b/rpython/jit/backend/ppc/test/test_rawmem.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_rawmem.py @@ -0,0 +1,9 @@ + +from rpython.jit.backend.ppc.test.support import JitPPCMixin +from rpython.jit.metainterp.test.test_rawmem import RawMemTests + + +class TestRawMem(JitPPCMixin, RawMemTests): + # for the individual tests see + # ====> ../../../metainterp/test/test_rawmem.py + pass diff --git a/rpython/jit/backend/ppc/test/test_string.py b/rpython/jit/backend/ppc/test/test_string.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_string.py @@ -0,0 +1,13 @@ +import py +from rpython.jit.metainterp.test import test_string +from rpython.jit.backend.ppc.test.support import JitPPCMixin + +class TestString(JitPPCMixin, test_string.TestLLtype): + # for the individual tests see + # ====> ../../../metainterp/test/test_string.py + pass + +class TestUnicode(JitPPCMixin, test_string.TestLLtypeUnicode): + # for the individual tests see + # ====> ../../../metainterp/test/test_string.py + pass diff --git 
a/rpython/jit/backend/ppc/test/test_virtualizable.py b/rpython/jit/backend/ppc/test/test_virtualizable.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_virtualizable.py @@ -0,0 +1,8 @@ + +import py +from rpython.jit.metainterp.test.test_virtualizable import ImplicitVirtualizableTests +from rpython.jit.backend.ppc.test.support import JitPPCMixin + +class TestVirtualizable(JitPPCMixin, ImplicitVirtualizableTests): + def test_blackhole_should_not_reenter(self): + py.test.skip("Assertion error & llinterp mess") diff --git a/rpython/jit/backend/ppc/test/test_zrpy_releasegil.py b/rpython/jit/backend/ppc/test/test_zrpy_releasegil.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/test/test_zrpy_releasegil.py @@ -0,0 +1,5 @@ +from rpython.jit.backend.llsupport.test.zrpy_releasegil_test import ReleaseGILTests + + +class TestShadowStack(ReleaseGILTests): + gcrootfinder = "shadowstack" diff --git a/rpython/jit/metainterp/test/test_fficall.py b/rpython/jit/metainterp/test/test_fficall.py --- a/rpython/jit/metainterp/test/test_fficall.py +++ b/rpython/jit/metainterp/test/test_fficall.py @@ -70,6 +70,7 @@ cif_description.exchange_result = (len(avalues)+1) * 16 unroll_avalues = unrolling_iterable(avalues) + BIG_ENDIAN = (sys.byteorder == 'big') def fake_call_impl_any(cif_description, func_addr, exchange_buffer): ofs = 16 @@ -86,13 +87,19 @@ avalue = intmask(avalue) assert got == avalue ofs += 16 + write_to_ofs = 0 if rvalue is not None: write_rvalue = rvalue + if BIG_ENDIAN: + if (lltype.typeOf(write_rvalue) is rffi.SIGNEDCHAR or + lltype.typeOf(write_rvalue) is rffi.UCHAR): + # 'write_rvalue' is an int type smaller than Signed + write_to_ofs = rffi.sizeof(rffi.LONG) - 1 else: write_rvalue = 12923 # ignored TYPE = rffi.CArray(lltype.typeOf(write_rvalue)) data = rffi.ptradd(exchange_buffer, ofs) - rffi.cast(lltype.Ptr(TYPE), data)[0] = write_rvalue + rffi.cast(lltype.Ptr(TYPE), data)[write_to_ofs] = write_rvalue def f(i): exbuf = 
lltype.malloc(rffi.CCHARP.TO, (len(avalues)+2) * 16, @@ -181,7 +188,7 @@ kwds.setdefault('supports_singlefloats', True) self._run([types.float] * 2, types.double, [r_singlefloat(10.5), r_singlefloat(31.5)], - -4.5) + -4.5, **kwds) def test_simple_call_singlefloat(self, **kwds): kwds.setdefault('supports_singlefloats', True) diff --git a/rpython/memory/gc/env.py b/rpython/memory/gc/env.py --- a/rpython/memory/gc/env.py +++ b/rpython/memory/gc/env.py @@ -135,7 +135,7 @@ arch = os.uname()[4] # machine if arch.endswith('86') or arch == 'x86_64': return get_L2cache_linux2_cpuinfo() - if arch in ('alpha', 'ppc', 'ppc64'): + if arch in ('alpha', 'ppc'): return get_L2cache_linux2_cpuinfo(label='L2 cache') if arch == 'ia64': return get_L2cache_linux2_ia64() From noreply at buildbot.pypy.org Fri Oct 9 15:58:38 2015 From: noreply at buildbot.pypy.org (sbauman) Date: Fri, 9 Oct 2015 15:58:38 +0200 (CEST) Subject: [pypy-commit] pypy default: Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py Message-ID: <20151009135838.27FEF1C1192@cobra.cs.uni-duesseldorf.de> Author: Spenser Bauman Branch: Changeset: r80085:2a46aa80547f Date: 2015-10-08 11:58 -0400 http://bitbucket.org/pypy/pypy/changeset/2a46aa80547f/ Log: Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -507,16 +507,7 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + optimize_GETFIELD_GC_PURE_I = 
optimize_GETFIELD_GC_I optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I From noreply at buildbot.pypy.org Fri Oct 9 15:58:40 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 9 Oct 2015 15:58:40 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in sbauman/pypy (pull request #340) Message-ID: <20151009135840.4C79B1C1192@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80086:de57e6e64272 Date: 2015-10-09 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/de57e6e64272/ Log: Merged in sbauman/pypy (pull request #340) Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -507,16 +507,7 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I From noreply at buildbot.pypy.org Fri Oct 9 17:40:14 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 9 Oct 2015 17:40:14 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed method and reindented resume_in_blackhole Message-ID: <20151009154014.993801C133C@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80087:841182cde9b1 Date: 2015-10-09 17:03 +0200 
http://bitbucket.org/pypy/pypy/changeset/841182cde9b1/ Log: removed method and reindented resume_in_blackhole diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -687,9 +687,6 @@ def clone(self): return self - def copy_all_attributes_from(self, other): - pass - class AbstractResumeGuardDescr(ResumeDescr): _attrs_ = ('status',) @@ -720,7 +717,7 @@ resume_in_blackhole(metainterp_sd, jitdriver_sd, self.prev, deadframe) else: assert isinstance(self, ResumeGuardDescr) - resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) + resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) assert 0, "unreachable" def _trace_and_compile_from_bridge(self, deadframe, metainterp_sd, From noreply at buildbot.pypy.org Fri Oct 9 17:40:16 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 9 Oct 2015 17:40:16 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: reverted the jitlogparser file (modifications not used anymore) Message-ID: <20151009154016.BF2471C133C@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80088:8f3c47c05e89 Date: 2015-10-09 17:07 +0200 http://bitbucket.org/pypy/pypy/changeset/8f3c47c05e89/ Log: reverted the jitlogparser file (modifications not used anymore) diff --git a/rpython/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py --- a/rpython/tool/jitlogparser/parser.py +++ b/rpython/tool/jitlogparser/parser.py @@ -167,6 +167,7 @@ def update_memo(self, val, name): pass + class NonCodeError(Exception): pass @@ -413,35 +414,16 @@ def import_log(logname, ParserCls=SimpleParser): log = parse_log_file(logname) addrs = parse_addresses(extract_category(log, 'jit-backend-addr')) - from rpython.jit.backend.tool.viewcode import CodeRange - ranges = {} - backend_name = None + from rpython.jit.backend.tool.viewcode import World + world = World() for entry in extract_category(log, 
'jit-backend-dump'): - for line in entry.splitlines(True): - # copied from class World - if line.startswith('BACKEND '): - backend_name = line.split(' ')[1].strip() - if line.startswith('CODE_DUMP '): - pieces = line.split() - assert pieces[1].startswith('@') - assert pieces[2].startswith('+') - if len(pieces) == 3: - continue # empty line - baseaddr = long(pieces[1][1:], 16) - if baseaddr < 0: - baseaddr += (2 * sys.maxint + 2) - offset = int(pieces[2][1:]) - addr = baseaddr + offset - data = pieces[3].replace(':', '').decode('hex') - coderange = CodeRange(None, addr, data) - ranges[addr] = coderange + world.parse(entry.splitlines(True)) dumps = {} - for rang in sorted(ranges.values()): - addr = rang.addr - if addr in addrs and addrs[addr]: - name = addrs[addr].pop(0) # they should come in order - data = rang.data.encode('hex') # backward compatibility - dumps[name] = (backend_name, addr, data) + for r in world.ranges: + if r.addr in addrs and addrs[r.addr]: + name = addrs[r.addr].pop(0) # they should come in order + data = r.data.encode('hex') # backward compatibility + dumps[name] = (world.backend_name, r.addr, data) loops = [] cat = extract_category(log, 'jit-log-opt') if not cat: @@ -468,9 +450,6 @@ parser.postprocess(loop, backend_tp=bname, backend_dump=dump, dump_start=start_ofs)) - loop.start_ofs = start_ofs - else: - loop.start_ofs = -1 loops += split_trace(loop) return log, loops From noreply at buildbot.pypy.org Fri Oct 9 17:56:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 17:56:21 +0200 (CEST) Subject: [pypy-commit] pypy default: cffi callbacks performance: rweaklist instead of rweakvaluedictionary Message-ID: <20151009155621.40AEC1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80089:178e16fc3032 Date: 2015-10-09 15:20 +0200 http://bitbucket.org/pypy/pypy/changeset/178e16fc3032/ Log: cffi callbacks performance: rweaklist instead of rweakvaluedictionary diff --git 
a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -3,12 +3,12 @@ """ import sys, os -from rpython.rlib import clibffi, rweakref, jit, jit_libffi -from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here +from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc +from pypy.module._cffi_backend import cerrno, misc, handle from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -64,8 +64,14 @@ convert_from_object_fficallback(fresult, self._closure.ll_error, w_error) # - self.unique_id = compute_unique_id(self) - global_callback_mapping.set(self.unique_id, self) + # We must setup the GIL here, in case the callback is invoked in + # some other non-Pythonic thread. This is the same as cffi on + # CPython. 
+ if space.config.translation.thread: + from pypy.module.thread.os_thread import setup_threads + setup_threads(space) + # + handle_index = handle.get_handles(space).reserve_next_handle_index() # cif_descr = self.getfunctype().cif_descr if not cif_descr: @@ -74,7 +80,7 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, self.unique_id) + unique_id = rffi.cast(rffi.VOIDP, handle_index) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) @@ -82,12 +88,8 @@ raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) # - # We must setup the GIL here, in case the callback is invoked in - # some other non-Pythonic thread. This is the same as cffi on - # CPython. - if space.config.translation.thread: - from pypy.module.thread.os_thread import setup_threads - setup_threads(space) + _current_space.space = space + handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -127,9 +129,6 @@ keepalive_until_here(self) # to keep self._closure.ll_error alive -global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) - - def convert_from_object_fficallback(fresult, ll_res, w_res): space = fresult.space small_result = fresult.size < SIZE_OF_FFI_ARG @@ -213,6 +212,12 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) +class CurrentSpace: + def _cleanup_(self): + if hasattr(self, 'space'): + del self.space +_current_space = CurrentSpace() + def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care @@ -223,8 +228,9 @@ """ ll_res = rffi.cast(rffi.CCHARP, ll_res) unique_id = rffi.cast(lltype.Signed, ll_userdata) - callback = global_callback_mapping.get(unique_id) - if callback is None: + space = _current_space.space + callback = handle.get_handles(space).fetch_handle(unique_id) + if callback is None or not isinstance(callback, W_CDataCallback): # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -237,7 +243,6 @@ return # must_leave = False - space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) py_invoke_callback(callback, ll_res, ll_args) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -9,16 +9,16 @@ def __init__(self, space): self.initialize() -def get(space): +def get_handles(space): return space.fromcache(CffiHandles) # ____________________________________________________________ def _newp_handle(space, w_ctype, w_x): - index = get(space).reserve_next_handle_index() + index = get_handles(space).reserve_next_handle_index() _cdata = rffi.cast(rffi.CCHARP, index + 1) new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get(space).store_handle(index, new_cdataobj) + get_handles(space).store_handle(index, new_cdataobj) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -39,7 +39,7 @@ "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get(space).fetch_handle(index - 1) + original_cdataobj = get_handles(space).fetch_handle(index - 1) # if isinstance(original_cdataobj, cdataobj.W_CDataHandle): return original_cdataobj.w_keepalive From noreply at buildbot.pypy.org Fri Oct 9 17:56:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 17:56:23 +0200 (CEST) Subject: [pypy-commit] pypy default: Another passing test about green fields 
Message-ID: <20151009155623.690631C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80090:7498b2a47354 Date: 2015-10-09 16:36 +0200 http://bitbucket.org/pypy/pypy/changeset/7498b2a47354/ Log: Another passing test about green fields diff --git a/rpython/jit/metainterp/test/test_greenfield.py b/rpython/jit/metainterp/test/test_greenfield.py --- a/rpython/jit/metainterp/test/test_greenfield.py +++ b/rpython/jit/metainterp/test/test_greenfield.py @@ -1,5 +1,5 @@ from rpython.jit.metainterp.test.support import LLJitMixin -from rpython.rlib.jit import JitDriver +from rpython.rlib.jit import JitDriver, assert_green class GreenFieldsTests: @@ -52,6 +52,27 @@ self.check_trace_count(6) self.check_resops(guard_value=0) + def test_green_field_3(self): + myjitdriver = JitDriver(greens=['ctx.x'], reds=['ctx']) + class Ctx(object): + _immutable_fields_ = ['x'] + def __init__(self, x, y): + self.x = x + self.y = y + def f(x, y): + ctx = Ctx(x, y) + while ctx.y > 0: + myjitdriver.can_enter_jit(ctx=ctx) + myjitdriver.jit_merge_point(ctx=ctx) + assert_green(ctx.x) + ctx.y -= ctx.x + return -2100 + def g(): + return f(5, 35) + f(6, 42) + # + res = self.meta_interp(g, []) + assert res == -4200 + class TestLLtypeGreenFieldsTests(GreenFieldsTests, LLJitMixin): pass From noreply at buildbot.pypy.org Fri Oct 9 17:56:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 17:56:25 +0200 (CEST) Subject: [pypy-commit] pypy default: Try to fix JItting of callbacks Message-ID: <20151009155625.8C64E1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80091:4f2af3c1fb05 Date: 2015-10-09 16:44 +0200 http://bitbucket.org/pypy/pypy/changeset/4f2af3c1fb05/ Log: Try to fix JItting of callbacks diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1058,6 +1058,14 @@ args = Arguments.frompacked(self, w_args, w_kwds) return 
self.call_args(w_callable, args) + def _try_fetch_pycode(self, w_func): + from pypy.interpreter.function import Function, Method + if isinstance(w_func, Method): + w_func = w_func.w_function + if isinstance(w_func, Function): + return w_func.code + return None + def call_function(self, w_func, *args_w): nargs = len(args_w) # used for pruning funccall versions if not self.config.objspace.disable_call_speedhacks and nargs < 5: diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -34,7 +34,7 @@ class W_CDataCallback(W_CData): - #_immutable_fields_ = ... + _immutable_fields_ = ['key_pycode'] w_onerror = None def __init__(self, space, ctype, w_callable, w_error, w_onerror): @@ -46,6 +46,7 @@ raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable + self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -107,6 +108,7 @@ def invoke(self, ll_args): space = self.space ctype = self.getfunctype() + ctype = jit.promote(ctype) args_w = [] for i, farg in enumerate(ctype.fargs): ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) @@ -199,11 +201,18 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") -# XXX fix me: with this line, we get a single compiled version, which -# is good for small examples but gets worse and worse as the number of -# callbacks grows: -# @jit.jit_callback("CFFI") +def get_printable_location(key_pycode): + if key_pycode is None: + return 'cffi_callback ' + return 'cffi_callback ' + key_pycode.get_repr() + +jitdriver = JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['callback', 'll_res', 'll_args'], + get_printable_location=get_printable_location) + def py_invoke_callback(callback, ll_res, ll_args): + 
jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) extra_line = '' try: w_res = callback.invoke(ll_args) From noreply at buildbot.pypy.org Fri Oct 9 17:56:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 17:56:27 +0200 (CEST) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20151009155627.ACDD11C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80092:ef6fa2082f08 Date: 2015-10-09 16:11 +0100 http://bitbucket.org/pypy/pypy/changeset/ef6fa2082f08/ Log: Translation fix diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -206,10 +206,10 @@ return 'cffi_callback ' return 'cffi_callback ' + key_pycode.get_repr() -jitdriver = JitDriver(name='cffi_callback', - greens=['callback.key_pycode'], - reds=['callback', 'll_res', 'll_args'], - get_printable_location=get_printable_location) +jitdriver = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location) def py_invoke_callback(callback, ll_res, ll_args): jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -318,6 +318,9 @@ def unicode_from_object(self, w_obj): return w_some_obj() + def _try_fetch_pycode(self, w_func): + return None + # ---------- def translates(self, func=None, argtypes=None, seeobj_w=[], **kwds): From noreply at buildbot.pypy.org Fri Oct 9 17:56:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 17:56:30 +0200 (CEST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20151009155630.07D8C1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80093:dc5ec2ee219f 
Date: 2015-10-09 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/dc5ec2ee219f/ Log: merge heads diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -507,16 +507,7 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I From noreply at buildbot.pypy.org Fri Oct 9 19:17:23 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 19:17:23 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: fix tests Message-ID: <20151009171723.2768E1C1192@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80094:7c36291b8749 Date: 2015-10-09 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/7c36291b8749/ Log: fix tests diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -460,8 +460,7 @@ class BaseTest(object): def parse(self, s, boxkinds=None, want_fail_descr=True, postprocess=None): - self.oparse = OpParser(s, self.cpu, self.namespace, 'lltype', - boxkinds, + self.oparse = OpParser(s, self.cpu, self.namespace, boxkinds, None, False, postprocess) return self.oparse.parse() @@ -569,4 +568,3 @@ return newloop # 
____________________________________________________________ - diff --git a/rpython/rtyper/test/test_llannotation.py b/rpython/rtyper/test/test_llannotation.py --- a/rpython/rtyper/test/test_llannotation.py +++ b/rpython/rtyper/test/test_llannotation.py @@ -5,7 +5,7 @@ from rpython.rlib.rarithmetic import r_uint, r_singlefloat from rpython.rtyper.llannotation import ( SomePtr, annotation_to_lltype, ll_to_annotation) -from rpython.rtyper.typesystem import lltype +from rpython.rtyper.lltypesystem import lltype import rpython.rtyper.rtyper # make sure to import the world class C(object): From noreply at buildbot.pypy.org Fri Oct 9 19:37:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 19:37:36 +0200 (CEST) Subject: [pypy-commit] cffi default: Another test (passing) Message-ID: <20151009173736.613421C141C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2335:6a85e808c9ac Date: 2015-10-09 19:27 +0200 http://bitbucket.org/cffi/cffi/changeset/6a85e808c9ac/ Log: Another test (passing) diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -3470,6 +3470,12 @@ p = newp(SignedCharA, 5) py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault +def test_memmove_bad_cdata(): + BInt = new_primitive_type("int") + p = cast(BInt, 42) + py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1) + py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1) + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) From noreply at buildbot.pypy.org Fri Oct 9 19:41:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 9 Oct 2015 19:41:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Test and fix Message-ID: <20151009174105.DF43F1C133C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80095:5c2b0a8678a4 Date: 2015-10-09 19:40 +0200 http://bitbucket.org/pypy/pypy/changeset/5c2b0a8678a4/ Log: Test and fix diff --git 
a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -18,6 +18,7 @@ _attrs_ = ['ctptr'] _immutable_fields_ = ['ctptr'] kind = "array" + is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -21,6 +21,7 @@ cast_anything = False is_primitive_integer = False + is_nonfunc_pointer_or_array = False kind = "?" def __init__(self, space, size, name, name_position): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -172,6 +172,7 @@ _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None + is_nonfunc_pointer_or_array = True def __init__(self, space, ctitem): from pypy.module._cffi_backend import ctypearray diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -124,6 +124,14 @@ # return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) + +def unsafe_escaping_ptr_for_ptr_or_array(w_cdata): + if not w_cdata.ctype.is_nonfunc_pointer_or_array: + raise oefmt(w_cdata.space.w_TypeError, + "expected a pointer or array ctype, got '%s'", + w_cdata.ctype.name) + return w_cdata.unsafe_escaping_ptr() + c_memmove = rffi.llexternal('memmove', [rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], lltype.Void, _nowrapper=True) @@ -137,7 +145,7 @@ src_buf = None src_data = lltype.nullptr(rffi.CCHARP.TO) if isinstance(w_src, cdataobj.W_CData): - src_data = w_src.unsafe_escaping_ptr() + src_data = 
unsafe_escaping_ptr_for_ptr_or_array(w_src) src_is_ptr = True else: src_buf = _fetch_as_read_buffer(space, w_src) @@ -158,7 +166,7 @@ dest_buf = None dest_data = lltype.nullptr(rffi.CCHARP.TO) if isinstance(w_dest, cdataobj.W_CData): - dest_data = w_dest.unsafe_escaping_ptr() + dest_data = unsafe_escaping_ptr_for_ptr_or_array(w_dest) dest_is_ptr = True else: dest_buf = _fetch_as_write_buffer(space, w_dest) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3459,6 +3459,12 @@ p = newp(SignedCharA, 5) py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault +def test_memmove_bad_cdata(): + BInt = new_primitive_type("int") + p = cast(BInt, 42) + py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1) + py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1) + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) From noreply at buildbot.pypy.org Fri Oct 9 20:47:03 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 20:47:03 +0200 (CEST) Subject: [pypy-commit] pypy type_system-cleanup: Close branch type_system-cleanup Message-ID: <20151009184703.0B33E1C141C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: type_system-cleanup Changeset: r80096:f92f750a921c Date: 2015-10-09 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/f92f750a921c/ Log: Close branch type_system-cleanup From noreply at buildbot.pypy.org Fri Oct 9 20:47:20 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 20:47:20 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged type_system-cleanup into default Message-ID: <20151009184720.BEC851C141C@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80097:ca136a601d5b Date: 2015-10-09 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/ca136a601d5b/ Log: Merged 
type_system-cleanup into default diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -118,7 +118,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES: + if 'st_blksize' in STAT_FIELD_TYPES: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -57,8 +57,7 @@ raise OperationError(space.w_ValueError, space.wrap("recursion limit must be positive")) space.sys.recursionlimit = new_limit - if space.config.translation.type_system == 'lltype': - _stack_set_length_fraction(new_limit * 0.001) + _stack_set_length_fraction(new_limit * 0.001) def getrecursionlimit(space): """Return the last value set by setrecursionlimit(). diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -93,7 +93,6 @@ try: interp, graph = get_interpreter(entry_point, [], backendopt=False, config=config, - type_system=config.translation.type_system, policy=PyPyAnnotatorPolicy(space)) except Exception, e: print '%s: %s' % (e.__class__, e) diff --git a/rpython/jit/backend/arm/test/support.py b/rpython/jit/backend/arm/test/support.py --- a/rpython/jit/backend/arm/test/support.py +++ b/rpython/jit/backend/arm/test/support.py @@ -8,7 +8,6 @@ from rpython.rlib.jit import JitDriver class JitARMMixin(support.LLJitMixin): - type_system = 'lltype' CPUClass = getcpuclass() # we have to disable unroll enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" diff --git a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py --- a/rpython/jit/backend/llsupport/test/test_regalloc_integration.py +++ 
b/rpython/jit/backend/llsupport/test/test_regalloc_integration.py @@ -86,11 +86,9 @@ EffectInfo.MOST_GENERAL) namespace = locals().copy() - type_system = 'lltype' def parse(self, s, boxkinds=None, namespace=None): return parse(s, self.cpu, namespace or self.namespace, - type_system=self.type_system, boxkinds=boxkinds) def interpret(self, ops, args, run=True, namespace=None): diff --git a/rpython/jit/backend/test/calling_convention_test.py b/rpython/jit/backend/test/calling_convention_test.py --- a/rpython/jit/backend/test/calling_convention_test.py +++ b/rpython/jit/backend/test/calling_convention_test.py @@ -23,7 +23,6 @@ pass class CallingConvTests(Runner): - type_system = 'lltype' Ptr = lltype.Ptr FuncType = lltype.FuncType diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -1775,7 +1775,6 @@ class LLtypeBackendTest(BaseBackendTest): - type_system = 'lltype' Ptr = lltype.Ptr FuncType = lltype.FuncType malloc = staticmethod(lltype.malloc) @@ -3147,7 +3146,7 @@ ops = [ ResOperation(rop.CALL_RELEASE_GIL_I, [ConstInt(saveerr), ConstInt(func1_adr)] - + inputargs, + + inputargs, descr=calldescr), ResOperation(rop.GUARD_NOT_FORCED, [], descr=faildescr), ] diff --git a/rpython/jit/backend/test/support.py b/rpython/jit/backend/test/support.py --- a/rpython/jit/backend/test/support.py +++ b/rpython/jit/backend/test/support.py @@ -7,7 +7,6 @@ class BaseCompiledMixin(object): - type_system = None CPUClass = None basic = False @@ -29,7 +28,6 @@ self.pre_translation_hook() t = self._get_TranslationContext() - t.config.translation.type_system = self.type_system # force typesystem-specific options if listcomp: t.config.translation.list_comprehension_operations = True @@ -111,7 +109,6 @@ class CCompiledMixin(BaseCompiledMixin): - type_system = 'lltype' slow = False def setup_class(cls): diff --git a/rpython/jit/backend/x86/test/test_basic.py 
b/rpython/jit/backend/x86/test/test_basic.py --- a/rpython/jit/backend/x86/test/test_basic.py +++ b/rpython/jit/backend/x86/test/test_basic.py @@ -6,7 +6,6 @@ from rpython.rlib.jit import JitDriver class Jit386Mixin(support.LLJitMixin): - type_system = 'lltype' CPUClass = getcpuclass() # we have to disable unroll enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -9,7 +9,7 @@ QuasiImmutAnalyzer, RandomEffectsAnalyzer, effectinfo_from_writeanalyze, EffectInfo, CallInfoCollection) from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rlib import rposix from rpython.translator.backendopt.canraise import RaiseAnalyzer from rpython.translator.backendopt.writeanalyze import ReadWriteAnalyzer @@ -178,7 +178,6 @@ """ fnptr = getfunctionptr(graph) FUNC = lltype.typeOf(fnptr).TO - assert self.rtyper.type_system.name == "lltypesystem" fnaddr = llmemory.cast_ptr_to_adr(fnptr) NON_VOID_ARGS = [ARG for ARG in FUNC.ARGS if ARG is not lltype.Void] calldescr = self.cpu.calldescrof(FUNC, tuple(NON_VOID_ARGS), diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -159,8 +159,6 @@ class FakeRTyper: class annotator: translator = None - class type_system: - name = 'lltypesystem' def getfunctionptr(graph): F = lltype.FuncType([], lltype.Signed) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -30,7 +30,6 @@ return Constant(x, lltype.typeOf(x)) class FakeRTyper: - class type_system: 
name = 'lltypesystem' instance_reprs = {} class FakeCPU: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -88,8 +88,6 @@ # ____________________________________________________________ class LLtypeMixin(object): - type_system = 'lltype' - def get_class_of_box(self, box): base = box.getref_base() return lltype.cast_opaque_ptr(rclass.OBJECTPTR, base).typeptr @@ -123,7 +121,7 @@ ('value', lltype.Signed), ('next', lltype.Ptr(NODE3)), hints={'immutable': True})) - + node = lltype.malloc(NODE) node.value = 5 node.next = node @@ -236,7 +234,7 @@ inst_step = cpu.fielddescrof(W_ROOT, 'inst_step') inst_w_list = cpu.fielddescrof(W_ROOT, 'inst_w_list') w_root_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True) - + tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') @@ -462,8 +460,7 @@ class BaseTest(object): def parse(self, s, boxkinds=None, want_fail_descr=True, postprocess=None): - self.oparse = OpParser(s, self.cpu, self.namespace, 'lltype', - boxkinds, + self.oparse = OpParser(s, self.cpu, self.namespace, boxkinds, None, False, postprocess) return self.oparse.parse() @@ -571,4 +568,3 @@ return newloop # ____________________________________________________________ - diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -166,7 +166,7 @@ class JitMixin: basic = True enable_opts = ENABLE_ALL_OPTS - + # Basic terminology: the JIT produces "loops" and "bridges". # Bridges are always attached to failing guards. 
Every loop is @@ -243,7 +243,6 @@ def meta_interp(self, *args, **kwds): kwds['CPUClass'] = self.CPUClass - kwds['type_system'] = self.type_system if "backendopt" not in kwds: kwds["backendopt"] = False if "enable_opts" not in kwds and hasattr(self, 'enable_opts'): @@ -286,7 +285,6 @@ class LLJitMixin(JitMixin): - type_system = 'lltype' CPUClass = runner.LLGraphCPU @staticmethod diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -894,8 +894,8 @@ try: return ovfcheck(x * y) except OverflowError: - raise - + raise + def f(x, y): try: return g(x, y) @@ -1074,7 +1074,7 @@ from rpython.jit.metainterp.warmspot import WarmRunnerDesc interp, graph = get_interpreter(f, [0, 0], backendopt=False, - inline_threshold=0, type_system=self.type_system) + inline_threshold=0) clear_tcache() translator = interp.typer.annotator.translator translator.config.translation.gc = "boehm" @@ -4342,8 +4342,8 @@ myjitdriver = JitDriver(greens = ['num'], reds = ['n', 'x', 'x0', 'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'l', 's']) - - + + self.meta_interp(allfuncs, [9, 2000]) def test_unichar_ord_is_never_signed_on_64bit(self): diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -20,7 +20,6 @@ return ll_meta_interp(f, args, enable_opts=self.enable_opts, policy=policy, CPUClass=self.CPUClass, - type_system=self.type_system, backendopt=backendopt) def run_directly(self, f, args): @@ -1038,7 +1037,7 @@ def test_unroll_issue_3(self): py.test.skip("decide") - + from rpython.rlib.rerased import new_erasing_pair b_erase, b_unerase = new_erasing_pair("B") # list of ints c_erase, c_unerase = new_erasing_pair("C") # list of Nones @@ -1083,7 +1082,7 @@ elif i % 5 == 0: s += 1 elif i % 7 == 0: - s += 1 + s += 1 i -= 1 return s @@ 
-1093,7 +1092,7 @@ def test_sharing_guards(self): py.test.skip("unimplemented") driver = JitDriver(greens = [], reds = 'auto') - + def f(i): s = 0 while i > 0: diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -1114,10 +1114,8 @@ if getattr(graph, 'func', None) is f] init_graph = t._graphof(Frame.__init__.im_func) - deref = t.rtyper.type_system.deref - def direct_calls(graph): - return [deref(op.args[0].value)._callable.func_name + return [op.args[0].value._obj._callable.func_name for block, op in graph.iterblockops() if op.opname == 'direct_call'] diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -13,7 +13,8 @@ self.result = result -class WarmspotTests(object): +class TestLLWarmspot(LLJitMixin): + CPUClass = runner.LLGraphCPU def test_basic(self): mydriver = JitDriver(reds=['a'], @@ -575,10 +576,6 @@ "with the same jitdriver") -class TestLLWarmspot(WarmspotTests, LLJitMixin): - CPUClass = runner.LLGraphCPU - type_system = 'lltype' - class TestWarmspotDirect(object): def setup_class(cls): from rpython.jit.metainterp.typesystem import llhelper diff --git a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -76,7 +76,7 @@ hash_whatever(lltype.typeOf(s2), s2)) assert equal_whatever(lltype.typeOf(s1), s1, s2) fn(42) - interpret(fn, [42], type_system='lltype') + interpret(fn, [42]) def test_make_unwrap_greenkey(): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -52,7 
+52,7 @@ warmrunnerdesc.finish() translator.warmrunnerdesc = warmrunnerdesc # for later debugging -def ll_meta_interp(function, args, backendopt=False, type_system='lltype', +def ll_meta_interp(function, args, backendopt=False, listcomp=False, translationoptions={}, **kwds): if listcomp: extraconfigopts = {'translation.list_comprehension_operations': True} @@ -62,7 +62,6 @@ extraconfigopts['translation.' + key] = value interp, graph = get_interpreter(function, args, backendopt=False, # will be done below - type_system=type_system, **extraconfigopts) clear_tcache() return jittify_and_run(interp, graph, args, backendopt=backendopt, **kwds) @@ -71,7 +70,7 @@ backendopt=False, trace_limit=sys.maxint, inline=False, loop_longevity=0, retrace_limit=5, function_threshold=4, disable_unrolling=sys.maxint, - enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, + enable_opts=ALL_OPTS_NAMES, max_retrace_guards=15, max_unroll_recursion=7, **kwds): from rpython.config.config import ConfigError translator = interp.typer.annotator.translator @@ -525,17 +524,13 @@ fatalerror('~~~ Crash in JIT! 
%s' % (e,)) crash_in_jit._dont_inline_ = True - if self.translator.rtyper.type_system.name == 'lltypesystem': - def maybe_enter_jit(*args): - try: - maybe_compile_and_run(state.increment_threshold, *args) - except Exception, e: - crash_in_jit(e) - maybe_enter_jit._always_inline_ = True - else: - def maybe_enter_jit(*args): + def maybe_enter_jit(*args): + try: maybe_compile_and_run(state.increment_threshold, *args) - maybe_enter_jit._always_inline_ = True + except Exception as e: + crash_in_jit(e) + maybe_enter_jit._always_inline_ = True + jd._maybe_enter_jit_fn = maybe_enter_jit jd._maybe_compile_and_run_fn = maybe_compile_and_run diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -84,7 +84,7 @@ use_mock_model = False - def __init__(self, input, cpu, namespace, type_system, boxkinds, + def __init__(self, input, cpu, namespace, boxkinds, invent_fail_descr=default_fail_descr, nonstrict=False, postproces=None): self.input = input @@ -92,7 +92,6 @@ self._postproces = postproces self.cpu = cpu self._consts = namespace - self.type_system = type_system self.boxkinds = boxkinds or {} if namespace is not None: self._cache = namespace.setdefault('_CACHE_', {}) @@ -133,7 +132,7 @@ def box_for_var(self, elem): xxx try: - return self._cache[self.type_system, elem] + return self._cache[elem] except KeyError: pass if elem.startswith('i'): @@ -155,7 +154,7 @@ break else: raise ParseError("Unknown variable type: %s" % elem) - self._cache[self.type_system, elem] = box + self._cache[elem] = box box._str = elem return box @@ -426,13 +425,13 @@ inpargs = self.parse_header_line(line[1:-1]) return base_indent, inpargs, lines -def parse(input, cpu=None, namespace=None, type_system='lltype', +def parse(input, cpu=None, namespace=None, boxkinds=None, invent_fail_descr=default_fail_descr, no_namespace=False, nonstrict=False, OpParser=OpParser, postprocess=None): if namespace is None and not 
no_namespace: namespace = {} - return OpParser(input, cpu, namespace, type_system, boxkinds, + return OpParser(input, cpu, namespace, boxkinds, invent_fail_descr, nonstrict, postprocess).parse() def pure_parse(*args, **kwds): diff --git a/rpython/jit/tool/test/test_jitoutput.py b/rpython/jit/tool/test/test_jitoutput.py --- a/rpython/jit/tool/test/test_jitoutput.py +++ b/rpython/jit/tool/test/test_jitoutput.py @@ -21,7 +21,7 @@ cap = py.io.StdCaptureFD() try: - ll_meta_interp(f, [10], CPUClass=runner.LLGraphCPU, type_system='lltype', + ll_meta_interp(f, [10], CPUClass=runner.LLGraphCPU, ProfilerClass=Profiler) finally: out, err = cap.reset() diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,6 +1,6 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.lltypesystem.lltype import typeOf +from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr from rpython.annotator import model as annmodel from rpython.annotator.signature import annotation @@ -157,7 +157,6 @@ r_result = rtyper.getrepr(s_result) ll_result = r_result.lowleveltype name = getattr(self, 'name', None) or self.instance.__name__ - fake_method_name = rtyper.type_system.name[:2] + 'typefakeimpl' impl = getattr(self, 'lltypeimpl', None) fakeimpl = getattr(self, 'lltypefakeimpl', self.instance) if impl: @@ -201,13 +200,10 @@ obj = rtyper.getannmixlevel().delayedfunction( impl, signature_args, hop.s_result) else: - #if not self.safe_not_sandboxed: - # print '>>>>>>>>>>>>>-----------------------------------' - # print name, self.name - # print '<<<<<<<<<<<<<-----------------------------------' - obj = rtyper.type_system.getexternalcallable(args_ll, ll_result, - name, _external_name=self.name, _callable=fakeimpl, - _safe_not_sandboxed=self.safe_not_sandboxed) + FT = FuncType(args_ll, ll_result) + obj = functionptr(FT, name, 
_external_name=self.name, + _callable=fakeimpl, + _safe_not_sandboxed=self.safe_not_sandboxed) vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) hop.exception_is_here() return hop.genop('direct_call', vlist, r_result) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -466,7 +466,7 @@ raise LLException(etype, evalue, *extraargs) def invoke_callable_with_pyexceptions(self, fptr, *args): - obj = self.llinterpreter.typer.type_system.deref(fptr) + obj = fptr._obj try: return obj._callable(*args) except LLException, e: @@ -644,7 +644,7 @@ array[index] = item def perform_call(self, f, ARGS, args): - fobj = self.llinterpreter.typer.type_system.deref(f) + fobj = f._obj has_callable = getattr(fobj, '_callable', None) is not None if hasattr(fobj, 'graph'): graph = fobj.graph @@ -669,7 +669,7 @@ graphs = args[-1] args = args[:-1] if graphs is not None: - obj = self.llinterpreter.typer.type_system.deref(f) + obj = f._obj if hasattr(obj, 'graph'): assert obj.graph in graphs else: diff --git a/rpython/rtyper/lltypesystem/lltype.py b/rpython/rtyper/lltypesystem/lltype.py --- a/rpython/rtyper/lltypesystem/lltype.py +++ b/rpython/rtyper/lltypesystem/lltype.py @@ -2265,6 +2265,35 @@ o = _func(TYPE, _name=name, **attrs) return _ptr(Ptr(TYPE), o) +def _getconcretetype(v): + return v.concretetype + +def getfunctionptr(graph, getconcretetype=_getconcretetype): + """Return callable given a Python function.""" + llinputs = [getconcretetype(v) for v in graph.getargs()] + lloutput = getconcretetype(graph.getreturnvar()) + + FT = FuncType(llinputs, lloutput) + name = graph.name + if hasattr(graph, 'func') and callable(graph.func): + # the Python function object can have _llfnobjattrs_, specifying + # attributes that are forced upon the functionptr(). 
The idea + # for not passing these extra attributes as arguments to + # getcallable() itself is that multiple calls to getcallable() + # for the same graph should return equal functionptr() objects. + if hasattr(graph.func, '_llfnobjattrs_'): + fnobjattrs = graph.func._llfnobjattrs_.copy() + # can specify a '_name', but use graph.name by default + name = fnobjattrs.pop('_name', name) + else: + fnobjattrs = {} + # _callable is normally graph.func, but can be overridden: + # see fakeimpl in extfunc.py + _callable = fnobjattrs.pop('_callable', graph.func) + return functionptr(FT, name, graph=graph, _callable=_callable, + **fnobjattrs) + else: + return functionptr(FT, name, graph=graph) def nullptr(T): return Ptr(T)._defl() @@ -2444,3 +2473,5 @@ for item in v.items: for i in dissect_ll_instance(item, t.OF, memo): yield i + + diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType +from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType, Ptr from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -239,7 +239,8 @@ if hop.s_result.is_constant(): return hop.inputconst(Bool, hop.s_result.const) else: - return hop.rtyper.type_system.check_null(self, hop) + vlist = hop.inputargs(self) + return hop.genop('ptr_nonzero', vlist, resulttype=Bool) class IteratorRepr(Repr): @@ -282,7 +283,23 @@ def rtype_is_((robj1, robj2), hop): if hop.s_result.is_constant(): return inputconst(Bool, hop.s_result.const) - return hop.rtyper.type_system.generic_is(robj1, robj2, hop) + roriginal1 = robj1 + roriginal2 = robj2 + if robj1.lowleveltype is Void: + robj1 = robj2 + elif robj2.lowleveltype is Void: + robj2 = robj1 + if (not 
isinstance(robj1.lowleveltype, Ptr) or + not isinstance(robj2.lowleveltype, Ptr)): + raise TyperError('is of instances of the non-pointers: %r, %r' % ( + roriginal1, roriginal2)) + if robj1.lowleveltype != robj2.lowleveltype: + raise TyperError('is of instances of different pointer types: %r, %r' % ( + roriginal1, roriginal2)) + + v_list = hop.inputargs(robj1, robj2) + return hop.genop('ptr_eq', v_list, resulttype=Bool) + # default implementation for checked getitems diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -13,11 +13,11 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import llmemory -from rpython.rtyper.lltypesystem.lltype import (typeOf, Void, ForwardReference, - Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed, cast_pointer) +from rpython.rtyper.lltypesystem.lltype import ( + typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, + Array, Signed, cast_pointer, getfunctionptr) from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, warning, impossible_repr) -from rpython.rtyper.typesystem import getfunctionptr from rpython.tool.pairtype import pair, pairtype from rpython.translator.unsimplify import varoftype @@ -266,7 +266,7 @@ else: # missing entry -- need a 'null' of the type that matches # this row - llfn = self.rtyper.type_system.null_callable(row.fntype) + llfn = nullptr(row.fntype.TO) llfns[row.attrname] = llfn if len(self.uniquerows) == 1: if found_anything: @@ -291,7 +291,7 @@ elif isinstance(value, staticmethod): value = value.__get__(42) # hackish, get the function wrapped by staticmethod if value is None: - null = self.rtyper.type_system.null_callable(self.lowleveltype) + null = nullptr(self.lowleveltype.TO) return null funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) @@ -914,7 +914,7 @@ return None else: T = 
self.lowleveltype - return self.rtyper.type_system.null_callable(T) + return nullptr(T.TO) bk = self.rtyper.annotator.bookkeeper classdesc = bk.getdesc(cls) return self.convert_desc(classdesc) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -23,9 +23,8 @@ from rpython.rtyper.exceptiondata import ExceptionData from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, - attachRuntimeTypeInfo, Primitive) + attachRuntimeTypeInfo, Primitive, getfunctionptr) from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError -from rpython.rtyper.typesystem import LowLevelTypeSystem, getfunctionptr from rpython.rtyper import rclass from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair @@ -38,7 +37,6 @@ def __init__(self, annotator): self.annotator = annotator self.lowlevel_ann_policy = LowLevelAnnotatorPolicy(self) - self.type_system = LowLevelTypeSystem() self.reprs = {} self._reprs_must_call_setup = [] self._seen_reprs_must_call_setup = {} @@ -874,7 +872,7 @@ # build the 'direct_call' operation f = self.rtyper.getcallable(graph) c = inputconst(typeOf(f), f) - fobj = self.rtyper.type_system.deref(f) + fobj = f._obj return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) diff --git a/rpython/rtyper/test/test_llannotation.py b/rpython/rtyper/test/test_llannotation.py --- a/rpython/rtyper/test/test_llannotation.py +++ b/rpython/rtyper/test/test_llannotation.py @@ -5,7 +5,7 @@ from rpython.rlib.rarithmetic import r_uint, r_singlefloat from rpython.rtyper.llannotation import ( SomePtr, annotation_to_lltype, ll_to_annotation) -from rpython.rtyper.typesystem import lltype +from rpython.rtyper.lltypesystem import lltype import rpython.rtyper.rtyper # make sure to import the world class C(object): diff --git a/rpython/rtyper/test/test_llinterp.py 
b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -59,8 +59,7 @@ _tcache.clear() def get_interpreter(func, values, view='auto', viewbefore='auto', policy=None, - type_system="lltype", backendopt=False, config=None, - **extraconfigopts): + backendopt=False, config=None, **extraconfigopts): extra_key = [(key, value) for key, value in extraconfigopts.iteritems()] extra_key.sort() extra_key = tuple(extra_key) @@ -93,10 +92,8 @@ return interp, graph def interpret(func, values, view='auto', viewbefore='auto', policy=None, - type_system="lltype", backendopt=False, config=None, - malloc_check=True, **kwargs): + backendopt=False, config=None, malloc_check=True, **kwargs): interp, graph = get_interpreter(func, values, view, viewbefore, policy, - type_system=type_system, backendopt=backendopt, config=config, **kwargs) if not malloc_check: @@ -112,10 +109,9 @@ return result def interpret_raises(exc, func, values, view='auto', viewbefore='auto', - policy=None, type_system="lltype", + policy=None, backendopt=False): interp, graph = get_interpreter(func, values, view, viewbefore, policy, - type_system=type_system, backendopt=backendopt) info = py.test.raises(LLException, "interp.eval_graph(graph, values)") try: diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -191,7 +191,6 @@ class TestRlist(BaseRtypingTest): - type_system = 'lltype' rlist = ll_rlist def test_simple(self): diff --git a/rpython/rtyper/test/test_rvirtualizable.py b/rpython/rtyper/test/test_rvirtualizable.py --- a/rpython/rtyper/test/test_rvirtualizable.py +++ b/rpython/rtyper/test/test_rvirtualizable.py @@ -153,10 +153,7 @@ if vinst_ll.vable_token: raise ValueError annhelper = MixLevelHelperAnnotator(rtyper) - if self.type_system == 'lltype': - s_vinst = SomePtr(v_inst_ll_type) - else: - s_vinst = 
annmodel.SomeOOInstance(v_inst_ll_type) + s_vinst = SomePtr(v_inst_ll_type) funcptr = annhelper.delayedfunction(mycall, [s_vinst], annmodel.s_None) annhelper.finish() replace_force_virtualizable_with_call(graphs, v_inst_ll_type, funcptr) @@ -340,7 +337,6 @@ g(a) t, typer, graph = self.gengraph(f, []) - deref = typer.type_system.deref desc = typer.annotator.bookkeeper.getdesc(g) g_graphs = desc._cache.items() @@ -357,7 +353,7 @@ def get_direct_call_graph(graph): for block, op in graph.iterblockops(): if op.opname == 'direct_call': - return deref(op.args[0].value).graph + return op.args[0].value._obj.graph return None assert get_direct_call_graph(f_graph) is g_graph_directly diff --git a/rpython/rtyper/test/tool.py b/rpython/rtyper/test/tool.py --- a/rpython/rtyper/test/tool.py +++ b/rpython/rtyper/test/tool.py @@ -3,7 +3,6 @@ from rpython.rtyper.test.test_llinterp import gengraph, interpret, interpret_raises class BaseRtypingTest(object): - type_system = 'lltype' FLOAT_PRECISION = 8 def gengraph(self, func, argtypes=[], viewbefore='auto', policy=None, @@ -12,10 +11,10 @@ backendopt=backendopt, config=config) def interpret(self, fn, args, **kwds): - return interpret(fn, args, type_system=self.type_system, **kwds) + return interpret(fn, args, **kwds) def interpret_raises(self, exc, fn, args, **kwds): - return interpret_raises(exc, fn, args, type_system=self.type_system, **kwds) + return interpret_raises(exc, fn, args, **kwds) def float_eq(self, x, y): return x == y diff --git a/rpython/rtyper/typesystem.py b/rpython/rtyper/typesystem.py deleted file mode 100644 --- a/rpython/rtyper/typesystem.py +++ /dev/null @@ -1,78 +0,0 @@ - -"""typesystem.py -- Typesystem-specific operations for RTyper.""" - -from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.error import TyperError - - -class LowLevelTypeSystem(object): - name = "lltypesystem" - - def deref(self, obj): - assert isinstance(lltype.typeOf(obj), lltype.Ptr) - return obj._obj - - def check_null(self, 
repr, hop): - # None is a nullptr, which is false; everything else is true. - vlist = hop.inputargs(repr) - return hop.genop('ptr_nonzero', vlist, resulttype=lltype.Bool) - - def null_callable(self, T): - return lltype.nullptr(T.TO) - - def getexternalcallable(self, ll_args, ll_result, name, **kwds): - FT = lltype.FuncType(ll_args, ll_result) - return lltype.functionptr(FT, name, **kwds) - - def generic_is(self, robj1, robj2, hop): - roriginal1 = robj1 - roriginal2 = robj2 - if robj1.lowleveltype is lltype.Void: - robj1 = robj2 - elif robj2.lowleveltype is lltype.Void: - robj2 = robj1 - if (not isinstance(robj1.lowleveltype, lltype.Ptr) or - not isinstance(robj2.lowleveltype, lltype.Ptr)): - raise TyperError('is of instances of the non-pointers: %r, %r' % ( - roriginal1, roriginal2)) - if robj1.lowleveltype != robj2.lowleveltype: - raise TyperError('is of instances of different pointer types: %r, %r' % ( - roriginal1, roriginal2)) - - v_list = hop.inputargs(robj1, robj2) - return hop.genop('ptr_eq', v_list, resulttype=lltype.Bool) - - -def _getconcretetype(v): - return v.concretetype - - -def getfunctionptr(graph, getconcretetype=None): - """Return callable given a Python function.""" - if getconcretetype is None: - getconcretetype = _getconcretetype - llinputs = [getconcretetype(v) for v in graph.getargs()] - lloutput = getconcretetype(graph.getreturnvar()) - - FT = lltype.FuncType(llinputs, lloutput) - name = graph.name - if hasattr(graph, 'func') and callable(graph.func): - # the Python function object can have _llfnobjattrs_, specifying - # attributes that are forced upon the functionptr(). The idea - # for not passing these extra attributes as arguments to - # getcallable() itself is that multiple calls to getcallable() - # for the same graph should return equal functionptr() objects. 
- if hasattr(graph.func, '_llfnobjattrs_'): - fnobjattrs = graph.func._llfnobjattrs_.copy() - # can specify a '_name', but use graph.name by default - name = fnobjattrs.pop('_name', name) - else: - fnobjattrs = {} - # _callable is normally graph.func, but can be overridden: - # see fakeimpl in extfunc.py - _callable = fnobjattrs.pop('_callable', graph.func) - return lltype.functionptr(FT, name, graph = graph, - _callable = _callable, **fnobjattrs) - else: - return lltype.functionptr(FT, name, graph = graph) - diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py --- a/rpython/translator/backendopt/mallocv.py +++ b/rpython/translator/backendopt/mallocv.py @@ -4,7 +4,7 @@ from rpython.translator.backendopt.support import log from rpython.translator.simplify import join_blocks from rpython.translator.unsimplify import varoftype -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -1,6 +1,6 @@ from rpython.translator.c.genc import CBuilder -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -3,7 +3,7 @@ import sys, os from rpython.rlib import exports from rpython.rlib.entrypoint import entrypoint -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool import runsubprocess from rpython.tool.nullpath import 
NullPyPathLocal @@ -468,7 +468,7 @@ '$(CC) -o $*.o -c $*.vmprof.lbl.s', 'mv $*.gctmp $*.gcmap', 'rm $*.vmprof.lbl.s']) - + # the rule to compute gcmaptable.s mk.rule('gcmaptable.s', '$(GCMAPFILES)', [ @@ -759,7 +759,7 @@ database, database.translator.rtyper) for line in preimplementationlines: print >> f, line - f.write('#endif /* _PY_PREIMPL_H */\n') + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function diff --git a/rpython/translator/c/test/test_database.py b/rpython/translator/c/test/test_database.py --- a/rpython/translator/c/test/test_database.py +++ b/rpython/translator/c/test/test_database.py @@ -4,7 +4,7 @@ from rpython.translator.c.database import LowLevelDatabase from rpython.flowspace.model import Constant, Variable, SpaceOperation from rpython.flowspace.model import Block, Link, FunctionGraph -from rpython.rtyper.typesystem import getfunctionptr +from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr @@ -43,7 +43,7 @@ def test_inlined_struct(): db = LowLevelDatabase() - pfx = db.namespace.global_prefix + 'g_' + pfx = db.namespace.global_prefix + 'g_' S = GcStruct('test', ('x', Struct('subtest', ('y', Signed)))) s = malloc(S) s.x.y = 42 @@ -56,7 +56,7 @@ def test_complete(): db = LowLevelDatabase() - pfx = db.namespace.global_prefix + 'g_' + pfx = db.namespace.global_prefix + 'g_' T = GcStruct('subtest', ('y', Signed)) S = GcStruct('test', ('x', Ptr(T))) s = malloc(S) @@ -136,7 +136,7 @@ block.closeblock(Link([result], graph.returnblock)) graph.getreturnvar().concretetype = Signed # -------------------- end -------------------- - + F = FuncType([Signed], Signed) f = functionptr(F, "f", graph=graph) db = LowLevelDatabase() @@ -206,7 +206,7 @@ s.ptr2 = ptr2 return s.ptr1.x * s.ptr2.x t, graph = makegraph(ll_f, [int]) - + db = LowLevelDatabase(t) db.get(getfunctionptr(graph)) db.complete() diff 
--git a/rpython/translator/test/test_unsimplify.py b/rpython/translator/test/test_unsimplify.py --- a/rpython/translator/test/test_unsimplify.py +++ b/rpython/translator/test/test_unsimplify.py @@ -7,7 +7,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.tool.udir import udir -def translate(func, argtypes, type_system="lltype"): +def translate(func, argtypes): t = TranslationContext() t.buildannotator().build_types(func, argtypes) t.entry_point_graph = graphof(t, func) @@ -73,14 +73,13 @@ def test_call_initial_function(): tmpfile = str(udir.join('test_call_initial_function')) - type_system = 'lltype' def f(x): return x * 6 def hello_world(): if we_are_translated(): fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) - graph, t = translate(f, [int], type_system) + graph, t = translate(f, [int]) call_initial_function(t, hello_world) # if os.path.exists(tmpfile): @@ -92,14 +91,13 @@ def test_call_final_function(): tmpfile = str(udir.join('test_call_final_function')) - type_system = 'lltype' def f(x): return x * 6 def goodbye_world(): if we_are_translated(): fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0644) os.close(fd) - graph, t = translate(f, [int], type_system) + graph, t = translate(f, [int]) call_final_function(t, goodbye_world) # if os.path.exists(tmpfile): From noreply at buildbot.pypy.org Fri Oct 9 22:08:29 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 9 Oct 2015 22:08:29 +0200 (CEST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20151009200829.714EF1C148F@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80098:ed3fe62985f8 Date: 2015-10-09 21:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ed3fe62985f8/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -64,3 +64,7 @@ .. branch: fortran-order Allow creation of fortran-ordered ndarrays + +.. 
branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. From noreply at buildbot.pypy.org Sat Oct 10 03:09:26 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 10 Oct 2015 03:09:26 +0200 (CEST) Subject: [pypy-commit] pypy llconst: Remove concretetype attribute from Constant and create LLConstant Message-ID: <20151010010926.624861C0EFC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80099:251f3b632147 Date: 2015-10-10 00:58 +0100 http://bitbucket.org/pypy/pypy/changeset/251f3b632147/ Log: Remove concretetype attribute from Constant and create LLConstant diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py --- a/rpython/flowspace/model.py +++ b/rpython/flowspace/model.py @@ -337,12 +337,7 @@ class Constant(Hashable): - __slots__ = ["concretetype"] - - def __init__(self, value, concretetype=None): - Hashable.__init__(self, value) - if concretetype is not None: - self.concretetype = concretetype + __slots__ = [] def foldable(self): to_check = self.value diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -1,6 +1,6 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.flowspace.model import ( - SpaceOperation, Variable, Constant, checkgraph) + SpaceOperation, Variable, checkgraph) from rpython.translator.unsimplify import insert_empty_block from rpython.translator.unsimplify import insert_empty_startblock from rpython.translator.unsimplify import starts_with_empty_block @@ -13,6 +13,7 @@ from rpython.rtyper import rmodel from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator from rpython.rtyper.rtyper import LowLevelOpList +from rpython.rtyper.rmodel import inputconst from rpython.rtyper.rbuiltin import gen_cast from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem.lloperation import 
llop @@ -273,7 +274,7 @@ def inittime_helper(self, ll_helper, ll_args, ll_result, inline=True): ptr = self.annotate_helper(ll_helper, ll_args, ll_result, inline=inline) - return Constant(ptr, lltype.typeOf(ptr)) + return inputconst(lltype.typeOf(ptr), ptr) def annotate_finalizer(self, ll_finalizer, ll_args, ll_result): fptr = self.annotate_helper(ll_finalizer, ll_args, ll_result) diff --git a/rpython/rlib/nonconst.py b/rpython/rlib/nonconst.py --- a/rpython/rlib/nonconst.py +++ b/rpython/rlib/nonconst.py @@ -1,9 +1,8 @@ - -""" simple non-constant constant. Ie constant which does not get annotated as constant +""" simple non-constant constant. +Ie constant which does not get annotated as constant """ from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.flowspace.model import Constant from rpython.annotator.model import not_const class NonConstant(object): @@ -39,7 +38,6 @@ def specialize_call(self, hop): hop.exception_cannot_occur() - retval = Constant(hop.r_result.convert_const(hop.args_v[0].value)) - retval.concretetype = hop.r_result.lowleveltype - return retval - + value = hop.r_result.convert_const(hop.args_v[0].value) + lltype = hop.r_result.lowleveltype + return hop.inputconst(lltype, value) diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -11,10 +11,9 @@ SomePtr, annotation_to_lltype, lltype_to_annotation) from rpython.rtyper.normalizecalls import perform_normalizations from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.flowspace.model import Constant from rpython.rlib.objectmodel import specialize from rpython.rtyper import extregistry -from rpython.rtyper.rmodel import warning +from rpython.rtyper.rmodel import warning, LLConstant class KeyComp(object): @@ -162,7 +161,7 @@ def constfunc(self, ll_function, args_s, s_result): p = self.delayedfunction(ll_function, args_s, s_result) - return Constant(p, lltype.typeOf(p)) + 
return LLConstant(p, lltype.typeOf(p)) def graph2delayed(self, graph, FUNCTYPE=None): if FUNCTYPE is None: @@ -177,7 +176,7 @@ def graph2const(self, graph): p = self.graph2delayed(graph) - return Constant(p, lltype.typeOf(p)) + return LLConstant(p, lltype.typeOf(p)) def getdelayedrepr(self, s_value, check_never_seen=True): """Like rtyper.getrepr(), but the resulting repr will not be setup() at @@ -309,7 +308,7 @@ vlist = hop.inputargs(*args_r) p = self.instance.llfnptr TYPE = lltype.typeOf(p) - c_func = Constant(p, TYPE) + c_func = LLConstant(p, TYPE) FUNCTYPE = TYPE.TO for r_arg, ARGTYPE in zip(args_r, FUNCTYPE.ARGS): assert r_arg.lowleveltype == ARGTYPE diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -355,8 +355,15 @@ # ____________________________________________________________ +class LLConstant(Constant): + __slots__ = ["concretetype"] + def __init__(self, value, concretetype): + Constant.__init__(self, value) + self.concretetype = concretetype + + def inputconst(reqtype, value): - """Return a Constant with the given value, of the requested type, + """Return a LLConstant with the given value, of the requested type, which can be a Repr instance or a low-level type. 
""" if isinstance(reqtype, Repr): @@ -369,9 +376,7 @@ if not lltype._contains_value(value): raise TyperError("inputconst(): expected a %r, got %r" % (lltype, value)) - c = Constant(value) - c.concretetype = lltype - return c + return LLConstant(value, lltype) class BrokenReprTyperError(TyperError): """ raised when trying to setup a Repr whose setup diff --git a/rpython/rtyper/rnone.py b/rpython/rtyper/rnone.py --- a/rpython/rtyper/rnone.py +++ b/rpython/rtyper/rnone.py @@ -1,6 +1,5 @@ -from rpython.flowspace.model import Constant from rpython.annotator.model import SomeNone -from rpython.rtyper.rmodel import Repr, TyperError, inputconst +from rpython.rtyper.rmodel import Repr, TyperError, inputconst, LLConstant from rpython.rtyper.lltypesystem.lltype import Void, Bool, Ptr, Char from rpython.rtyper.lltypesystem.llmemory import Address from rpython.rtyper.rpbc import SmallFunctionSetPBCRepr @@ -11,7 +10,7 @@ lowleveltype = Void def rtype_bool(self, hop): - return Constant(False, Bool) + return LLConstant(False, Bool) def none_call(self, hop): raise TyperError("attempt to call constant None") diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -17,7 +17,7 @@ typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed, cast_pointer, getfunctionptr) from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, - warning, impossible_repr) + warning, impossible_repr, LLConstant) from rpython.tool.pairtype import pair, pairtype from rpython.translator.unsimplify import varoftype @@ -779,7 +779,7 @@ if access is r_pbc2.access_set: value = r_pbc2.convert_desc(frozendesc1) lltype = r_pbc2.lowleveltype - return Constant(value, lltype) + return LLConstant(value, lltype) return NotImplemented class __extend__(pairtype(MultipleFrozenPBCReprBase, diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -4,7 
+4,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import Repr +from rpython.rtyper.rmodel import Repr, inputconst from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype @@ -48,7 +48,7 @@ pass else: assert hop.s_result.is_constant() - return hop.inputconst(hop.r_result, hop.s_result.const) + return inputconst(hop.r_result, hop.s_result.const) assert attr in self.lowleveltype.TO._flds # check that the field exists FIELD_TYPE = getattr(self.lowleveltype.TO, attr) if isinstance(FIELD_TYPE, lltype.ContainerType): @@ -75,7 +75,7 @@ def rtype_len(self, hop): ARRAY = hop.args_r[0].lowleveltype.TO if isinstance(ARRAY, lltype.FixedSizeArray): - return hop.inputconst(lltype.Signed, ARRAY.length) + return inputconst(lltype.Signed, ARRAY.length) else: vlist = hop.inputargs(self) return hop.genop('getarraysize', vlist, @@ -100,7 +100,7 @@ opname = 'direct_call' else: opname = 'indirect_call' - vlist.append(hop.inputconst(lltype.Void, None)) + vlist.append(inputconst(lltype.Void, None)) hop.exception_is_here() return hop.genop(opname, vlist, resulttype = self.lowleveltype.TO.RESULT) @@ -123,15 +123,14 @@ if isinstance(hop.r_result, InteriorPtrRepr): v_array, v_index = hop.inputargs(r_ptr, lltype.Signed) INTERIOR_PTR_TYPE = r_ptr.lowleveltype._interior_ptr_type_with_index(ITEM_TYPE) - cflags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) - args = [flowmodel.Constant(INTERIOR_PTR_TYPE, lltype.Void), - cflags] + cflags = inputconst(lltype.Void, {'flavor': 'gc'}) + args = [inputconst(lltype.Void, INTERIOR_PTR_TYPE), cflags] v_interior_ptr = hop.genop('malloc', args, resulttype=lltype.Ptr(INTERIOR_PTR_TYPE)) hop.genop('setfield', - [v_interior_ptr, flowmodel.Constant('ptr', lltype.Void), v_array]) + [v_interior_ptr, inputconst(lltype.Void, 'ptr'), v_array]) hop.genop('setfield', - [v_interior_ptr, flowmodel.Constant('index', 
lltype.Void), v_index]) + [v_interior_ptr, inputconst(lltype.Void, 'index'), v_index]) return v_interior_ptr else: newopname = 'getarraysubstruct' @@ -220,7 +219,7 @@ self.v_offsets.append(None) else: assert isinstance(offset, str) - self.v_offsets.append(flowmodel.Constant(offset, lltype.Void)) + self.v_offsets.append(inputconst(lltype.Void, offset)) self.parentptrtype = lltype.Ptr(ptrtype.PARENTTYPE) self.resulttype = lltype.Ptr(ptrtype.TO) assert numitemoffsets <= 1 @@ -237,7 +236,7 @@ name = nameiter.next() vlist.append( hop.genop('getfield', - [v_self, flowmodel.Constant(name, lltype.Void)], + [v_self, inputconst(lltype.Void, name)], resulttype=INTERIOR_TYPE._flds[name])) else: vlist.append(v_self) @@ -246,7 +245,7 @@ name = nameiter.next() vlist.append( hop.genop('getfield', - [v_self, flowmodel.Constant(name, lltype.Void)], + [v_self, inputconst(lltype.Void, name)], resulttype=INTERIOR_TYPE._flds[name])) else: vlist.append(v_offset) @@ -296,14 +295,14 @@ if isinstance(ITEM_TYPE, lltype.ContainerType): v_array, v_index = hop.inputargs(r_ptr, lltype.Signed) INTERIOR_PTR_TYPE = r_ptr.lowleveltype._interior_ptr_type_with_index(ITEM_TYPE) - cflags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) - args = [flowmodel.Constant(INTERIOR_PTR_TYPE, lltype.Void), cflags] + cflags = inputconst(lltype.Void, {'flavor': 'gc'}) + args = [inputconst(lltype.Void, INTERIOR_PTR_TYPE), cflags] v_interior_ptr = hop.genop('malloc', args, resulttype=lltype.Ptr(INTERIOR_PTR_TYPE)) hop.genop('setfield', - [v_interior_ptr, flowmodel.Constant('ptr', lltype.Void), v_array]) + [v_interior_ptr, inputconst(lltype.Void, 'ptr'), v_array]) hop.genop('setfield', - [v_interior_ptr, flowmodel.Constant('index', lltype.Void), v_index]) + [v_interior_ptr, inputconst(lltype.Void, 'index'), v_index]) return v_interior_ptr else: v_self, v_index = hop.inputargs(r_ptr, lltype.Signed) diff --git a/rpython/translator/backendopt/constfold.py b/rpython/translator/backendopt/constfold.py --- 
a/rpython/translator/backendopt/constfold.py +++ b/rpython/translator/backendopt/constfold.py @@ -1,5 +1,6 @@ -from rpython.flowspace.model import (Constant, Variable, SpaceOperation, +from rpython.flowspace.model import (Variable, SpaceOperation, mkentrymap) +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.unsimplify import insert_empty_block, split_block @@ -13,7 +14,7 @@ vargs = [] args = [] for v in spaceop.args: - if isinstance(v, Constant): + if isinstance(v, LLConstant): args.append(v.value) elif v in constants: v = constants[v] @@ -41,7 +42,7 @@ # success in folding this space operation if spaceop.opname in fixup_op_result: result = fixup_op_result[spaceop.opname](result) - constants[spaceop.result] = Constant(result, RESTYPE) + constants[spaceop.result] = LLConstant(result, RESTYPE) folded_count += 1 continue # failed to fold an operation, exit early if requested @@ -50,7 +51,7 @@ else: if vargsmodif: if (spaceop.opname == 'indirect_call' - and isinstance(vargs[0], Constant)): + and isinstance(vargs[0], LLConstant)): spaceop = SpaceOperation('direct_call', vargs[:-1], spaceop.result) else: @@ -216,7 +217,7 @@ if isinstance(vexit, Variable): for link in block.exits: if vexit in link.args and link.exitcase != 'default': - remap = {vexit: Constant(link.llexitcase, + remap = {vexit: LLConstant(link.llexitcase, vexit.concretetype)} link.args = [remap.get(v, v) for v in link.args] count += 1 @@ -233,7 +234,7 @@ rest = links[1:] diffuse = [] for i, c in enumerate(firstlink.args): - if not isinstance(c, Constant): + if not isinstance(c, LLConstant): continue for lnk in rest: if lnk.args[i] != c: @@ -268,7 +269,7 @@ for link in list(graph.iterlinks()): constants = {} for v1, v2 in zip(link.args, link.target.inputargs): - if isinstance(v1, Constant): + if isinstance(v1, LLConstant): constants[v2] = v1 if constants: 
prepare_constant_fold_link(link, constants, splitblocks) diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -2,6 +2,7 @@ from rpython.flowspace.model import (Variable, Constant, Block, Link, SpaceOperation, FunctionGraph, mkentrymap) +from rpython.rtyper.rmodel import LLConstant, inputconst from rpython.rtyper.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr, normalizeptr from rpython.tool.algo import sparsemat from rpython.translator.backendopt import removenoops @@ -355,9 +356,8 @@ def generic_exception_matching(self, afterblock, copiedexceptblock): #XXXXX don't look: insert blocks that do exception matching #for the cases where direct matching did not work - exc_match = Constant( - self.translator.rtyper.exceptiondata.fn_exception_match) - exc_match.concretetype = typeOf(exc_match.value) + ll_exc_match = self.translator.rtyper.exceptiondata.fn_exception_match + exc_match = inputconst(typeOf(ll_exc_match), ll_exc_match) blocks = [] for i, link in enumerate(afterblock.exits[1:]): etype = copiedexceptblock.inputargs[0].copy() @@ -366,8 +366,8 @@ block = Block([etype, evalue] + passon_vars) res = Variable() res.concretetype = Bool - cexitcase = Constant(link.llexitcase) - cexitcase.concretetype = typeOf(cexitcase.value) + exitcase = link.llexitcase + cexitcase = LLConstant(exitcase, typeOf(exitcase)) args = [exc_match, etype, cexitcase] block.operations.append(SpaceOperation("direct_call", args, res)) block.exitswitch = res @@ -597,8 +597,8 @@ '_dont_inline_', False): continue if candidate(graph): - tag = Constant('inline', Void) - label = Constant(n, Signed) + tag = LLConstant('inline', Void) + label = LLConstant(n, Signed) dummy = Variable() dummy.concretetype = Void count = SpaceOperation('instrument_count', diff --git a/rpython/translator/backendopt/malloc.py b/rpython/translator/backendopt/malloc.py --- 
a/rpython/translator/backendopt/malloc.py +++ b/rpython/translator/backendopt/malloc.py @@ -1,6 +1,7 @@ from rpython.flowspace.model import Variable, Constant, SpaceOperation from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.rmodel import LLConstant from rpython.translator import simplify from rpython.translator.backendopt import removenoops from rpython.translator.backendopt.support import log @@ -88,7 +89,7 @@ for key in self.needsubmallocs: v = Variable() v.concretetype = self.newvarstype[key] - c = Constant(v.concretetype.TO, lltype.Void) + c = LLConstant(v.concretetype.TO, lltype.Void) if c.value == op.args[0].value: progress = False # replacing a malloc with # the same malloc! @@ -348,10 +349,8 @@ return False def recreate_malloc(self, c, v): - return SpaceOperation(self.MALLOC_OP, [c, - Constant({'flavor': 'gc'}, - lltype.Void)], - v) + return SpaceOperation( + self.MALLOC_OP, [c, LLConstant({'flavor': 'gc'}, lltype.Void)], v) def get_STRUCT(self, TYPE): STRUCT = TYPE.TO @@ -436,8 +435,7 @@ ('data', FIELDTYPE))) elif not isinstance(FIELDTYPE, lltype.ContainerType): example = FIELDTYPE._defl() - constant = Constant(example) - constant.concretetype = FIELDTYPE + constant = LLConstant(example, FIELDTYPE) self.flatconstants[key] = constant self.flatnames.append(key) self.newvarstype[key] = FIELDTYPE @@ -458,7 +456,7 @@ from rpython.rtyper.lltypesystem.rstr import string_repr msg = "unreachable operation (from malloc.py)" ll_msg = string_repr.convert_const(msg) - c_msg = Constant(ll_msg, lltype.typeOf(ll_msg)) + c_msg = LLConstant(ll_msg, lltype.typeOf(ll_msg)) return SpaceOperation("debug_fatalerror", [c_msg], v_result) def flowin_op(self, op, vars, newvarsmap): @@ -469,7 +467,7 @@ if key not in newvarsmap: newop = self.handle_unreachable(op.result) elif key in self.accessed_substructs: - c_name = Constant('data', lltype.Void) + c_name = LLConstant('data', lltype.Void) newop = 
SpaceOperation("getfield", [newvarsmap[key], c_name], op.result) @@ -486,7 +484,7 @@ newop = self.handle_unreachable(op.result) self.newops.append(newop) elif key in self.accessed_substructs: - c_name = Constant('data', lltype.Void) + c_name = LLConstant('data', lltype.Void) newop = SpaceOperation("setfield", [newvarsmap[key], c_name, op.args[2]], op.result) @@ -522,7 +520,7 @@ except KeyError: newop = self.handle_unreachable(op.result) else: - cname = Constant('data', lltype.Void) + cname = LLConstant('data', lltype.Void) newop = SpaceOperation(opname, [v, cname], op.result) @@ -530,7 +528,7 @@ elif op.opname in ("ptr_iszero", "ptr_nonzero"): # we know the pointer is not NULL if it comes from # a successful malloc - c = Constant(op.opname == "ptr_nonzero", lltype.Bool) + c = LLConstant(op.opname == "ptr_nonzero", lltype.Bool) newop = SpaceOperation('same_as', [c], op.result) self.newops.append(newop) else: diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py --- a/rpython/translator/backendopt/mallocv.py +++ b/rpython/translator/backendopt/mallocv.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.rmodel import LLConstant def virtualize_mallocs(translator, graphs, verbose=False): @@ -576,16 +577,15 @@ v_incoming_value = varoftype(TVAL) block = Block([v_ignored_type, v_incoming_value]) # - c_EXCTYPE = Constant(typedesc.MALLOCTYPE, lltype.Void) + c_EXCTYPE = LLConstant(typedesc.MALLOCTYPE, lltype.Void) v = varoftype(lltype.Ptr(typedesc.MALLOCTYPE)) - c_flavor = Constant({'flavor': 'gc'}, lltype.Void) + c_flavor = LLConstant({'flavor': 'gc'}, lltype.Void) op = SpaceOperation('malloc', [c_EXCTYPE, c_flavor], v) block.operations.append(op) # for name, FIELDTYPE in typedesc.names_and_types: EXACTPTR = lltype.Ptr(typedesc.name2subtype[name]) - c_name = Constant(name) - 
c_name.concretetype = lltype.Void + c_name = LLConstant(name, lltype.Void) # v_in = varoftype(EXACTPTR) op = SpaceOperation('cast_pointer', [v_incoming_value], v_in) @@ -608,7 +608,7 @@ block.operations.append(op) # exc_type = self.mallocv.EXCTYPE_to_vtable[typedesc.MALLOCTYPE] - c_exc_type = Constant(exc_type, TEXC) + c_exc_type = LLConstant(exc_type, TEXC) block.closeblock(Link([c_exc_type, v_exc_value], exceptblock)) return block @@ -774,8 +774,7 @@ self.setnode(v_result, newrtnode) if v_result.concretetype is not lltype.Void: assert v_result.concretetype == lltype.typeOf(value) - c_value = Constant(value) - c_value.concretetype = v_result.concretetype + c_value = LLConstant(value, v_result.concretetype) self.renamings[newrtnode] = c_value def handle_default(self, op): @@ -793,7 +792,7 @@ from rpython.rtyper.lltypesystem.rstr import string_repr msg = 'unreachable: %s' % (op,) ll_msg = string_repr.convert_const(msg) - c_msg = Constant(ll_msg, lltype.typeOf(ll_msg)) + c_msg = LLConstant(ll_msg, lltype.typeOf(ll_msg)) newresult = self.make_rt_result(op.result) return [SpaceOperation('debug_fatalerror', [c_msg], newresult)] @@ -889,8 +888,7 @@ for name, FIELDTYPE in typedesc.names_and_types: fieldnode = RuntimeSpecNode(name, FIELDTYPE) virtualnode.fields.append(fieldnode) - c = Constant(FIELDTYPE._defl()) - c.concretetype = FIELDTYPE + c = LLConstant(FIELDTYPE._defl(), FIELDTYPE) self.renamings[fieldnode] = c self.v_expand_malloc = None # done return [] @@ -939,8 +937,7 @@ def handle_residual_call(self, op, newgraph, newnodes): fspecptr = getfunctionptr(newgraph) - newargs = [Constant(fspecptr, - concretetype=lltype.typeOf(fspecptr))] + newargs = [LLConstant(fspecptr, concretetype=lltype.typeOf(fspecptr))] newargs += self.expand_nodes(newnodes) newresult = self.make_rt_result(op.result) newop = SpaceOperation('direct_call', newargs, newresult) diff --git a/rpython/translator/backendopt/test/test_constfold.py b/rpython/translator/backendopt/test/test_constfold.py --- 
a/rpython/translator/backendopt/test/test_constfold.py +++ b/rpython/translator/backendopt/test/test_constfold.py @@ -1,9 +1,9 @@ -import py -from rpython.flowspace.model import checkgraph, Constant, summary +from rpython.flowspace.model import checkgraph, summary from rpython.translator.translator import TranslationContext, graphof from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper import rclass from rpython.rlib import objectmodel from rpython.translator.backendopt.constfold import constant_fold_graph @@ -99,7 +99,7 @@ constant_fold_graph(graph) assert summary(graph) == {'int_mul': 1, 'int_eq': 3, 'int_add': 2} for link in graph.iterlinks(): - if Constant(139) in link.args: + if LLConstant(139, lltype.Signed) in link.args: break else: raise AssertionError("139 not found in the graph as a constant") @@ -316,7 +316,7 @@ elif n == 4: return -123 elif n == 5: return 12973 else: return n - + graph, t = get_graph(fn, [int]) from rpython.translator.backendopt.removenoops import remove_same_as from rpython.translator.backendopt import merge_if_blocks @@ -335,7 +335,7 @@ elif n == 4: return -123 elif n == 5: return 12973 else: return n - + graph, t = get_graph(fn, []) from rpython.translator.backendopt.removenoops import remove_same_as from rpython.translator.backendopt import merge_if_blocks diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -2,13 +2,13 @@ from rpython.translator.unsimplify import varoftype from rpython.translator.unsimplify import insert_empty_block, split_block from rpython.translator.backendopt import canraise, inline -from rpython.flowspace.model import Block, Constant, Variable, Link, \ - SpaceOperation, FunctionGraph, mkentrymap +from 
rpython.flowspace.model import ( + Block, Variable, Link, SpaceOperation, FunctionGraph, mkentrymap) from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.lltypesystem import lloperation from rpython.rtyper.rclass import ll_inst_type from rpython.rtyper import rtyper -from rpython.rtyper.rmodel import inputconst +from rpython.rtyper.rmodel import inputconst, LLConstant from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib.debug import ll_assert @@ -41,10 +41,10 @@ assert 0, "not implemented yet" def error_constant(T): - return Constant(error_value(T), T) + return LLConstant(error_value(T), T) def constant_value(llvalue): - return Constant(llvalue, lltype.typeOf(llvalue)) + return LLConstant(llvalue, lltype.typeOf(llvalue)) class ExceptionTransformer(object): @@ -460,16 +460,16 @@ null_value = lltype.nullptr(self.lltype_of_exception_value.TO) self.exc_data_ptr = exc_data - self.cexcdata = Constant(exc_data, lltype.Ptr(self.EXCDATA)) - self.c_null_etype = Constant(null_type, self.lltype_of_exception_type) - self.c_null_evalue = Constant(null_value, self.lltype_of_exception_value) + self.cexcdata = LLConstant(exc_data, lltype.Ptr(self.EXCDATA)) + self.c_null_etype = LLConstant(null_type, self.lltype_of_exception_type) + self.c_null_evalue = LLConstant(null_value, self.lltype_of_exception_value) return exc_data, null_type, null_value def constant_func(self, name, inputtypes, rettype, graph, **kwds): FUNC_TYPE = lltype.FuncType(inputtypes, rettype) fn_ptr = lltype.functionptr(FUNC_TYPE, name, graph=graph, **kwds) - return Constant(fn_ptr, lltype.Ptr(FUNC_TYPE)) + return LLConstant(fn_ptr, lltype.Ptr(FUNC_TYPE)) def gen_getfield(self, name, llops): c_name = inputconst(lltype.Void, name) diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -9,6 
+9,7 @@ SpaceOperation, Variable, Constant, Link, checkgraph) from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.rmodel import inputconst def checkgraphs(self, blocks): seen = set() @@ -205,7 +206,8 @@ graph = rtyper.annotate_helper(stack_check, []) rtyper.specialize_more_blocks() stack_check_ptr = rtyper.getcallable(graph) - stack_check_ptr_const = Constant(stack_check_ptr, lltype.typeOf(stack_check_ptr)) + stack_check_ptr_const = inputconst(lltype.typeOf(stack_check_ptr), + stack_check_ptr) edges = set() insert_in = set() block2graph = {} From noreply at buildbot.pypy.org Sat Oct 10 03:09:28 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 10 Oct 2015 03:09:28 +0200 (CEST) Subject: [pypy-commit] pypy llconst: create ll_const() Message-ID: <20151010010928.D3C341C0EFC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80100:3b4b18239cb8 Date: 2015-10-10 02:09 +0100 http://bitbucket.org/pypy/pypy/changeset/3b4b18239cb8/ Log: create ll_const() diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py --- a/rpython/rtyper/annlowlevel.py +++ b/rpython/rtyper/annlowlevel.py @@ -13,7 +13,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rlib.objectmodel import specialize from rpython.rtyper import extregistry -from rpython.rtyper.rmodel import warning, LLConstant +from rpython.rtyper.rmodel import warning, LLConstant, ll_const class KeyComp(object): @@ -161,7 +161,7 @@ def constfunc(self, ll_function, args_s, s_result): p = self.delayedfunction(ll_function, args_s, s_result) - return LLConstant(p, lltype.typeOf(p)) + return ll_const(p) def graph2delayed(self, graph, FUNCTYPE=None): if FUNCTYPE is None: @@ -176,7 +176,7 @@ def graph2const(self, graph): p = self.graph2delayed(graph) - return LLConstant(p, lltype.typeOf(p)) + return ll_const(p) def getdelayedrepr(self, s_value, check_never_seen=True): """Like rtyper.getrepr(), but the 
resulting repr will not be setup() at diff --git a/rpython/rtyper/controllerentry.py b/rpython/rtyper/controllerentry.py --- a/rpython/rtyper/controllerentry.py +++ b/rpython/rtyper/controllerentry.py @@ -201,7 +201,7 @@ from rpython.rtyper.lltypesystem import lltype assert hop.s_result.is_constant() hop.exception_cannot_occur() - return hop.inputconst(lltype.Bool, hop.s_result.const) + return hop.inputconst(hop.s_result.const) # ____________________________________________________________ diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,6 +1,7 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr +from rpython.rtyper.lltypesystem.lltype import FuncType, functionptr +from rpython.rtyper.rmodel import ll_const from rpython.annotator import model as annmodel from rpython.annotator.signature import annotation @@ -204,7 +205,7 @@ obj = functionptr(FT, name, _external_name=self.name, _callable=fakeimpl, _safe_not_sandboxed=self.safe_not_sandboxed) - vlist = [hop.inputconst(typeOf(obj), obj)] + hop.inputargs(*args_r) + vlist = [ll_const(obj)] + hop.inputargs(*args_r) hop.exception_is_here() return hop.genop('direct_call', vlist, r_result) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -3,6 +3,7 @@ """ from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rtyper.rmodel import ll_const class LLOp(object): @@ -118,7 +119,7 @@ def specialize_call(self, hop): from rpython.rtyper.lltypesystem import lltype hop.exception_cannot_occur() - return hop.inputconst(lltype.Void, None) + return ll_const(None) def enum_ops_without_sideeffects(raising_is_ok=False): """Enumerate operations that have no side-effects 
diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -12,7 +12,7 @@ from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char, UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive, typeOf, staticAdtMethod, GcForwardReference) -from rpython.rtyper.rmodel import inputconst, Repr +from rpython.rtyper.rmodel import inputconst, Repr, ll_const from rpython.rtyper.rint import IntegerRepr from rpython.rtyper.rstr import (AbstractStringRepr, AbstractCharRepr, AbstractUniCharRepr, AbstractStringIteratorRepr, AbstractLLHelpers, @@ -1179,11 +1179,11 @@ elif code == 'x': assert isinstance(r_arg, IntegerRepr) vchunk = hop.gendirectcall(ll_str.ll_int2hex, vitem, - inputconst(Bool, False)) + ll_const(False)) elif code == 'o': assert isinstance(r_arg, IntegerRepr) vchunk = hop.gendirectcall(ll_str.ll_int2oct, vitem, - inputconst(Bool, False)) + ll_const(False)) else: raise TyperError("%%%s is not RPython" % (code,)) else: diff --git a/rpython/rtyper/rint.py b/rpython/rtyper/rint.py --- a/rpython/rtyper/rint.py +++ b/rpython/rtyper/rint.py @@ -9,7 +9,7 @@ Char, UniChar, UnsignedLongLong, SignedLongLong, build_number, Number, cast_primitive, typeOf, SignedLongLongLong) from rpython.rtyper.rfloat import FloatRepr -from rpython.rtyper.rmodel import inputconst, log +from rpython.rtyper.rmodel import inputconst, log, ll_const from rpython.tool.pairtype import pairtype class IntegerRepr(FloatRepr): @@ -158,15 +158,13 @@ from rpython.rtyper.lltypesystem.ll_str import ll_int2hex self = self.as_int varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return hop.gendirectcall(ll_int2hex, varg, true) + return hop.gendirectcall(ll_int2hex, varg, ll_const(True)) def rtype_oct(self, hop): from rpython.rtyper.lltypesystem.ll_str import ll_int2oct self = self.as_int varg = hop.inputarg(self, 0) - true = inputconst(Bool, True) - return 
hop.gendirectcall(ll_int2oct, varg, true) + return hop.gendirectcall(ll_int2oct, varg, ll_const(True)) _integer_reprs = {} diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -2,7 +2,8 @@ from rpython.flowspace.model import Constant from rpython.rtyper.error import TyperError, MissingRTypeOperation from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.lltypesystem.lltype import Void, Bool, LowLevelType, Ptr +from rpython.rtyper.lltypesystem.lltype import ( + Void, Bool, LowLevelType, Ptr, typeOf) from rpython.tool.pairtype import pairtype, extendabletype, pair @@ -356,11 +357,15 @@ # ____________________________________________________________ class LLConstant(Constant): + """Low-level constant""" __slots__ = ["concretetype"] def __init__(self, value, concretetype): Constant.__init__(self, value) self.concretetype = concretetype +def ll_const(obj): + """Convert an object into a LLConstant""" + return LLConstant(obj, typeOf(obj)) def inputconst(reqtype, value): """Return a LLConstant with the given value, of the requested type, diff --git a/rpython/rtyper/rnone.py b/rpython/rtyper/rnone.py --- a/rpython/rtyper/rnone.py +++ b/rpython/rtyper/rnone.py @@ -1,5 +1,6 @@ from rpython.annotator.model import SomeNone -from rpython.rtyper.rmodel import Repr, TyperError, inputconst, LLConstant +from rpython.rtyper.rmodel import ( + Repr, TyperError, inputconst, LLConstant, ll_const) from rpython.rtyper.lltypesystem.lltype import Void, Bool, Ptr, Char from rpython.rtyper.lltypesystem.llmemory import Address from rpython.rtyper.rpbc import SmallFunctionSetPBCRepr @@ -43,7 +44,7 @@ class __extend__(pairtype(Repr, NoneRepr)): def convert_from_to((r_from, _), v, llops): - return inputconst(Void, None) + return ll_const(None) def rtype_is_((robj1, rnone2), hop): if hop.s_result.is_constant(): @@ -69,13 +70,13 @@ cnull = hop.inputconst(Address, robj1.null_instance()) return hop.genop('adr_eq', 
[v1, cnull], resulttype=Bool) elif robj1 == none_repr: - return hop.inputconst(Bool, True) + return ll_const(True) elif isinstance(robj1, SmallFunctionSetPBCRepr): if robj1.s_pbc.can_be_None: v1 = hop.inputarg(robj1, pos) return hop.genop('char_eq', [v1, inputconst(Char, '\000')], resulttype=Bool) else: - return inputconst(Bool, False) + return ll_const(False) else: raise TyperError('rtype_is_None of %r' % (robj1)) diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -16,8 +16,9 @@ from rpython.rtyper.lltypesystem.lltype import ( typeOf, Void, ForwardReference, Struct, Bool, Char, Ptr, malloc, nullptr, Array, Signed, cast_pointer, getfunctionptr) -from rpython.rtyper.rmodel import (Repr, inputconst, CanBeNull, mangle, - warning, impossible_repr, LLConstant) +from rpython.rtyper.rmodel import ( + Repr, inputconst, CanBeNull, mangle, warning, impossible_repr, + LLConstant, ll_const) from rpython.tool.pairtype import pair, pairtype from rpython.translator.unsimplify import varoftype @@ -332,7 +333,7 @@ row_of_one_graph = self.callfamily.calltables[shape][index] graph = row_of_one_graph[funcdesc] llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) + return ll_const(llfn) def get_unique_llfn(self): # try to build a unique low-level function. 
Avoid to use @@ -356,7 +357,7 @@ if graphs != [graph] * len(graphs): raise TyperError("cannot pass a specialized function here") llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) + return ll_const(llfn) def get_concrete_llfn(self, s_pbc, args_s, op): bk = self.rtyper.annotator.bookkeeper @@ -365,8 +366,7 @@ with bk.at_position(None): graph = funcdesc.get_graph(args, op) llfn = self.rtyper.getcallable(graph) - return inputconst(typeOf(llfn), llfn) - + return ll_const(llfn) class __extend__(pairtype(FunctionRepr, FunctionRepr)): @@ -379,7 +379,7 @@ class __extend__(pairtype(FunctionsPBCRepr, FunctionRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): - return inputconst(Void, None) + return ll_const(None) class __extend__(pairtype(FunctionsPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_fpbc1, r_fpbc2), v, llops): @@ -421,7 +421,7 @@ pointer_table[i] = self.pointer_repr.convert_desc(desc) else: pointer_table[i] = self.pointer_repr.convert_const(None) - self.c_pointer_table = inputconst(Ptr(POINTER_TABLE), pointer_table) + self.c_pointer_table = ll_const(pointer_table) def convert_desc(self, funcdesc): return chr(self.descriptions.index(funcdesc)) @@ -441,7 +441,7 @@ graph = self.make_dispatcher(shape, index, argtypes, resulttype) self.rtyper.annotator.translator.graphs.append(graph) ll_ret = getfunctionptr(graph) - c_ret = self._dispatch_cache[key] = inputconst(typeOf(ll_ret), ll_ret) + c_ret = self._dispatch_cache[key] = ll_const(ll_ret) return c_ret def make_dispatcher(self, shape, index, argtypes, resulttype): @@ -460,7 +460,7 @@ args_v = [varoftype(t) for t in argtypes] b = Block(args_v) llfn = self.rtyper.getcallable(row_of_graphs[desc]) - v_fn = inputconst(typeOf(llfn), llfn) + v_fn = ll_const(llfn) v_result = varoftype(resulttype) b.operations.append( SpaceOperation("direct_call", [v_fn] + args_v, v_result)) @@ -491,7 +491,7 @@ def rtype_bool(self, hop): if not self.s_pbc.can_be_None: - return inputconst(Bool, True) + 
return ll_const(True) else: v1, = hop.inputargs(self) return hop.genop('char_ne', [v1, inputconst(Char, '\000')], @@ -500,7 +500,7 @@ class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionRepr)): def convert_from_to((r_set, r_ptr), v, llops): - return inputconst(Void, None) + return ll_const(None) class __extend__(pairtype(SmallFunctionSetPBCRepr, FunctionsPBCRepr)): def convert_from_to((r_set, r_ptr), v, llops): diff --git a/rpython/rtyper/rptr.py b/rpython/rtyper/rptr.py --- a/rpython/rtyper/rptr.py +++ b/rpython/rtyper/rptr.py @@ -4,7 +4,7 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import Repr, inputconst +from rpython.rtyper.rmodel import Repr, inputconst, ll_const from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype @@ -100,7 +100,7 @@ opname = 'direct_call' else: opname = 'indirect_call' - vlist.append(inputconst(lltype.Void, None)) + vlist.append(ll_const(None)) hop.exception_is_here() return hop.genop(opname, vlist, resulttype = self.lowleveltype.TO.RESULT) diff --git a/rpython/rtyper/rrange.py b/rpython/rtyper/rrange.py --- a/rpython/rtyper/rrange.py +++ b/rpython/rtyper/rrange.py @@ -2,7 +2,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.lltypesystem.lltype import Signed, Void, Ptr from rpython.rtyper.rlist import dum_nocheck, dum_checkidx -from rpython.rtyper.rmodel import Repr, IteratorRepr +from rpython.rtyper.rmodel import Repr, IteratorRepr, ll_const from rpython.rtyper.rint import IntegerRepr from rpython.tool.pairtype import pairtype @@ -94,9 +94,9 @@ # Irregular operations. 
def rtype_builtin_range(hop): - vstep = hop.inputconst(Signed, 1) + vstep = ll_const(1) if hop.nb_args == 1: - vstart = hop.inputconst(Signed, 0) + vstart = ll_const(0) vstop, = hop.inputargs(Signed) elif hop.nb_args == 2: vstart, vstop = hop.inputargs(Signed, Signed) diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -24,7 +24,8 @@ from rpython.rtyper.lltypesystem.lltype import (Signed, Void, LowLevelType, Ptr, ContainerType, FuncType, functionptr, typeOf, RuntimeTypeInfo, attachRuntimeTypeInfo, Primitive, getfunctionptr) -from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError +from rpython.rtyper.rmodel import ( + Repr, inputconst, BrokenReprTyperError, ll_const) from rpython.rtyper import rclass from rpython.rtyper.rclass import RootClassRepr from rpython.tool.pairtype import pair @@ -871,7 +872,7 @@ # build the 'direct_call' operation f = self.rtyper.getcallable(graph) - c = inputconst(typeOf(f), f) + c = ll_const(f) fobj = f._obj return self.genop('direct_call', [c]+newargs_v, resulttype = typeOf(fobj).RESULT) @@ -882,14 +883,14 @@ argtypes = [v.concretetype for v in args_v] FUNCTYPE = FuncType(argtypes, resulttype or Void) f = functionptr(FUNCTYPE, fnname, **flags) - cf = inputconst(typeOf(f), f) + cf = ll_const(f) return self.genop('direct_call', [cf]+list(args_v), resulttype) def gencapicall(self, cfnname, args_v, resulttype=None, **flags): return self.genexternalcall(cfnname, args_v, resulttype=resulttype, external="CPython", **flags) def genconst(self, ll_value): - return inputconst(typeOf(ll_value), ll_value) + return ll_const(ll_value) def genvoidconst(self, placeholder): return inputconst(Void, placeholder) diff --git a/rpython/rtyper/rvirtualizable.py b/rpython/rtyper/rvirtualizable.py --- a/rpython/rtyper/rvirtualizable.py +++ b/rpython/rtyper/rvirtualizable.py @@ -1,4 +1,4 @@ -from rpython.rtyper.rmodel import inputconst, log +from rpython.rtyper.rmodel 
import inputconst, log, ll_const from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.rclass import (FieldListAccessor, InstanceRepr) @@ -55,7 +55,7 @@ def replace_force_virtualizable_with_call(graphs, VTYPEPTR, funcptr): # funcptr should be a function pointer with a VTYPEPTR argument - c_funcptr = inputconst(lltype.typeOf(funcptr), funcptr) + c_funcptr = ll_const(funcptr) count = 0 for graph in graphs: for block in graph.iterblocks(): diff --git a/rpython/translator/backendopt/constfold.py b/rpython/translator/backendopt/constfold.py --- a/rpython/translator/backendopt/constfold.py +++ b/rpython/translator/backendopt/constfold.py @@ -1,6 +1,6 @@ from rpython.flowspace.model import (Variable, SpaceOperation, mkentrymap) -from rpython.rtyper.rmodel import LLConstant +from rpython.rtyper.rmodel import LLConstant, ll_const from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop from rpython.translator.unsimplify import insert_empty_block, split_block diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -2,7 +2,7 @@ from rpython.flowspace.model import (Variable, Constant, Block, Link, SpaceOperation, FunctionGraph, mkentrymap) -from rpython.rtyper.rmodel import LLConstant, inputconst +from rpython.rtyper.rmodel import LLConstant, ll_const from rpython.rtyper.lltypesystem.lltype import Bool, Signed, typeOf, Void, Ptr, normalizeptr from rpython.tool.algo import sparsemat from rpython.translator.backendopt import removenoops @@ -357,7 +357,7 @@ #XXXXX don't look: insert blocks that do exception matching #for the cases where direct matching did not work ll_exc_match = self.translator.rtyper.exceptiondata.fn_exception_match - exc_match = inputconst(typeOf(ll_exc_match), ll_exc_match) + exc_match = ll_const(ll_exc_match) blocks = [] for i, link in 
enumerate(afterblock.exits[1:]): etype = copiedexceptblock.inputargs[0].copy() @@ -366,8 +366,7 @@ block = Block([etype, evalue] + passon_vars) res = Variable() res.concretetype = Bool - exitcase = link.llexitcase - cexitcase = LLConstant(exitcase, typeOf(exitcase)) + cexitcase = ll_const(link.llexitcase) args = [exc_match, etype, cexitcase] block.operations.append(SpaceOperation("direct_call", args, res)) block.exitswitch = res diff --git a/rpython/translator/backendopt/malloc.py b/rpython/translator/backendopt/malloc.py --- a/rpython/translator/backendopt/malloc.py +++ b/rpython/translator/backendopt/malloc.py @@ -1,7 +1,7 @@ from rpython.flowspace.model import Variable, Constant, SpaceOperation from rpython.tool.algo.unionfind import UnionFind from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import LLConstant +from rpython.rtyper.rmodel import LLConstant, ll_const from rpython.translator import simplify from rpython.translator.backendopt import removenoops from rpython.translator.backendopt.support import log @@ -456,7 +456,7 @@ from rpython.rtyper.lltypesystem.rstr import string_repr msg = "unreachable operation (from malloc.py)" ll_msg = string_repr.convert_const(msg) - c_msg = LLConstant(ll_msg, lltype.typeOf(ll_msg)) + c_msg = ll_const(ll_msg) return SpaceOperation("debug_fatalerror", [c_msg], v_result) def flowin_op(self, op, vars, newvarsmap): diff --git a/rpython/translator/backendopt/mallocv.py b/rpython/translator/backendopt/mallocv.py --- a/rpython/translator/backendopt/mallocv.py +++ b/rpython/translator/backendopt/mallocv.py @@ -7,7 +7,7 @@ from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.lltypesystem.lloperation import llop -from rpython.rtyper.rmodel import LLConstant +from rpython.rtyper.rmodel import LLConstant, ll_const def virtualize_mallocs(translator, graphs, verbose=False): @@ -792,7 +792,7 @@ from rpython.rtyper.lltypesystem.rstr import 
string_repr msg = 'unreachable: %s' % (op,) ll_msg = string_repr.convert_const(msg) - c_msg = LLConstant(ll_msg, lltype.typeOf(ll_msg)) + c_msg = ll_const(ll_msg) newresult = self.make_rt_result(op.result) return [SpaceOperation('debug_fatalerror', [c_msg], newresult)] @@ -937,7 +937,7 @@ def handle_residual_call(self, op, newgraph, newnodes): fspecptr = getfunctionptr(newgraph) - newargs = [LLConstant(fspecptr, concretetype=lltype.typeOf(fspecptr))] + newargs = [ll_const(fspecptr)] newargs += self.expand_nodes(newnodes) newresult = self.make_rt_result(op.result) newop = SpaceOperation('direct_call', newargs, newresult) diff --git a/rpython/translator/backendopt/support.py b/rpython/translator/backendopt/support.py --- a/rpython/translator/backendopt/support.py +++ b/rpython/translator/backendopt/support.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import inputconst +from rpython.rtyper.rmodel import ll_const from rpython.tool.ansi_print import ansi_log from rpython.translator.simplify import get_graph @@ -22,10 +22,11 @@ yield op def annotate(translator, func, result, args): - args = [arg.concretetype for arg in args] - graph = translator.rtyper.annotate_helper(func, args) - fptr = lltype.functionptr(lltype.FuncType(args, result.concretetype), func.func_name, graph=graph) - c = inputconst(lltype.typeOf(fptr), fptr) + args = [arg.concretetype for arg in args] + graph = translator.rtyper.annotate_helper(func, args) + fptr = lltype.functionptr(lltype.FuncType(args, result.concretetype), + func.func_name, graph=graph) + c = ll_const(fptr) return c def var_needsgc(var): diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import lloperation from rpython.rtyper.rclass import ll_inst_type from rpython.rtyper import rtyper -from 
rpython.rtyper.rmodel import inputconst, LLConstant +from rpython.rtyper.rmodel import inputconst, LLConstant, ll_const from rpython.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat from rpython.rlib.debug import ll_assert @@ -43,9 +43,6 @@ def error_constant(T): return LLConstant(error_value(T), T) -def constant_value(llvalue): - return LLConstant(llvalue, lltype.typeOf(llvalue)) - class ExceptionTransformer(object): def __init__(self, translator): @@ -62,9 +59,9 @@ (n_i_error_ll_exc_type, n_i_error_ll_exc) = self.get_builtin_exception(NotImplementedError) - self.c_assertion_error_ll_exc_type = constant_value( + self.c_assertion_error_ll_exc_type = ll_const( assertion_error_ll_exc_type) - self.c_n_i_error_ll_exc_type = constant_value(n_i_error_ll_exc_type) + self.c_n_i_error_ll_exc_type = ll_const(n_i_error_ll_exc_type) def rpyexc_occured(): exc_type = exc_data.exc_type @@ -469,7 +466,7 @@ def constant_func(self, name, inputtypes, rettype, graph, **kwds): FUNC_TYPE = lltype.FuncType(inputtypes, rettype) fn_ptr = lltype.functionptr(FUNC_TYPE, name, graph=graph, **kwds) - return LLConstant(fn_ptr, lltype.Ptr(FUNC_TYPE)) + return ll_const(fn_ptr) def gen_getfield(self, name, llops): c_name = inputconst(lltype.Void, name) diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -9,7 +9,7 @@ SpaceOperation, Variable, Constant, Link, checkgraph) from rpython.annotator import model as annmodel from rpython.rtyper.lltypesystem import lltype -from rpython.rtyper.rmodel import inputconst +from rpython.rtyper.rmodel import ll_const def checkgraphs(self, blocks): seen = set() @@ -206,8 +206,7 @@ graph = rtyper.annotate_helper(stack_check, []) rtyper.specialize_more_blocks() stack_check_ptr = rtyper.getcallable(graph) - stack_check_ptr_const = inputconst(lltype.typeOf(stack_check_ptr), - stack_check_ptr) + 
stack_check_ptr_const = ll_const(stack_check_ptr) edges = set() insert_in = set() block2graph = {} From noreply at buildbot.pypy.org Sat Oct 10 05:20:16 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 10 Oct 2015 05:20:16 +0200 (CEST) Subject: [pypy-commit] pypy llconst: fix tests Message-ID: <20151010032016.22F3C1C0EFC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80101:21621915e9f0 Date: 2015-10-10 04:20 +0100 http://bitbucket.org/pypy/pypy/changeset/21621915e9f0/ Log: fix tests diff --git a/rpython/jit/codewriter/flatten.py b/rpython/jit/codewriter/flatten.py --- a/rpython/jit/codewriter/flatten.py +++ b/rpython/jit/codewriter/flatten.py @@ -1,4 +1,5 @@ from rpython.flowspace.model import Variable, Constant, c_last_exception +from rpython.rtyper.rmodel import ll_const from rpython.jit.metainterp.history import AbstractDescr, getkind from rpython.rtyper.lltypesystem import lltype @@ -166,7 +167,7 @@ exc_data = self.cpu.rtyper.exceptiondata ll_ovf = exc_data.get_standard_ll_exc_instance_by_class( OverflowError) - c = Constant(ll_ovf, concretetype=lltype.typeOf(ll_ovf)) + c = ll_const(ll_ovf) self.emitline("raise", c) else: self.emitline("reraise") @@ -226,8 +227,7 @@ self.make_exception_link(link, False) break self.emitline('goto_if_exception_mismatch', - Constant(link.llexitcase, - lltype.typeOf(link.llexitcase)), + ll_const(link.llexitcase), TLabel(link)) self.make_exception_link(link, False) self.emitline(Label(link)) diff --git a/rpython/jit/codewriter/format.py b/rpython/jit/codewriter/format.py --- a/rpython/jit/codewriter/format.py +++ b/rpython/jit/codewriter/format.py @@ -1,5 +1,5 @@ import py -from rpython.flowspace.model import Constant +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper.lltypesystem import lltype from rpython.jit.codewriter.flatten import SSARepr, Label, TLabel, Register from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets @@ -14,7 +14,7 @@ def 
repr(x): if isinstance(x, Register): return '%%%s%d' % (x.kind[0], x.index) # e.g. %i1 or %r2 or %f3 - elif isinstance(x, Constant): + elif isinstance(x, LLConstant): if (isinstance(x.concretetype, lltype.Ptr) and isinstance(x.concretetype.TO, lltype.Struct)): return '$<* struct %s>' % (x.concretetype.TO._name,) @@ -117,7 +117,7 @@ return reg elif s[0] == '$': intvalue = int(s[1:]) - return Constant(intvalue, lltype.Signed) + return ll_const(intvalue) elif s[0] == 'L': return TLabel(s) elif s[0] in 'IRF' and s[1] == '[' and s[-1] == ']': diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -8,11 +8,11 @@ from rpython.jit.metainterp.history import getkind from rpython.jit.metainterp.typesystem import deref, arrayItem from rpython.jit.metainterp.blackhole import BlackholeInterpreter -from rpython.flowspace.model import SpaceOperation, Variable, Constant,\ - c_last_exception +from rpython.flowspace.model import SpaceOperation, Variable, Constant from rpython.rlib import objectmodel from rpython.rlib.jit import _we_are_jitted from rpython.rlib.rgc import lltype_is_gc +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi from rpython.rtyper.lltypesystem import rbytearray from rpython.rtyper import rclass @@ -44,7 +44,7 @@ constant_result = excmatch(*[a.value for a in op.args[1:]]) block.operations[i] = SpaceOperation( 'same_as', - [Constant(constant_result, lltype.Bool)], + [LLConstant(constant_result, lltype.Bool)], op.result) if block.exitswitch is op.result: block.exitswitch = None @@ -81,13 +81,13 @@ # def do_rename(var, var_or_const): if var.concretetype is lltype.Void: - renamings[var] = Constant(None, lltype.Void) + renamings[var] = ll_const(None) return renamings[var] = var_or_const if isinstance(var_or_const, Constant): value = var_or_const.value value = 
lltype._cast_whatever(var.concretetype, value) - renamings_constants[var] = Constant(value, var.concretetype) + renamings_constants[var] = LLConstant(value, var.concretetype) # for op in block.operations: if renamings_constants: @@ -221,7 +221,7 @@ for link in block.exits: while v in link.args: index = link.args.index(v) - link.args[index] = Constant(link.llexitcase, + link.args[index] = LLConstant(link.llexitcase, lltype.Bool) return True return False @@ -257,7 +257,7 @@ vtable = heaptracker.get_vtable_for_gcstruct(self.cpu, TO) if vtable.subclassrange_max - vtable.subclassrange_min == 1: # it's a precise class check - const_vtable = Constant(vtable, lltype.typeOf(vtable)) + const_vtable = ll_const(vtable) return [None, # hack, do the right renaming from op.args[0] to op.result SpaceOperation("record_exact_class", [op.args[0], const_vtable], None)] @@ -274,7 +274,7 @@ # only for untranslated tests: get a real integer estimate arg = op.args[0].value arg = llmemory.raw_malloc_usage(arg) - return [Constant(arg, lltype.Signed)] + return [LLConstant(arg, lltype.Signed)] def rewrite_op_jit_record_exact_class(self, op): return SpaceOperation("record_exact_class", [op.args[0], op.args[1]], None) @@ -471,7 +471,7 @@ assert jitdriver_sd is not None ops = self.promote_greens(op.args[1:], jitdriver_sd.jitdriver) num_green_args = len(jitdriver_sd.jitdriver.greens) - args = ([Constant(jitdriver_sd.index, lltype.Signed)] + + args = ([LLConstant(jitdriver_sd.index, lltype.Signed)] + self.make_three_lists(op.args[1:1+num_green_args]) + self.make_three_lists(op.args[1+num_green_args:])) kind = getkind(op.result.concretetype)[0] @@ -582,7 +582,7 @@ EffectInfo.OS_STREQ_NONNULL) # XXX this is fairly ugly way of creating a constant, # however, callinfocollection has no better interface - c = Constant(p.adr.ptr, lltype.typeOf(p.adr.ptr)) + c = ll_const(p.adr.ptr) op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr], op.result) return [SpaceOperation('-live-', [], None), op1, 
None] @@ -643,8 +643,8 @@ # substruct self.zero_contents(ops, v, FIELD) else: - c_name = Constant(name, lltype.Void) - c_null = Constant(FIELD._defl(), FIELD) + c_name = LLConstant(name, lltype.Void) + c_null = LLConstant(FIELD._defl(), FIELD) op = SpaceOperation('setfield', [v, c_name, c_null], None) self.extend_with(ops, self.rewrite_op_setfield(op, @@ -851,7 +851,7 @@ raise Exception("%r: only supported for gckind=raw" % (op,)) ofs = llmemory.offsetof(STRUCT, op.args[1].value) return SpaceOperation('int_add', - [op.args[0], Constant(ofs, lltype.Signed)], + [op.args[0], LLConstant(ofs, lltype.Signed)], op.result) def is_typeptr_getset(self, op): @@ -909,7 +909,7 @@ def handle_getfield_typeptr(self, op): if isinstance(op.args[0], Constant): cls = op.args[0].value.typeptr - return Constant(cls, concretetype=rclass.CLASSTYPE) + return LLConstant(cls, rclass.CLASSTYPE) op0 = SpaceOperation('-live-', [], None) op1 = SpaceOperation('guard_class', [op.args[0]], op.result) return [op0, op1] @@ -1232,7 +1232,7 @@ elif longlong_arg: if v_result.concretetype is lltype.Bool: longlong_zero = rffi.cast(v_arg.concretetype, 0) - c_longlong_zero = Constant(longlong_zero, v_arg.concretetype) + c_longlong_zero = ll_const(longlong_zero) if unsigned1: name = 'ullong_ne' else: @@ -1295,11 +1295,11 @@ if v_result.concretetype is lltype.Bool: result.append(SpaceOperation('int_is_true', [v_arg], v_result)) elif min2: - c_bytes = Constant(size2, lltype.Signed) + c_bytes = LLConstant(size2, lltype.Signed) result.append(SpaceOperation('int_signext', [v_arg, c_bytes], v_result)) else: - c_mask = Constant(int((1 << (8 * size2)) - 1), lltype.Signed) + c_mask = LLConstant(int((1 << (8 * size2)) - 1), lltype.Signed) result.append(SpaceOperation('int_and', [v_arg, c_mask], v_result)) return result @@ -1323,7 +1323,7 @@ if op.args[0].concretetype != rffi.CCHARP: v_prod = varoftype(lltype.Signed) by = llmemory.sizeof(op.args[0].concretetype.TO.OF) - c_by = Constant(by, lltype.Signed) + c_by = 
LLConstant(by, lltype.Signed) ops.append(SpaceOperation('int_mul', [v_shift, c_by], v_prod)) v_shift = v_prod # @@ -1407,9 +1407,7 @@ def rewrite_op_llong_neg(self, op): v = varoftype(lltype.SignedLongLong) - op0 = SpaceOperation('cast_int_to_longlong', - [Constant(0, lltype.Signed)], - v) + op0 = SpaceOperation('cast_int_to_longlong', [ll_const(0)], v) args = [v, op.args[0]] op1 = SpaceOperation('llong_sub', args, op.result) return (self._normalize(self.rewrite_operation(op0)) + @@ -1417,9 +1415,7 @@ def rewrite_op_llong_is_true(self, op): v = varoftype(op.args[0].concretetype) - op0 = SpaceOperation('cast_primitive', - [Constant(0, lltype.Signed)], - v) + op0 = SpaceOperation('cast_primitive', [ll_const(0)], v) args = [op.args[0], v] op1 = SpaceOperation('llong_ne', args, op.result) return (self._normalize(self.rewrite_operation(op0)) + @@ -1471,14 +1467,12 @@ def rewrite_op_int_neg_ovf(self, op): op1 = SpaceOperation('int_sub_ovf', - [Constant(0, lltype.Signed), op.args[0]], - op.result) + [ll_const(0), op.args[0]], op.result) return self.rewrite_operation(op1) def rewrite_op_float_is_true(self, op): op1 = SpaceOperation('float_ne', - [op.args[0], Constant(0.0, lltype.Float)], - op.result) + [op.args[0], ll_const(0.0)], op.result) return self.rewrite_operation(op1) def rewrite_op_int_is_true(self, op): @@ -1491,7 +1485,7 @@ else: raise AssertionError("don't know the truth value of %r" % (value,)) - return Constant(value, lltype.Bool) + return LLConstant(value, lltype.Bool) return op def promote_greens(self, args, jitdriver): @@ -1545,7 +1539,7 @@ "Constant specified red in jit_merge_point()") assert len(dict.fromkeys(redlist)) == len(list(redlist)), ( "duplicate red variable on jit_merge_point()") - args = ([Constant(self.portal_jd.index, lltype.Signed)] + + args = ([LLConstant(self.portal_jd.index, lltype.Signed)] + self.make_three_lists(op.args[2:2+num_green_args]) + redlists) op1 = SpaceOperation('jit_merge_point', args, None) @@ -1558,7 +1552,7 @@ def 
handle_jit_marker__loop_header(self, op, jitdriver): jd = self.callcontrol.jitdriver_sd_from_jitdriver(jitdriver) assert jd is not None - c_index = Constant(jd.index, lltype.Signed) + c_index = LLConstant(jd.index, lltype.Signed) return SpaceOperation('loop_header', [c_index], None) # a 'can_enter_jit' in the source graph becomes a 'loop_header' @@ -1683,7 +1677,7 @@ assert v_length.concretetype is lltype.Signed return v_length else: - return Constant(0, lltype.Signed) # length: default to 0 + return ll_const(0) # length: default to 0 # ---------- fixed lists ---------- diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -3,12 +3,13 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.policy import AnnotatorPolicy -from rpython.flowspace.model import Variable, Constant +from rpython.flowspace.model import Variable from rpython.jit.metainterp.typesystem import deref from rpython.rlib import rgc from rpython.rlib.jit import elidable, oopspec from rpython.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint, intmask from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper import rlist from rpython.rtyper.lltypesystem import rlist as rlist_ll from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator @@ -758,7 +759,7 @@ if isinstance(obj, Index): result.append(opargs[obj.n]) else: - result.append(Constant(obj, lltype.typeOf(obj))) + result.append(ll_const(obj)) return result def get_call_oopspec_opargs(fnobj, opargs): @@ -837,8 +838,8 @@ mixlevelann.finish() else: # for testing only - c_func = Constant(oopspec_name, - lltype.Ptr(lltype.FuncType(ll_args, ll_res))) + c_func = LLConstant(oopspec_name, + lltype.Ptr(lltype.FuncType(ll_args, ll_res))) # if not hasattr(rtyper, '_builtin_func_for_spec_cache'): 
rtyper._builtin_func_for_spec_cache = {} diff --git a/rpython/jit/codewriter/test/test_assembler.py b/rpython/jit/codewriter/test/test_assembler.py --- a/rpython/jit/codewriter/test/test_assembler.py +++ b/rpython/jit/codewriter/test/test_assembler.py @@ -5,7 +5,7 @@ from rpython.jit.codewriter.jitcode import MissingLiveness from rpython.jit.codewriter import heaptracker, longlong from rpython.jit.metainterp.history import AbstractDescr -from rpython.flowspace.model import Constant +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem import lltype, llmemory @@ -30,10 +30,10 @@ ssarepr = SSARepr("test") ssarepr.insns = [ ('int_return', Register('int', 13)), - ('int_return', Constant(18, lltype.Signed)), - ('int_return', Constant(-4, lltype.Signed)), - ('int_return', Constant(128, lltype.Signed)), - ('int_return', Constant(-129, lltype.Signed)), + ('int_return', LLConstant(18, lltype.Signed)), + ('int_return', LLConstant(-4, lltype.Signed)), + ('int_return', LLConstant(128, lltype.Signed)), + ('int_return', LLConstant(-129, lltype.Signed)), ] assembler = Assembler() jitcode = assembler.assemble(ssarepr) @@ -50,9 +50,9 @@ ssarepr = SSARepr("test") ssarepr.insns = [ ('float_return', Register('float', 13)), - ('float_return', Constant(18.0, lltype.Float)), - ('float_return', Constant(-4.0, lltype.Float)), - ('float_return', Constant(128.1, lltype.Float)), + ('float_return', LLConstant(18.0, lltype.Float)), + ('float_return', LLConstant(-4.0, lltype.Float)), + ('float_return', LLConstant(128.1, lltype.Float)), ] assembler = Assembler() jitcode = assembler.assemble(ssarepr) @@ -71,9 +71,9 @@ from rpython.rlib.rarithmetic import r_longlong, r_ulonglong ssarepr = SSARepr("test") ssarepr.insns = [ - ('float_return', Constant(r_longlong(-18000000000000000), + ('float_return', LLConstant(r_longlong(-18000000000000000), lltype.SignedLongLong)), - ('float_return', Constant(r_ulonglong(9900000000000000000), + ('float_return', 
LLConstant(r_ulonglong(9900000000000000000), lltype.UnsignedLongLong)), ] assembler = Assembler() @@ -91,10 +91,10 @@ F = lltype.FuncType([], lltype.Signed) f = lltype.functionptr(F, 'f') ssarepr.insns = [ - ('int_return', Constant('X', lltype.Char)), - ('int_return', Constant(unichr(0x1234), lltype.UniChar)), - ('int_return', Constant(f, lltype.Ptr(F))), - ('ref_return', Constant(s, lltype.Ptr(S))), + ('int_return', LLConstant('X', lltype.Char)), + ('int_return', LLConstant(unichr(0x1234), lltype.UniChar)), + ('int_return', LLConstant(f, lltype.Ptr(F))), + ('ref_return', LLConstant(s, lltype.Ptr(S))), ] assembler = Assembler() jitcode = assembler.assemble(ssarepr) @@ -115,9 +115,9 @@ i0, i1 = Register('int', 0x16), Register('int', 0x17) ssarepr.insns = [ (Label('L1'),), - ('goto_if_not_int_gt', i0, Constant(4, lltype.Signed), TLabel('L2')), + ('goto_if_not_int_gt', i0, LLConstant(4, lltype.Signed), TLabel('L2')), ('int_add', i1, i0, '->', i1), - ('int_sub', i0, Constant(1, lltype.Signed), '->', i0), + ('int_sub', i0, LLConstant(1, lltype.Signed), '->', i0), ('goto', TLabel('L1')), (Label('L2'),), ('int_return', i1), @@ -139,7 +139,7 @@ ssarepr = SSARepr("test") i0, i1 = Register('int', 0x16), Register('int', 0x17) ssarepr.insns = [ - ('foobar', ListOfKind('int', [i0, i1, Constant(42, lltype.Signed)]), + ('foobar', ListOfKind('int', [i0, i1, LLConstant(42, lltype.Signed)]), ListOfKind('ref', [])), ] assembler = Assembler() @@ -154,10 +154,10 @@ # encoded directly. 
ssarepr = SSARepr("test") ssarepr.insns = [ - ('foobar', ListOfKind('int', [Constant(42, lltype.Signed)])), - ('foobar', ListOfKind('int', [Constant(42, lltype.Signed)])), - ('baz', Constant(42, lltype.Signed)), - ('bok', Constant(41, lltype.Signed)), + ('foobar', ListOfKind('int', [LLConstant(42, lltype.Signed)])), + ('foobar', ListOfKind('int', [LLConstant(42, lltype.Signed)])), + ('baz', LLConstant(42, lltype.Signed)), + ('bok', LLConstant(41, lltype.Signed)), ] assembler = Assembler() jitcode = assembler.assemble(ssarepr) @@ -214,10 +214,10 @@ ssarepr = SSARepr("test") i0, i1, i2 = Register('int', 0), Register('int', 1), Register('int', 2) ssarepr.insns = [ - ('int_add', i0, Constant(10, lltype.Signed), '->', i1), + ('int_add', i0, LLConstant(10, lltype.Signed), '->', i1), ('-live-', i0, i1), ('-live-', i1, i2), - ('int_add', i0, Constant(3, lltype.Signed), '->', i2), + ('int_add', i0, LLConstant(3, lltype.Signed), '->', i2), ('-live-', i2), ] assembler = Assembler() @@ -233,7 +233,7 @@ def test_assemble_error_string_constant(): ssarepr = SSARepr("test") - c = Constant('foobar', lltype.Void) + c = LLConstant('foobar', lltype.Void) ssarepr.insns = [ ('duh', c), ] diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -1,6 +1,7 @@ import py from rpython.flowspace.model import SpaceOperation, Constant, Variable +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.translator.unsimplify import varoftype from rpython.rlib import jit @@ -19,7 +20,7 @@ F = lltype.FuncType([], lltype.Signed) f = lltype.functionptr(F, 'f', graph='fgraph') v = varoftype(lltype.Signed) - op = SpaceOperation('direct_call', [Constant(f, lltype.Ptr(F))], v) + op = SpaceOperation('direct_call', [ll_const(f)], v) # lst = cc.graphs_from(op, {}.__contains__) assert lst is None # 
residual call @@ -33,7 +34,7 @@ v = varoftype(lltype.Signed) graphlst = ['f1graph', 'f2graph'] op = SpaceOperation('indirect_call', [varoftype(lltype.Ptr(F)), - Constant(graphlst, lltype.Void)], v) + LLConstant(graphlst, lltype.Void)], v) # lst = cc.graphs_from(op, {'f1graph': True, 'f2graph': True}.__contains__) assert lst == ['f1graph', 'f2graph'] # normal indirect call @@ -48,8 +49,8 @@ cc = CallControl() F = lltype.FuncType([], lltype.Signed) v = varoftype(lltype.Signed) - op = SpaceOperation('indirect_call', [varoftype(lltype.Ptr(F)), - Constant(None, lltype.Void)], v) + op = SpaceOperation('indirect_call', + [varoftype(lltype.Ptr(F)), ll_const(None)], v) lst = cc.graphs_from(op, {}.__contains__) assert lst is None diff --git a/rpython/jit/codewriter/test/test_flatten.py b/rpython/jit/codewriter/test/test_flatten.py --- a/rpython/jit/codewriter/test/test_flatten.py +++ b/rpython/jit/codewriter/test/test_flatten.py @@ -1,4 +1,4 @@ -import py, sys +import py from rpython.jit.codewriter import support from rpython.jit.codewriter.heaptracker import int_signext from rpython.jit.codewriter.flatten import flatten_graph, reorder_renaming_list @@ -9,7 +9,8 @@ from rpython.jit.metainterp.history import AbstractDescr from rpython.rtyper.lltypesystem import lltype, rstr, rffi from rpython.rtyper import rclass -from rpython.flowspace.model import SpaceOperation, Variable, Constant +from rpython.flowspace.model import SpaceOperation +from rpython.rtyper.rmodel import LLConstant, ll_const from rpython.translator.unsimplify import varoftype from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_longlong, r_ulonglong from rpython.rlib.jit import dont_look_inside, _we_are_jitted, JitDriver @@ -44,7 +45,7 @@ def __getitem__(self, key): F = lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed) f = lltype.functionptr(F, key[0]) - c_func = Constant(f, lltype.typeOf(f)) + c_func = ll_const(f) return c_func, lltype.Signed class FakeCPU: @@ -140,7 +141,7 @@ def 
encoding_test(self, func, args, expected, transform=False, liveness=False, cc=None, jd=None): - + graphs = self.make_graphs(func, args) #graphs[0].show() if transform: @@ -252,7 +253,7 @@ v4 = varoftype(lltype.Ptr(rstr.STR)) v5 = varoftype(lltype.Float) op = SpaceOperation('residual_call_ir_f', - [Constant(12345, lltype.Signed), # function ptr + [LLConstant(12345, lltype.Signed), # function ptr ListOfKind('int', [v1, v2]), # int args ListOfKind('ref', [v3, v4])], # ref args v5) # result diff --git a/rpython/jit/codewriter/test/test_format.py b/rpython/jit/codewriter/test/test_format.py --- a/rpython/jit/codewriter/test/test_format.py +++ b/rpython/jit/codewriter/test/test_format.py @@ -1,5 +1,5 @@ import py -from rpython.flowspace.model import Constant +from rpython.rtyper.rmodel import ll_const from rpython.jit.codewriter.format import format_assembler, unformat_assembler from rpython.jit.codewriter.flatten import Label, TLabel, SSARepr, Register from rpython.jit.codewriter.flatten import ListOfKind @@ -39,9 +39,7 @@ s = lltype.malloc(S) s.x = 123 ssarepr = SSARepr("test") - ssarepr.insns = [ - ('foobar', '->', Constant(s, lltype.typeOf(s))), - ] + ssarepr.insns = [('foobar', '->', ll_const(s))] asm = format_assembler(ssarepr) expected = """ foobar -> $<* struct S> @@ -53,9 +51,9 @@ i0, i1 = Register('int', 0), Register('int', 1) ssarepr.insns = [ (Label('L1'),), - ('goto_if_not_int_gt', i0, Constant(0, lltype.Signed), TLabel('L2')), + ('goto_if_not_int_gt', i0, ll_const(0), TLabel('L2')), ('int_add', i1, i0, '->', i1), - ('int_sub', i0, Constant(1, lltype.Signed), '->', i0), + ('int_sub', i0, ll_const(1), '->', i0), ('goto', TLabel('L1')), (Label('L2'),), ('int_return', i1), @@ -75,9 +73,7 @@ def test_format_assembler_list(): ssarepr = SSARepr("test") i0, i1 = Register('int', 0), Register('int', 1) - ssarepr.insns = [ - ('foobar', ListOfKind('int', [i0, Constant(123, lltype.Signed), i1])), - ] + ssarepr.insns = [('foobar', ListOfKind('int', [i0, ll_const(123), 
i1]))] asm = format_assembler(ssarepr) expected = """ foobar I[%i0, $123, %i1] @@ -117,9 +113,7 @@ foo $123 """ ssarepr = unformat_assembler(input) - assert ssarepr.insns == [ - ('foo', Constant(123, lltype.Signed)), - ] + assert ssarepr.insns == [('foo', ll_const(123))] def test_unformat_assembler_single_return(): input = """ diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1,22 +1,10 @@ - import py import random -try: - from itertools import product -except ImportError: - # Python 2.5, this is taken from the CPython docs, but simplified. - def product(*args): - # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy - # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 - pools = map(tuple, args) - result = [[]] - for pool in pools: - result = [x+[y] for x in result for y in pool] - for prod in result: - yield tuple(prod) +from itertools import product -from rpython.flowspace.model import FunctionGraph, Block, Link, c_last_exception -from rpython.flowspace.model import SpaceOperation, Variable, Constant +from rpython.flowspace.model import ( + Block, Link, SpaceOperation, Variable, Constant) +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper.lltypesystem import lltype, llmemory, rstr, rffi from rpython.rtyper import rclass from rpython.rtyper.lltypesystem.module import ll_math @@ -26,9 +14,6 @@ from rpython.jit.codewriter.jtransform import Transformer, UnsupportedMallocFlags from rpython.jit.metainterp.history import getkind -def const(x): - return Constant(x, lltype.typeOf(x)) - class FakeRTyper: instance_reprs = {} @@ -212,7 +197,7 @@ assert block.operations == [] assert block.exitswitch == ('int_gt', v1, v2, '-live-before') assert block.exits == exits - assert exits[1].args == [const(True)] + assert exits[1].args == [ll_const(True)] def 
test_optimize_goto_if_not__unknownop(): v3 = Variable(); v3.concretetype = lltype.Bool @@ -264,8 +249,8 @@ 'float_gt': ('float_gt', 'float_lt'), } v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), const(42)]: - for v2 in [varoftype(lltype.Signed), const(43)]: + for v1 in [varoftype(lltype.Signed), ll_const(42)]: + for v2 in [varoftype(lltype.Signed), ll_const(43)]: for name1, name2 in ops.items(): op = SpaceOperation(name1, [v1, v2], v3) op1 = Transformer(FakeCPU()).rewrite_operation(op) @@ -282,8 +267,8 @@ def test_symmetric_int_add_ovf(): v3 = varoftype(lltype.Signed) - for v1 in [varoftype(lltype.Signed), const(42)]: - for v2 in [varoftype(lltype.Signed), const(43)]: + for v1 in [varoftype(lltype.Signed), ll_const(42)]: + for v2 in [varoftype(lltype.Signed), ll_const(43)]: op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3) oplist = Transformer(FakeCPU()).rewrite_operation(op) op1, op0 = oplist @@ -332,7 +317,7 @@ def get_direct_call_op(argtypes, restype): FUNC = lltype.FuncType(argtypes, restype) fnptr = lltype.functionptr(FUNC, "g") # no graph - c_fnptr = const(fnptr) + c_fnptr = ll_const(fnptr) vars = [varoftype(TYPE) for TYPE in argtypes] v_result = varoftype(restype) op = SpaceOperation('direct_call', [c_fnptr] + vars, v_result) @@ -386,7 +371,7 @@ op = get_direct_call_op(argtypes, restype) op.opname = 'indirect_call' op.args[0] = varoftype(op.args[0].concretetype) - op.args.append(Constant(['somegraph1', 'somegraph2'], lltype.Void)) + op.args.append(LLConstant(['somegraph1', 'somegraph2'], lltype.Void)) tr = Transformer(FakeCPU(), FakeResidualIndirectCallControl()) tr.graph = 'someinitialgraph' oplist = tr.rewrite_operation(op) @@ -414,7 +399,7 @@ op = get_direct_call_op(argtypes, restype) op.opname = 'indirect_call' op.args[0] = varoftype(op.args[0].concretetype) - op.args.append(Constant(['somegraph1', 'somegraph2'], lltype.Void)) + op.args.append(LLConstant(['somegraph1', 'somegraph2'], lltype.Void)) tr = Transformer(FakeCPU(), 
FakeRegularIndirectCallControl()) tr.graph = 'someinitialgraph' oplist = tr.rewrite_operation(op) @@ -467,7 +452,7 @@ ('chr', 'i'), ('unc', 'i')]: v_parent = varoftype(lltype.Ptr(S)) - c_name = Constant(name, lltype.Void) + c_name = LLConstant(name, lltype.Void) v_result = varoftype(getattr(S, name)) op = SpaceOperation('getfield', [v_parent, c_name], v_result) op1 = Transformer(FakeCPU()).rewrite_operation(op) @@ -478,7 +463,7 @@ def test_getfield_typeptr(): v_parent = varoftype(rclass.OBJECTPTR) - c_name = Constant('typeptr', lltype.Void) + c_name = LLConstant('typeptr', lltype.Void) v_result = varoftype(rclass.OBJECT.typeptr) op = SpaceOperation('getfield', [v_parent, c_name], v_result) oplist = Transformer(FakeCPU()).rewrite_operation(op) @@ -508,7 +493,7 @@ ('chr', 'i'), ('unc', 'i')]: v_parent = varoftype(lltype.Ptr(S)) - c_name = Constant(name, lltype.Void) + c_name = LLConstant(name, lltype.Void) v_newvalue = varoftype(getattr(S, name)) op = SpaceOperation('setfield', [v_parent, c_name, v_newvalue], varoftype(lltype.Void)) @@ -521,8 +506,8 @@ def test_malloc_new(): S = lltype.GcStruct('S') v = varoftype(lltype.Ptr(S)) - op = SpaceOperation('malloc', [Constant(S, lltype.Void), - Constant({'flavor': 'gc'}, lltype.Void)], v) + op = SpaceOperation('malloc', [LLConstant(S, lltype.Void), + LLConstant({'flavor': 'gc'}, lltype.Void)], v) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'new' assert op1.args == [('sizedescr', S)] @@ -530,8 +515,8 @@ def test_malloc_new_zero_2(): S = lltype.GcStruct('S', ('x', lltype.Signed)) v = varoftype(lltype.Ptr(S)) - op = SpaceOperation('malloc', [Constant(S, lltype.Void), - Constant({'flavor': 'gc', + op = SpaceOperation('malloc', [LLConstant(S, lltype.Void), + LLConstant({'flavor': 'gc', 'zero': True}, lltype.Void)], v) op1, op2 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'new' @@ -545,8 +530,8 @@ S2 = lltype.GcStruct('S2', ('parent', S), ('xx', lltype.Ptr(S0))) v = 
varoftype(lltype.Ptr(S2)) - op = SpaceOperation('malloc', [Constant(S2, lltype.Void), - Constant({'flavor': 'gc', + op = SpaceOperation('malloc', [LLConstant(S2, lltype.Void), + LLConstant({'flavor': 'gc', 'zero': True}, lltype.Void)], v) op1, op2, op3 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'new' @@ -561,8 +546,8 @@ S = lltype.GcStruct('S', ('parent', rclass.OBJECT)) heaptracker.set_testing_vtable_for_gcstruct(S, vtable, 'S') v = varoftype(lltype.Ptr(S)) - op = SpaceOperation('malloc', [Constant(S, lltype.Void), - Constant({'flavor': 'gc'}, lltype.Void)], v) + op = SpaceOperation('malloc', [LLConstant(S, lltype.Void), + LLConstant({'flavor': 'gc'}, lltype.Void)], v) cpu = FakeCPU() op1 = Transformer(cpu).rewrite_operation(op) assert op1.opname == 'new_with_vtable' @@ -576,8 +561,8 @@ lltype.attachRuntimeTypeInfo(S, destrptr=destructor) heaptracker.set_testing_vtable_for_gcstruct(S, vtable, 'S') v = varoftype(lltype.Ptr(S)) - op = SpaceOperation('malloc', [Constant(S, lltype.Void), - Constant({'flavor': 'gc'}, lltype.Void)], v) + op = SpaceOperation('malloc', [LLConstant(S, lltype.Void), + LLConstant({'flavor': 'gc'}, lltype.Void)], v) tr = Transformer(FakeCPU(), FakeResidualCallControl()) oplist = tr.rewrite_operation(op) op0, op1 = oplist @@ -591,8 +576,8 @@ S = rffi.CArray(lltype.Char) v1 = varoftype(lltype.Signed) v = varoftype(lltype.Ptr(S)) - flags = Constant({'flavor': 'raw'}, lltype.Void) - op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + flags = LLConstant({'flavor': 'raw'}, lltype.Void) + op = SpaceOperation('malloc_varsize', [LLConstant(S, lltype.Void), flags, v1], v) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op0, op1 = tr.rewrite_operation(op) @@ -608,8 +593,8 @@ S = rffi.CArray(lltype.Signed) v1 = varoftype(lltype.Signed) v = varoftype(lltype.Ptr(S)) - flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) - op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + 
flags = LLConstant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [LLConstant(S, lltype.Void), flags, v1], v) tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) @@ -622,8 +607,8 @@ S = rffi.CArray(lltype.Signed) v1 = varoftype(lltype.Signed) v = varoftype(lltype.Ptr(S)) - flags = Constant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) - op = SpaceOperation('malloc_varsize', [Constant(S, lltype.Void), flags, + flags = LLConstant({'flavor': 'raw', 'unsupported_flag': True}, lltype.Void) + op = SpaceOperation('malloc_varsize', [LLConstant(S, lltype.Void), flags, v1], v) tr = Transformer(FakeCPU(), FakeResidualCallControl()) py.test.raises(UnsupportedMallocFlags, tr.rewrite_operation, op) @@ -631,8 +616,8 @@ def test_raw_malloc_fixedsize(): S = lltype.Struct('dummy', ('x', lltype.Signed)) v = varoftype(lltype.Ptr(S)) - flags = Constant({'flavor': 'raw', 'zero': True}, lltype.Void) - op = SpaceOperation('malloc', [Constant(S, lltype.Void), flags], v) + flags = LLConstant({'flavor': 'raw', 'zero': True}, lltype.Void) + op = SpaceOperation('malloc', [LLConstant(S, lltype.Void), flags], v) tr = Transformer(FakeCPU(), FakeResidualCallControl()) op0, op1 = tr.rewrite_operation(op) assert op0.opname == 'residual_call_r_i' @@ -642,7 +627,7 @@ def test_raw_free(): S = rffi.CArray(lltype.Char) - flags = Constant({'flavor': 'raw', 'track_allocation': True}, + flags = LLConstant({'flavor': 'raw', 'track_allocation': True}, lltype.Void) op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], varoftype(lltype.Void)) @@ -654,7 +639,7 @@ def test_raw_free_no_track_allocation(): S = rffi.CArray(lltype.Signed) - flags = Constant({'flavor': 'raw', 'track_allocation': False}, + flags = LLConstant({'flavor': 'raw', 'track_allocation': False}, lltype.Void) op = SpaceOperation('free', [varoftype(lltype.Ptr(S)), flags], varoftype(lltype.Void)) @@ -689,7 +674,7 @@ v1 = varoftype(lltype.Signed) v2 = 
varoftype(lltype.Signed) v3 = varoftype(lltype.Bool) - c0 = const(0) + c0 = ll_const(0) # for opname, reducedname in [('int_eq', 'int_is_zero'), ('int_ne', 'int_is_true')]: @@ -712,7 +697,7 @@ v1 = varoftype(lltype.Ptr(rstr.STR)) v2 = varoftype(lltype.Ptr(rstr.STR)) v3 = varoftype(lltype.Bool) - c0 = const(lltype.nullptr(rstr.STR)) + c0 = ll_const(lltype.nullptr(rstr.STR)) # for opname, reducedname in [('ptr_eq', 'ptr_iszero'), ('ptr_ne', 'ptr_nonzero')]: @@ -735,7 +720,7 @@ v1 = varoftype(rclass.OBJECTPTR) v2 = varoftype(rclass.OBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = const(lltype.nullptr(rclass.OBJECT)) + c0 = ll_const(lltype.nullptr(rclass.OBJECT)) for opname, newopname, reducedname in [ ('ptr_eq', 'instance_ptr_eq', 'ptr_iszero'), @@ -760,7 +745,7 @@ v1 = varoftype(rclass.NONGCOBJECTPTR) v2 = varoftype(rclass.NONGCOBJECTPTR) v3 = varoftype(lltype.Bool) - c0 = const(lltype.nullptr(rclass.NONGCOBJECT)) + c0 = ll_const(lltype.nullptr(rclass.NONGCOBJECT)) # for opname, reducedname in [('ptr_eq', 'int_is_zero'), ('ptr_ne', 'int_is_true')]: @@ -793,7 +778,7 @@ v = varoftype(lltype.Ptr(rstr.STR)) v_result = varoftype(lltype.Signed) op = SpaceOperation('getinteriorarraysize', - [v, Constant('chars', lltype.Void)], + [v, LLConstant('chars', lltype.Void)], v_result) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'strlen' @@ -804,7 +789,7 @@ v = varoftype(lltype.Ptr(rstr.UNICODE)) v_result = varoftype(lltype.Signed) op = SpaceOperation('getinteriorarraysize', - [v, Constant('chars', lltype.Void)], + [v, LLConstant('chars', lltype.Void)], v_result) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'unicodelen' @@ -816,7 +801,7 @@ v_index = varoftype(lltype.Signed) v_result = varoftype(lltype.Char) op = SpaceOperation('getinteriorfield', - [v, Constant('chars', lltype.Void), v_index], + [v, LLConstant('chars', lltype.Void), v_index], v_result) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'strgetitem' @@ -828,7 +813,7 @@ 
v_index = varoftype(lltype.Signed) v_result = varoftype(lltype.UniChar) op = SpaceOperation('getinteriorfield', - [v, Constant('chars', lltype.Void), v_index], + [v, LLConstant('chars', lltype.Void), v_index], v_result) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'unicodegetitem' @@ -841,13 +826,13 @@ v = varoftype(lltype.Ptr(DICT)) i = varoftype(lltype.Signed) v_result = varoftype(lltype.Signed) - op = SpaceOperation('getinteriorfield', [v, i, Constant('v', lltype.Void)], + op = SpaceOperation('getinteriorfield', [v, i, LLConstant('v', lltype.Void)], v_result) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'getinteriorfield_gc_i' assert op1.args == [v, i, ('interiorfielddescr', DICT, 'v')] - op = SpaceOperation('getinteriorfield', [v, i, Constant('v', lltype.Void)], - Constant(None, lltype.Void)) + op = SpaceOperation('getinteriorfield', [v, i, LLConstant('v', lltype.Void)], + LLConstant(None, lltype.Void)) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert op1 is None @@ -857,7 +842,7 @@ v_newchr = varoftype(lltype.Char) v_void = varoftype(lltype.Void) op = SpaceOperation('setinteriorfield', - [v, Constant('chars', lltype.Void), v_index, v_newchr], + [v, LLConstant('chars', lltype.Void), v_index, v_newchr], v_void) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'strsetitem' @@ -870,7 +855,7 @@ v_newchr = varoftype(lltype.UniChar) v_void = varoftype(lltype.Void) op = SpaceOperation('setinteriorfield', - [v, Constant('chars', lltype.Void), v_index, v_newchr], + [v, LLConstant('chars', lltype.Void), v_index, v_newchr], v_void) op1 = Transformer().rewrite_operation(op) assert op1.opname == 'unicodesetitem' @@ -883,13 +868,13 @@ v = varoftype(lltype.Ptr(DICT)) i = varoftype(lltype.Signed) v_void = varoftype(lltype.Void) - op = SpaceOperation('setinteriorfield', [v, i, Constant('v', lltype.Void), + op = SpaceOperation('setinteriorfield', [v, i, LLConstant('v', lltype.Void), i], v_void) op1 = 
Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'setinteriorfield_gc_i' assert op1.args == [v, i, i, ('interiorfielddescr', DICT, 'v')] - op = SpaceOperation('setinteriorfield', [v, i, Constant('v', lltype.Void), + op = SpaceOperation('setinteriorfield', [v, i, LLConstant('v', lltype.Void), v_void], v_void) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert not op1 @@ -922,7 +907,7 @@ v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) op = SpaceOperation('hint', - [v1, Constant({'promote': True}, lltype.Void)], + [v1, LLConstant({'promote': True}, lltype.Void)], v2) oplist = Transformer().rewrite_operation(op) op0, op1, op2 = oplist @@ -937,7 +922,7 @@ v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Signed) op = SpaceOperation('hint', - [v1, Constant({'promote': True}, lltype.Void)], + [v1, LLConstant({'promote': True}, lltype.Void)], v2) returnblock = Block([varoftype(lltype.Signed)]) returnblock.operations = () @@ -970,8 +955,8 @@ vvoid2 = varoftype(lltype.Void) v5 = varoftype(lltype.Void) op = SpaceOperation('jit_marker', - [Constant('jit_merge_point', lltype.Void), - Constant(jd.jitdriver, lltype.Void), + [LLConstant('jit_merge_point', lltype.Void), + LLConstant(jd.jitdriver, lltype.Void), v1, v2, vvoid1, v3, v4, vvoid2], v5) tr = Transformer() tr.portal_jd = jd @@ -994,7 +979,7 @@ S = lltype.GcStruct('S', ('x', lltype.Char)) v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) - op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) + op = SpaceOperation('getfield', [v1, LLConstant('x', lltype.Void)], v2) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'getfield_gc_i' assert op1.args == [v1, ('fielddescr', S, 'x')] @@ -1005,7 +990,7 @@ hints={'immutable': True}) v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) - op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) + op = SpaceOperation('getfield', [v1, LLConstant('x', lltype.Void)], v2) op1 = 
Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'getfield_gc_i_pure' assert op1.args == [v1, ('fielddescr', S, 'x')] @@ -1023,7 +1008,7 @@ hints={'immutable': True}) v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) - op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) + op = SpaceOperation('getfield', [v1, LLConstant('x', lltype.Void)], v2) op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) assert op1.opname == 'getfield_gc_i_greenfield' assert op1.args == [v1, ('fielddescr', S, 'x')] @@ -1040,8 +1025,8 @@ assert oplist[0].args[0] == 'somejitcode' def test_str_newstr(): - c_STR = Constant(rstr.STR, lltype.Void) - c_flavor = Constant({'flavor': 'gc'}, lltype.Void) + c_STR = LLConstant(rstr.STR, lltype.Void) + c_flavor = LLConstant({'flavor': 'gc'}, lltype.Void) v1 = varoftype(lltype.Signed) v2 = varoftype(lltype.Ptr(rstr.STR)) op = SpaceOperation('malloc_varsize', [c_STR, c_flavor, v1], v2) @@ -1051,10 +1036,10 @@ assert op1.result == v2 def test_malloc_varsize_zero(): - c_A = Constant(lltype.GcArray(lltype.Signed), lltype.Void) + c_A = LLConstant(lltype.GcArray(lltype.Signed), lltype.Void) v1 = varoftype(lltype.Signed) v2 = varoftype(c_A.value) - c_flags = Constant({"flavor": "gc", "zero": True}, lltype.Void) + c_flags = LLConstant({"flavor": "gc", "zero": True}, lltype.Void) op = SpaceOperation('malloc_varsize', [c_A, c_flags, v1], v2) op1 = Transformer(FakeCPU()).rewrite_operation(op) assert op1.opname == 'new_array_clear' @@ -1068,7 +1053,7 @@ v1 = varoftype(PSTR) v2 = varoftype(PSTR) v3 = varoftype(PSTR) - op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2], v3) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_r_r' @@ -1082,7 +1067,7 @@ v1 = varoftype(PSTR) v2 = varoftype(PSTR) op = SpaceOperation('hint', - [v1, Constant({'promote_string': True}, lltype.Void)], + 
[v1, LLConstant({'promote_string': True}, lltype.Void)], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op0, op1, _ = tr.rewrite_operation(op) @@ -1098,10 +1083,10 @@ v2 = varoftype(PSTR) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = SpaceOperation('hint', - [v1, Constant({'promote_string': True}, lltype.Void)], + [v1, LLConstant({'promote_string': True}, lltype.Void)], v2) op2 = SpaceOperation('hint', - [v1, Constant({'promote_string': True, + [v1, LLConstant({'promote_string': True, 'promote': True}, lltype.Void)], v2) lst1 = tr.rewrite_operation(op1) @@ -1113,10 +1098,10 @@ v2 = varoftype(lltype.Signed) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = SpaceOperation('hint', - [v1, Constant({'promote': True}, lltype.Void)], + [v1, LLConstant({'promote': True}, lltype.Void)], v2) op2 = SpaceOperation('hint', - [v1, Constant({'promote_string': True, + [v1, LLConstant({'promote_string': True, 'promote': True}, lltype.Void)], v2) lst1 = tr.rewrite_operation(op1) @@ -1132,7 +1117,7 @@ v1 = varoftype(PSTR) v2 = varoftype(PSTR) v3 = varoftype(PSTR) - op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2], v3) cc = FakeBuiltinCallControl() tr = Transformer(FakeCPU(), cc) op1 = tr.rewrite_operation(op) @@ -1159,7 +1144,7 @@ v2 = varoftype(INT) v3 = varoftype(INT) v4 = varoftype(PSTR) - op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_ir_r' @@ -1180,7 +1165,7 @@ v2 = varoftype(INT) v3 = varoftype(INT) v4 = varoftype(PUNICODE) - op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2, v3], v4) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname 
== 'residual_call_ir_r' @@ -1199,7 +1184,7 @@ _callable=rstr.LLHelpers.ll_str2unicode) v1 = varoftype(PSTR) v2 = varoftype(PUNICODE) - op = SpaceOperation('direct_call', [const(func), v1], v2) + op = SpaceOperation('direct_call', [ll_const(func), v1], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_r_r' @@ -1217,7 +1202,7 @@ v1 = varoftype(PUNICODE) v2 = varoftype(PUNICODE) v3 = varoftype(lltype.Bool) - op = SpaceOperation('direct_call', [const(func), v1, v2], v3) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2], v3) cc = FakeBuiltinCallControl() tr = Transformer(FakeCPU(), cc) op1 = tr.rewrite_operation(op) @@ -1244,7 +1229,7 @@ v4 = varoftype(INT) v5 = varoftype(INT) v6 = varoftype(lltype.Void) - op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6) + op = SpaceOperation('direct_call', [ll_const(func), v1, v2, v3, v4, v5], v6) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_ir_v' @@ -1261,7 +1246,7 @@ _callable=ll_math.sqrt_nonneg) v1 = varoftype(FLOAT) v2 = varoftype(FLOAT) - op = SpaceOperation('direct_call', [const(func), v1], v2) + op = SpaceOperation('direct_call', [ll_const(func), v1], v2) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) op1 = tr.rewrite_operation(op) assert op1.opname == 'residual_call_irf_f' @@ -1280,8 +1265,8 @@ STRUCT = lltype.GcStruct('struct', ('inst_x', lltype.Signed), ('mutate_x', rclass.OBJECTPTR), hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: - op = SpaceOperation('getfield', [v_x, Constant('inst_x', lltype.Void)], + for v_x in [ll_const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + op = SpaceOperation('getfield', [v_x, LLConstant('inst_x', lltype.Void)], v2) tr = Transformer(FakeCPU()) [_, op1, op2] = tr.rewrite_operation(op) @@ -1305,9 +1290,9 @@ STRUCT = 
lltype.GcStruct('struct', ('inst_x', lltype.Signed), ('mutate_x', rclass.OBJECTPTR), hints={'immutable_fields': accessor}) - for v_x in [const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: + for v_x in [ll_const(lltype.malloc(STRUCT)), varoftype(lltype.Ptr(STRUCT))]: op = SpaceOperation('jit_force_quasi_immutable', - [v_x, Constant('mutate_x', lltype.Void)], + [v_x, LLConstant('mutate_x', lltype.Void)], varoftype(lltype.Void)) tr = Transformer(FakeCPU(), FakeRegularCallControl()) tr.graph = 'currentgraph' @@ -1322,7 +1307,7 @@ STRUCT = lltype.GcStruct('struct', ('parent', PARENT), ('x', lltype.Signed)) v_x = varoftype(lltype.Ptr(STRUCT)) - op = SpaceOperation('getfield', [v_x, Constant('x', lltype.Void)], + op = SpaceOperation('getfield', [v_x, LLConstant('x', lltype.Void)], varoftype(lltype.Signed)) tr = Transformer(None, None) py.test.raises(NotImplementedError, tr.rewrite_operation, op) @@ -1332,7 +1317,7 @@ tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', loop_invariant=loop_inv) OS_THREADLOCALREF_GET = effectinfo.EffectInfo.OS_THREADLOCALREF_GET - c = const(tlfield.offset) + c = ll_const(tlfield.offset) v = varoftype(lltype.Signed) op = SpaceOperation('threadlocalref_get', [c], v) cc = FakeBuiltinCallControl() diff --git a/rpython/jit/codewriter/test/test_list.py b/rpython/jit/codewriter/test/test_list.py --- a/rpython/jit/codewriter/test/test_list.py +++ b/rpython/jit/codewriter/test/test_list.py @@ -1,6 +1,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.translator.unsimplify import varoftype -from rpython.flowspace.model import Constant, SpaceOperation +from rpython.flowspace.model import SpaceOperation +from rpython.rtyper.rmodel import LLConstant from rpython.jit.codewriter.jtransform import Transformer, NotSupported from rpython.jit.codewriter.flatten import GraphFlattener @@ -63,7 +64,7 @@ raise ValueError(property) tr._get_list_nonneg_canraise_flags = force_flags op = SpaceOperation('direct_call', - 
[Constant("myfunc", lltype.Void)] + args, + [LLConstant("myfunc", lltype.Void)] + args, v_result) try: oplist = tr._handle_list_call(op, oopspec_name, args) @@ -85,11 +86,11 @@ def test_newlist(): builtin_test('newlist', [], FIXEDLIST, """new_array $0, -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed)], FIXEDLIST, + builtin_test('newlist', [LLConstant(5, lltype.Signed)], FIXEDLIST, """new_array $5, -> %r0""") builtin_test('newlist', [varoftype(lltype.Signed)], FIXEDLIST, """new_array %i0, -> %r0""") - builtin_test('newlist_clear', [Constant(5, lltype.Signed)], FIXEDLIST, + builtin_test('newlist_clear', [LLConstant(5, lltype.Signed)], FIXEDLIST, """new_array_clear $5, -> %r0""") builtin_test('newlist', [], FIXEDPTRLIST, """new_array_clear $0, -> %r0""") @@ -98,8 +99,8 @@ builtin_test('list.ll_arraycopy', [varoftype(FIXEDLIST), varoftype(FIXEDLIST), - varoftype(lltype.Signed), - varoftype(lltype.Signed), + varoftype(lltype.Signed), + varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, """ residual_call_ir_v $'myfunc', I[%i0, %i1, %i2], R[%r0, %r1], @@ -165,11 +166,11 @@ " , ") builtin_test('newlist', [], VARLIST, """newlist $0, """+alldescrs+""" -> %r0""") - builtin_test('newlist', [Constant(5, lltype.Signed)], VARLIST, + builtin_test('newlist', [LLConstant(5, lltype.Signed)], VARLIST, """newlist $5, """+alldescrs+""" -> %r0""") builtin_test('newlist', [varoftype(lltype.Signed)], VARLIST, """newlist %i0, """+alldescrs+""" -> %r0""") - builtin_test('newlist_clear', [Constant(5, lltype.Signed)], VARLIST, + builtin_test('newlist_clear', [LLConstant(5, lltype.Signed)], VARLIST, """newlist_clear $5, """+alldescrs+""" -> %r0""") def test_resizable_getitem(): diff --git a/rpython/jit/codewriter/test/test_longlong.py b/rpython/jit/codewriter/test/test_longlong.py --- a/rpython/jit/codewriter/test/test_longlong.py +++ b/rpython/jit/codewriter/test/test_longlong.py @@ -1,13 +1,12 @@ import py, sys from rpython.rlib.rarithmetic import r_longlong, 
intmask, is_valid_int -from rpython.flowspace.model import SpaceOperation, Variable, Constant -from rpython.flowspace.model import Block, Link +from rpython.flowspace.model import SpaceOperation, Variable +from rpython.rtyper.rmodel import ll_const from rpython.translator.unsimplify import varoftype from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.jit.codewriter.jtransform import Transformer, NotSupported +from rpython.jit.codewriter.jtransform import Transformer from rpython.jit.codewriter.effectinfo import EffectInfo -from rpython.jit.codewriter.test.test_jtransform import const from rpython.jit.codewriter import longlong @@ -79,7 +78,7 @@ assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' assert oplist[0].args[0].value == opname.split('_')[0]+'_from_int' - assert list(oplist[0].args[1]) == [const(0)] + assert list(oplist[0].args[1]) == [ll_const(0)] assert list(oplist[0].args[2]) == [] assert list(oplist[0].args[3]) == [] assert oplist[0].args[4] == 'calldescr-84' @@ -104,7 +103,7 @@ assert len(oplist) == 2 assert oplist[0].opname == 'residual_call_irf_f' assert oplist[0].args[0].value == 'llong_from_int' - assert list(oplist[0].args[1]) == [const(0)] + assert list(oplist[0].args[1]) == [ll_const(0)] assert list(oplist[0].args[2]) == [] assert list(oplist[0].args[3]) == [] assert oplist[0].args[4] == 'calldescr-84' @@ -224,7 +223,7 @@ def test_constants(self): for TYPE in [lltype.SignedLongLong, lltype.UnsignedLongLong]: v_x = varoftype(TYPE) - vlist = [v_x, const(rffi.cast(TYPE, 7))] + vlist = [v_x, ll_const(rffi.cast(TYPE, 7))] v_result = varoftype(TYPE) op = SpaceOperation('llong_add', vlist, v_result) tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) diff --git a/rpython/jit/codewriter/test/test_regalloc.py b/rpython/jit/codewriter/test/test_regalloc.py --- a/rpython/jit/codewriter/test/test_regalloc.py +++ b/rpython/jit/codewriter/test/test_regalloc.py @@ -1,15 +1,14 @@ -import py, sys from rpython.jit.codewriter 
import support from rpython.jit.codewriter.regalloc import perform_register_allocation from rpython.jit.codewriter.flatten import flatten_graph, ListOfKind from rpython.jit.codewriter.format import assert_format from rpython.jit.metainterp.history import AbstractDescr -from rpython.flowspace.model import Variable, Constant, SpaceOperation +from rpython.flowspace.model import Variable, SpaceOperation from rpython.flowspace.model import FunctionGraph, Block, Link from rpython.flowspace.model import c_last_exception +from rpython.rtyper.rmodel import ll_const from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper import rclass -from rpython.rlib.rarithmetic import ovfcheck class TestRegAlloc: @@ -158,7 +157,7 @@ v4 = Variable(); v4.concretetype = lltype.Signed block = Block([v1]) block.operations = [ - SpaceOperation('int_add', [v1, Constant(1, lltype.Signed)], v2), + SpaceOperation('int_add', [v1, ll_const(1)], v2), SpaceOperation('rescall', [ListOfKind('int', [v1, v2])], v3), ] graph = FunctionGraph('f', block, v4) @@ -212,7 +211,7 @@ v5 = Variable(); v5.concretetype = lltype.Signed block = Block([v1]) block.operations = [ - SpaceOperation('int_add', [v1, Constant(1, lltype.Signed)], v2), + SpaceOperation('int_add', [v1, ll_const(1)], v2), SpaceOperation('rescall', [ListOfKind('int', [v1, v2])], v5), SpaceOperation('rescall', [ListOfKind('int', [v1, v2])], v3), ] diff --git a/rpython/jit/codewriter/test/test_support.py b/rpython/jit/codewriter/test/test_support.py --- a/rpython/jit/codewriter/test/test_support.py +++ b/rpython/jit/codewriter/test/test_support.py @@ -1,15 +1,13 @@ import py, sys from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.annlowlevel import llstr -from rpython.flowspace.model import Variable, Constant, SpaceOperation +from rpython.flowspace.model import Variable, SpaceOperation +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.jit.codewriter.support import decode_builtin_call, 
LLtypeHelpers from rpython.jit.codewriter.support import _ll_1_int_abs -def newconst(x): - return Constant(x, lltype.typeOf(x)) - def voidconst(x): - return Constant(x, lltype.Void) + return LLConstant(x, lltype.Void) # ____________________________________________________________ @@ -27,14 +25,14 @@ vc.concretetype = lltype.Char v_result = Variable('result') v_result.concretetype = lltype.Signed - op = SpaceOperation('direct_call', [newconst(fnobj), + op = SpaceOperation('direct_call', [ll_const(fnobj), vi, voidconst('mymarker'), vc], v_result) oopspec, opargs = decode_builtin_call(op) assert oopspec == 'foobar' - assert opargs == [newconst(2), vc, vi] + assert opargs == [ll_const(2), vc, vi] #impl = runner.get_oopspec_impl('foobar', lltype.Signed) #assert impl(2, 'A', 5) == 5 * ord('A') @@ -56,15 +54,15 @@ v_result.concretetype = lltype.Signed myarray = lltype.malloc(A, 10) myarray[5] = 42 - op = SpaceOperation('direct_call', [newconst(fnobj), - newconst(myarray), + op = SpaceOperation('direct_call', [ll_const(fnobj), + ll_const(myarray), vi, voidconst('mymarker'), vc], v_result) oopspec, opargs = decode_builtin_call(op) assert oopspec == 'spam.foobar' - assert opargs == [newconst(myarray), newconst(2), vc, vi] + assert opargs == [ll_const(myarray), ll_const(2), vc, vi] #impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A)) #assert impl(myarray, 2, 'A', 5) == 42 * ord('A') diff --git a/rpython/jit/codewriter/test/test_void_list.py b/rpython/jit/codewriter/test/test_void_list.py --- a/rpython/jit/codewriter/test/test_void_list.py +++ b/rpython/jit/codewriter/test/test_void_list.py @@ -1,6 +1,6 @@ +from rpython.rtyper.rmodel import ll_const from rpython.rtyper.lltypesystem import lltype from rpython.translator.unsimplify import varoftype -from rpython.flowspace.model import Constant from rpython.jit.codewriter.jtransform import NotSupported from rpython.jit.codewriter.test.test_list import builtin_test @@ -20,26 +20,23 @@ def test_newlist(): 
builtin_test('newlist', [], FIXEDLIST, NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed)], FIXEDLIST, + builtin_test('newlist', [ll_const(5)], FIXEDLIST, NotSupported) builtin_test('newlist', [varoftype(lltype.Signed)], FIXEDLIST, NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(0, lltype.Signed)], FIXEDLIST, + builtin_test('newlist', [ll_const(5), ll_const(0)], FIXEDLIST, NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(1, lltype.Signed)], FIXEDLIST, + builtin_test('newlist', [ll_const(5), ll_const(1)], FIXEDLIST, NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - varoftype(lltype.Signed)], FIXEDLIST, + builtin_test('newlist', [ll_const(5), varoftype(lltype.Signed)], FIXEDLIST, NotSupported) def test_fixed_ll_arraycopy(): builtin_test('list.ll_arraycopy', [varoftype(FIXEDLIST), varoftype(FIXEDLIST), - varoftype(lltype.Signed), - varoftype(lltype.Signed), + varoftype(lltype.Signed), + varoftype(lltype.Signed), varoftype(lltype.Signed)], lltype.Void, NotSupported) @@ -82,20 +79,12 @@ # Resizable lists def test_resizable_newlist(): - builtin_test('newlist', [], VARLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed)], VARLIST, - NotSupported) - builtin_test('newlist', [varoftype(lltype.Signed)], VARLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(0, lltype.Signed)], VARLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - Constant(1, lltype.Signed)], VARLIST, - NotSupported) - builtin_test('newlist', [Constant(5, lltype.Signed), - varoftype(lltype.Signed)], VARLIST, + builtin_test('newlist', [], VARLIST, NotSupported) + builtin_test('newlist', [ll_const(5)], VARLIST, NotSupported) + builtin_test('newlist', [varoftype(lltype.Signed)], VARLIST, NotSupported) + builtin_test('newlist', [ll_const(5), ll_const(0)], VARLIST, NotSupported) + builtin_test('newlist', [ll_const(5), 
ll_const(1)], VARLIST, NotSupported) + builtin_test('newlist', [ll_const(5), varoftype(lltype.Signed)], VARLIST, NotSupported) def test_resizable_getitem(): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -8,8 +8,9 @@ from rpython.annotator import model as annmodel from rpython.rtyper.llinterp import LLException from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache -from rpython.flowspace.model import SpaceOperation, Variable, Constant +from rpython.flowspace.model import SpaceOperation, Variable from rpython.flowspace.model import checkgraph, Link, copygraph +from rpython.rtyper.rmodel import LLConstant from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.debug import fatalerror @@ -364,7 +365,7 @@ if not driver.inline_jit_merge_point: continue new_driver = driver.clone() - c_new_driver = Constant(new_driver, v_driver.concretetype) + c_new_driver = LLConstant(new_driver, v_driver.concretetype) op.args[1] = c_new_driver def find_portals(self): @@ -674,7 +675,7 @@ jitdriver_name, func, ARGS, argspec) v_result = op.result - c_accessor = Constant(accessor, concretetype=lltype.Void) + c_accessor = LLConstant(accessor, concretetype=lltype.Void) newop = SpaceOperation('direct_call', [c_accessor] + op.args[2:], v_result) block.operations[index] = newop @@ -728,7 +729,7 @@ op1.args[0].value == 'jit_merge_point') op0 = SpaceOperation( 'jit_marker', - [Constant('can_enter_jit', lltype.Void)] + op1.args[1:], + [LLConstant('can_enter_jit', lltype.Void)] + op1.args[1:], None) operations.insert(0, op0) can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)] @@ -741,7 +742,7 @@ greens_v, reds_v = support.decode_hp_hint_args(op) args_v = greens_v + reds_v - vlist = [Constant(jit_enter_fnptr, FUNCPTR)] + args_v + vlist = [LLConstant(jit_enter_fnptr, FUNCPTR)] 
+ args_v v_result = Variable() v_result.concretetype = lltype.Void @@ -789,7 +790,7 @@ FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT)) ptr = self.helper_func(FUNCPTR, new_func) op.opname = 'direct_call' - op.args = [Constant(ptr, FUNCPTR)] + op.args[2:] + op.args = [LLConstant(ptr, FUNCPTR)] + op.args[2:] def rewrite_jit_merge_points(self, policy): for jd in self.jitdrivers_sd: @@ -993,7 +994,7 @@ assert op.opname == 'jit_marker' assert op.args[0].value == 'jit_merge_point' greens_v, reds_v = support.decode_hp_hint_args(op) - vlist = [Constant(jd.portal_runner_ptr, jd._PTR_PORTAL_FUNCTYPE)] + vlist = [LLConstant(jd.portal_runner_ptr, jd._PTR_PORTAL_FUNCTYPE)] vlist += greens_v vlist += reds_v v_result = Variable() @@ -1046,7 +1047,7 @@ else: TP = PTR_SET_PARAM_FUNCTYPE funcptr = self.helper_func(TP, closure) - return Constant(funcptr, TP) + return LLConstant(funcptr, TP) # for graph, block, i in find_set_param(graphs): @@ -1082,7 +1083,7 @@ func = quasiimmut.make_invalidation_function(ARG, mutatefieldname) FUNC = lltype.Ptr(lltype.FuncType([ARG], lltype.Void)) llptr = self.helper_func(FUNC, func) - cptr = Constant(llptr, FUNC) + cptr = LLConstant(llptr, FUNC) self._cache_force_quasiimmed_funcs[key] = cptr op.opname = 'direct_call' op.args = [cptr, op.args[0]] diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -1,8 +1,10 @@ -from rpython.flowspace.model import (Constant, Variable, Block, Link, - copygraph, SpaceOperation, checkgraph) +import sys +from rpython.flowspace.model import ( + Variable, Block, Link, copygraph, SpaceOperation, checkgraph) from rpython.rlib.debug import ll_assert from rpython.rlib.nonconst import NonConstant from rpython.rlib import rgil +from rpython.rtyper.rmodel import ll_const, LLConstant from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import lltype, llmemory, rffi 
from rpython.rtyper.lltypesystem.lloperation import llop @@ -12,7 +14,6 @@ from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo -import sys # @@ -94,7 +95,7 @@ CONTAINER = lltype.FixedSizeArray(TYPE, 1) p = lltype.malloc(CONTAINER, flavor='raw', zero=True, immortal=True) - sradict[key] = Constant(p, lltype.Ptr(CONTAINER)) + sradict[key] = ll_const(p) sra.append(sradict[key]) # # make a copy of the graph that will reload the values @@ -103,7 +104,7 @@ # # edit the original graph to only store the value of the arguments block = Block(graph.startblock.inputargs) - c_item0 = Constant('item0', lltype.Void) + c_item0 = LLConstant('item0', lltype.Void) assert len(block.inputargs) == len(sra) for v_arg, c_p in zip(block.inputargs, sra): if isinstance(v_arg.concretetype, lltype.Ptr): @@ -120,7 +121,7 @@ fnptr2 = lltype.functionptr(FUNC2, fnptr._obj._name + '_reload', graph=graph2) - c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2)) + c_fnptr2 = ll_const(fnptr2) HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2), ASM_FRAMEDATA_HEAD_PTR], FUNC1.RESULT) v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk") @@ -129,8 +130,7 @@ v_result = varoftype(FUNC1.RESULT) block.operations.append( SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2, - c_gcrootanchor, - Constant(None, lltype.Void)], + c_gcrootanchor, ll_const(None)], v_result)) block.closeblock(Link([v_result], graph.returnblock)) graph.startblock = block @@ -819,7 +819,7 @@ gcrootanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, immortal=True) gcrootanchor.prev = gcrootanchor gcrootanchor.next = gcrootanchor -c_gcrootanchor = Constant(gcrootanchor, ASM_FRAMEDATA_HEAD_PTR) +c_gcrootanchor = ll_const(gcrootanchor) eci = ExternalCompilationInfo(compile_extra=['-DPYPY_USE_ASMGCC']) @@ -831,21 +831,20 @@ _nowrapper=True, random_effects_on_gcobjs=True, compilation_info=eci) -c_asm_stackwalk = 
Constant(pypy_asm_stackwalk, - lltype.typeOf(pypy_asm_stackwalk)) +c_asm_stackwalk = ll_const(pypy_asm_stackwalk) pypy_asm_gcroot = rffi.llexternal('pypy_asm_gcroot', [llmemory.Address], llmemory.Address, sandboxsafe=True, _nowrapper=True) -c_asm_gcroot = Constant(pypy_asm_gcroot, lltype.typeOf(pypy_asm_gcroot)) +c_asm_gcroot = ll_const(pypy_asm_gcroot) pypy_asm_nocollect = rffi.llexternal('pypy_asm_gc_nocollect', [rffi.CCHARP], lltype.Void, sandboxsafe=True, _nowrapper=True) -c_asm_nocollect = Constant(pypy_asm_nocollect, lltype.typeOf(pypy_asm_nocollect)) +c_asm_nocollect = ll_const(pypy_asm_nocollect) QSORT_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address, llmemory.Address], rffi.INT)) diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -2,7 +2,7 @@ from rpython.memory.gctransform.support import (get_rtti, _static_deallocator_body_for_type, LLTransformerOp, ll_call_destructor) from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.flowspace.model import Constant +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper import rmodel @@ -68,13 +68,13 @@ resulttype=llmemory.Address) finalizer_ptr = self.finalizer_funcptr_for_type(TYPE) if finalizer_ptr: - c_finalizer_ptr = Constant(finalizer_ptr, self.FINALIZER_PTR) + c_finalizer_ptr = LLConstant(finalizer_ptr, self.FINALIZER_PTR) hop.genop("boehm_register_finalizer", [v_raw, c_finalizer_ptr]) return v_raw def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size, c_offset_to_length): - # XXX same behavior for zero=True: in theory that's wrong + # XXX same behavior for zero=True: in theory that's wrong if c_offset_to_length is None: v_raw = hop.genop("direct_call", [self.malloc_varsize_no_length_ptr, v_length, diff --git a/rpython/memory/gctransform/test/test_framework.py 
b/rpython/memory/gctransform/test/test_framework.py --- a/rpython/memory/gctransform/test/test_framework.py +++ b/rpython/memory/gctransform/test/test_framework.py @@ -1,6 +1,7 @@ from rpython.annotator.listdef import s_list_of_strings from rpython.annotator.model import SomeInteger -from rpython.flowspace.model import Constant, SpaceOperation, mkentrymap +from rpython.flowspace.model import SpaceOperation, mkentrymap +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gc.semispace import SemiSpaceGC @@ -69,7 +70,7 @@ return -x t = rtype(g, [int]) gg = graphof(t, g) - assert not CollectAnalyzer(t).analyze_direct_call(gg) + assert not CollectAnalyzer(t).analyze_direct_call(gg) def test_cancollect_external(): fext1 = rffi.llexternal('fext1', [], lltype.Void, releasegil=False) @@ -110,7 +111,7 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, @@ -134,7 +135,7 @@ def entrypoint(argv): return g() + 2 - + t = rtype(entrypoint, [s_list_of_strings]) t.config.translation.gc = "minimark" cbuild = CStandaloneBuilder(t, entrypoint, t.config, @@ -167,7 +168,7 @@ assert 'can cause the GC to be called' in str(f.value) assert 'trace_func' in str(f.value) assert 'MyStructure' in str(f.value) - + class WriteBarrierTransformer(ShadowStackFrameworkGCTransformer): clean_sets = {} GC_PARAMS = {} @@ -198,7 +199,7 @@ PTR_TYPE = lltype.Ptr(lltype.GcStruct('S', ('x', PTR_TYPE2))) write_barrier_check(SpaceOperation( "setfield", - [varoftype(PTR_TYPE), Constant('x', lltype.Void), + [varoftype(PTR_TYPE), LLConstant('x', lltype.Void), varoftype(PTR_TYPE2)], varoftype(lltype.Void))) @@ -208,8 +209,8 @@ PTR_TYPE = lltype.Ptr(lltype.GcStruct('S', ('x', PTR_TYPE2))) write_barrier_check(SpaceOperation( "setfield", - [varoftype(PTR_TYPE), Constant('x', 
lltype.Void), - Constant('foo', varoftype(PTR_TYPE2))], + [varoftype(PTR_TYPE), LLConstant('x', lltype.Void), + LLConstant('foo', varoftype(PTR_TYPE2))], varoftype(lltype.Void)), needs_write_barrier=False) def test_write_barrier_support_setarrayitem(): @@ -228,7 +229,7 @@ write_barrier_check(SpaceOperation( "setinteriorfield", [varoftype(ARRAYPTR2), varoftype(lltype.Signed), - Constant('b', lltype.Void), varoftype(PTR_TYPE2)], + LLConstant('b', lltype.Void), varoftype(PTR_TYPE2)], varoftype(lltype.Void))) def test_remove_duplicate_write_barrier(): @@ -306,7 +307,7 @@ def test_find_clean_setarrayitems(): S = lltype.GcStruct('S') A = lltype.GcArray(lltype.Ptr(S)) - + def f(): l = lltype.malloc(A, 3) l[0] = lltype.malloc(S) @@ -327,7 +328,7 @@ def test_find_clean_setarrayitems_2(): S = lltype.GcStruct('S') A = lltype.GcArray(lltype.Ptr(S)) - + def f(): l = lltype.malloc(A, 3) l[0] = lltype.malloc(S) @@ -349,7 +350,7 @@ def test_find_clean_setarrayitems_3(): S = lltype.GcStruct('S') A = lltype.GcArray(lltype.Ptr(S)) - + def f(): l = lltype.malloc(A, 3) l[0] = lltype.malloc(S) diff --git a/rpython/memory/test/test_gctypelayout.py b/rpython/memory/test/test_gctypelayout.py --- a/rpython/memory/test/test_gctypelayout.py +++ b/rpython/memory/test/test_gctypelayout.py @@ -6,7 +6,7 @@ from rpython.rtyper import rclass from rpython.rtyper.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE from rpython.rtyper.test.test_llinterp import get_interpreter -from rpython.flowspace.model import Constant +from rpython.flowspace.model import LLConstant class FakeGC: object_minimal_size = 0 @@ -92,7 +92,7 @@ 100000 * gc.is_gcarrayofgcptr(tid2)) interp, graph = get_interpreter(f, [], backendopt=True) assert interp.eval_graph(graph, []) == 11001 - assert graph.startblock.exits[0].args == [Constant(11001, lltype.Signed)] + assert graph.startblock.exits[0].args == [LLConstant(11001, lltype.Signed)] def test_gc_pointers_inside(): from rpython.rtyper import rclass diff --git 
a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -741,7 +741,7 @@ def fix_graph_of_g(translator): from rpython.translator.translator import graphof - from rpython.flowspace.model import Constant + from rpython.flowspace.model import LLConstant from rpython.rtyper.lltypesystem import rffi layoutbuilder = cls.ensure_layoutbuilder(translator) @@ -751,11 +751,11 @@ graph = graphof(translator, g) for op in graph.startblock.operations: if op.opname == 'do_malloc_fixedsize': - op.args = [Constant(type_id, llgroup.HALFWORD), - Constant(llmemory.sizeof(P), lltype.Signed), - Constant(False, lltype.Bool), # has_finalizer - Constant(False, lltype.Bool), # is_finalizer_light - Constant(False, lltype.Bool)] # contains_weakptr + op.args = [LLConstant(type_id, llgroup.HALFWORD), + LLConstant(llmemory.sizeof(P), lltype.Signed), + LLConstant(False, lltype.Bool), # has_finalizer + LLConstant(False, lltype.Bool), # is_finalizer_light + LLConstant(False, lltype.Bool)] # contains_weakptr break else: assert 0, "oups, not found" @@ -779,7 +779,7 @@ return 0 def fix_graph_of_g(translator): from rpython.translator.translator import graphof - from rpython.flowspace.model import Constant + from rpython.flowspace.model import LLConstant from rpython.rtyper.lltypesystem import rffi layoutbuilder = cls.ensure_layoutbuilder(translator) type_id = layoutbuilder.get_type_id(P) @@ -788,11 +788,11 @@ graph = graphof(translator, g) for op in graph.startblock.operations: if op.opname == 'do_malloc_fixedsize': - op.args = [Constant(type_id, llgroup.HALFWORD), - Constant(llmemory.sizeof(P), lltype.Signed), - Constant(False, lltype.Bool), # has_finalizer - Constant(False, lltype.Bool), # is_finalizer_light - Constant(False, lltype.Bool)] # contains_weakptr + op.args = [LLConstant(type_id, llgroup.HALFWORD), + LLConstant(llmemory.sizeof(P), lltype.Signed), + 
LLConstant(False, lltype.Bool), # has_finalizer + LLConstant(False, lltype.Bool), # is_finalizer_light + LLConstant(False, lltype.Bool)] # contains_weakptr break else: assert 0, "oups, not found" @@ -1071,7 +1071,7 @@ def test_adr_of_nursery(self): run = self.runner("adr_of_nursery") res = run([]) - + class TestGenerationalNoFullCollectGC(GCTest): # test that nursery is doing its job and that no full collection @@ -1131,7 +1131,7 @@ 'large_object': 8*WORD, 'translated_to_c': False} root_stack_depth = 200 - + def define_ref_from_rawmalloced_to_regular(cls): import gc S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -1182,7 +1182,7 @@ run = self.runner("write_barrier_direct") res = run([]) assert res == 42 - + class TestMiniMarkGC(TestHybridGC): gcname = "minimark" GC_CAN_TEST_ID = True @@ -1199,7 +1199,7 @@ 'translated_to_c': False, } root_stack_depth = 200 - + def define_no_clean_setarrayitems(cls): # The optimization find_clean_setarrayitems() in # gctransformer/framework.py does not work with card marking. 
@@ -1224,7 +1224,7 @@ run = self.runner("no_clean_setarrayitems") res = run([]) assert res == 123 - + def define_nursery_hash_base(cls): class A: pass @@ -1263,19 +1263,19 @@ 'translated_to_c': False, } root_stack_depth = 200 - + def define_malloc_array_of_gcptr(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) A = lltype.GcArray(lltype.Ptr(S)) def f(): lst = lltype.malloc(A, 5) - return (lst[0] == lltype.nullptr(S) + return (lst[0] == lltype.nullptr(S) and lst[1] == lltype.nullptr(S) and lst[2] == lltype.nullptr(S) and lst[3] == lltype.nullptr(S) and lst[4] == lltype.nullptr(S)) return f - + def test_malloc_array_of_gcptr(self): run = self.runner('malloc_array_of_gcptr') res = run([]) @@ -1356,7 +1356,7 @@ def define_gettypeid(cls): class A(object): pass - + def fn(): a = A() return rgc.get_typeid(a) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -7,6 +7,7 @@ from rpython.rlib.objectmodel import we_are_translated, enforceargs, specialize from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.rmodel import ll_const # ____________________________________________________________ # General GC features @@ -46,7 +47,7 @@ """ _pinned_objects.append(obj) return True - + class PinEntry(ExtRegistryEntry): _about_ = pin @@ -617,14 +618,13 @@ def specialize_call(self, hop): from rpython.rtyper.rclass import getclassrepr, CLASSTYPE - from rpython.flowspace.model import Constant Class = hop.args_s[0].const classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class) classrepr = getclassrepr(hop.rtyper, classdef) vtable = classrepr.getvtable() assert lltype.typeOf(vtable) == CLASSTYPE hop.exception_cannot_occur() - return Constant(vtable, concretetype=CLASSTYPE) + return ll_const(vtable) class Entry(ExtRegistryEntry): _about_ = dump_rpy_heap @@ -774,7 +774,7 @@ pending.extend(get_rpy_referents(gcref)) all_typeids = {} - + def 
get_typeid(obj): raise Exception("does not work untranslated") diff --git a/rpython/translator/test/test_simplify.py b/rpython/translator/test/test_simplify.py --- a/rpython/translator/test/test_simplify.py +++ b/rpython/translator/test/test_simplify.py @@ -3,6 +3,7 @@ from rpython.translator.backendopt.all import backend_optimizations from rpython.translator.simplify import (get_graph, transform_dead_op_vars) from rpython.flowspace.model import Block, Constant, summary +from rpython.rtyper.rmodel import ll_const from rpython.conftest import option def translate(func, argtypes, backend_optimize=True): @@ -204,7 +205,6 @@ def test_join_blocks_cleans_links(): from rpython.rtyper.lltypesystem import lltype - from rpython.flowspace.model import Constant from rpython.translator.backendopt.removenoops import remove_same_as def f(x): return bool(x + 2) @@ -215,7 +215,7 @@ return 2 graph, t = translate(g, [int], backend_optimize=False) fgraph = graphof(t, f) - fgraph.startblock.exits[0].args = [Constant(True, lltype.Bool)] + fgraph.startblock.exits[0].args = [ll_const(True)] # does not crash: previously join_blocks would barf on this remove_same_as(graph) backend_optimizations(t) From noreply at buildbot.pypy.org Sat Oct 10 18:26:37 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Sat, 10 Oct 2015 18:26:37 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: failargs where not copied correctly, this modified the wrong list and left behind dirty entries in the rename cache Message-ID: <20151010162637.2569E1C1202@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80102:94218ef0b98b Date: 2015-10-10 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/94218ef0b98b/ Log: failargs where not copied correctly, this modified the wrong list and left behind dirty entries in the rename cache diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- 
a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -63,6 +63,7 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() + import pdb; pdb.set_trace() if pipe.wait() < 0: raise IOError("subprocess was killed by signal %d" % ( pipe.returncode,)) @@ -70,7 +71,7 @@ py.test.skip(stderr) if stderr.startswith('debug_alloc.h:'): # lldebug builds stderr = '' - assert not stderr + #assert not stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] diff --git a/rpython/jit/metainterp/optimizeopt/renamer.py b/rpython/jit/metainterp/optimizeopt/renamer.py --- a/rpython/jit/metainterp/optimizeopt/renamer.py +++ b/rpython/jit/metainterp/optimizeopt/renamer.py @@ -18,8 +18,9 @@ if op.is_guard(): assert isinstance(op, resoperation.GuardResOp) - op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot) - self.rename_failargs(op) + op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot, clone=True) + failargs = self.rename_failargs(op, clone=True) + op.setfailargs(failargs) return True diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -390,14 +390,16 @@ prepare_arguments(state, pack, args) vecop = VecOperation(left.vector, args, left, pack.numops(), left.getdescr()) + for i,node in enumerate(pack.operations): + op = node.getoperation() + if op.returns_void(): + continue + state.setvector_of_box(op,i,vecop) + if pack.is_accumulating() and not op.is_guard(): + state.renamer.start_renaming(op, vecop) if left.is_guard(): prepare_fail_arguments(state, pack, left, vecop) state.oplist.append(vecop) - for i,node in enumerate(pack.operations): - op = node.getoperation() - state.setvector_of_box(op,i,vecop) - if pack.is_accumulating(): - state.renamer.start_renaming(op, vecop) def prepare_arguments(state, pack, 
args): # Transforming one argument to a vector box argument @@ -439,13 +441,9 @@ def prepare_fail_arguments(state, pack, left, vecop): assert isinstance(left, GuardResOp) assert isinstance(vecop, GuardResOp) - args = left.getfailargs() + args = left.getfailargs()[:] for i, arg in enumerate(args): pos, newarg = state.getvector_of_box(arg) - if newarg in vecop.getarglist(): - # in this case we do not know which slot - # failed. thus we bail! - raise NotAVectorizeableLoop() if newarg is None: newarg = arg if newarg.is_vector(): # can be moved to guard exit! @@ -682,7 +680,7 @@ op = node.getoperation() if op.is_guard(): # add accumulation info to the descriptor - failargs = op.getfailargs() + failargs = op.getfailargs()[:] descr = op.getdescr() # note: stitching a guard must resemble the order of the label # otherwise a wrong mapping is handed to the register allocator @@ -698,6 +696,7 @@ descr.attach_vector_info(info) seed = accum.getleftmostseed() failargs[i] = self.renamer.rename_map.get(seed, seed) + op.setfailargs(failargs) def profitable(self): return self.costmodel.profitable() @@ -783,8 +782,11 @@ return self.box_to_vbox.get(arg, (-1, None)) def setvector_of_box(self, var, off, vector): + if var.returns_void(): + assert 0, "not allowed to rename void resop" assert off < vector.count assert not var.is_vector() + print "rename", var, off, "=>", vector self.box_to_vbox[var] = (off, vector) def remember_args_in_vector(self, pack, index, box): @@ -963,7 +965,10 @@ def __repr__(self): if len(self.operations) == 0: return "Pack(empty)" - return "Pack(%dx %s)" % (self.numops(), self.operations) + packs = self.operations[0].op.getopname() + '[' + ','.join(['%2d' % (o.opidx) for o in self.operations]) + ']' + if self.operations[0].op.getdescr(): + packs += 'descr=' + str(self.operations[0].op.getdescr()) + return "Pack(%dx %s)" % (self.numops(), packs) def is_accumulating(self): return False diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py 
b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -1,14 +1,16 @@ import py import pytest -from rpython.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) +from rpython.jit.metainterp.compile import invent_fail_descr_for_op from rpython.jit.metainterp.history import TargetToken, JitCellToken, TreeLoop from rpython.jit.metainterp.optimizeopt.dependency import (DependencyGraph, Dependency, IndexVar, MemoryRef, Node) from rpython.jit.metainterp.optimizeopt.vector import VectorLoop +from rpython.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.backend.llgraph.runner import ArrayDescr +from rpython.jit.tool.oparser import OpParser from rpython.rtyper.lltypesystem import rffi from rpython.rtyper.lltypesystem import lltype from rpython.conftest import option @@ -42,6 +44,56 @@ graph.parsestr = ops return graph + def match_op(self, expected, actual, remap): + if expected.getopnum() != actual.getopnum(): + return False + expargs = expected.getarglist() + actargs = [remap.get(arg, None) for arg in actual.getarglist()] + if not all([e == a or a is None for e,a in zip(expargs,actargs)]): + return False + if expected.getfailargs(): + expargs = expected.getfailargs() + actargs = [remap.get(arg, None) for arg in actual.getfailargs()] + if not all([e == a or a is None for e,a in zip(expargs,actargs)]): + return False + return True + + def ensure_operations(self, opstrlist, trace, inthatorder=True): + oparse = OpParser('', self.cpu, self.namespace, 'lltype', None, + None, True, None) + oplist = [] + for op_str in opstrlist: + op = oparse.parse_next_op(op_str) + if not op.returns_void(): + var = 
op_str.split('=')[0].strip() + if '[' in var: + var = var[:var.find('[')] + elem = op_str[:len(var)] + oparse._cache['lltype', elem] = op + oplist.append(op) + oplist_i = 0 + match = False + remap = {} + last_match = 0 + for i, op in enumerate(trace.operations): + if oplist_i >= len(oplist): + break + curtomatch = oplist[oplist_i] + if self.match_op(curtomatch, op, remap): + if not op.returns_void(): + remap[curtomatch] = op + oplist_i += 1 + last_match = i + + msg = "could not find all ops in the trace sequence\n\n" + if oplist_i != len(oplist): + l = [str(o) for o in oplist[oplist_i:]] + msg += "sequence\n " + '\n '.join(l) + msg += "\n\ndoes not match\n " + l = [str(o) for o in trace.operations[last_match+1:]] + msg += '\n '.join(l) + assert oplist_i == len(oplist), msg + def parse_loop(self, ops, add_label=True): loop = self.parse(ops, postprocess=self.postprocess) loop.operations = filter(lambda op: op.getopnum() != rop.DEBUG_MERGE_POINT, loop.operations) @@ -54,6 +106,10 @@ jump = loop.operations[-1] loop = VectorLoop(label, loop.operations[0:-1], jump) loop.jump.setdescr(token) + for op in loop.operations: + if op.is_guard() and not op.getdescr(): + descr = invent_fail_descr_for_op(op.getopnum(), None) + op.setdescr(descr) return loop def parse_trace(self, source, inc_label_jump=True, pargs=2, iargs=10, diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -23,6 +23,7 @@ from rpython.jit.metainterp.optimizeopt.version import LoopVersionInfo from rpython.jit.backend.llsupport.descr import ArrayDescr from rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph +from rpython.jit.tool.oparser import OpParser class FakeJitDriverStaticData(object): vec=True @@ -284,6 +285,24 @@ assert trace.operations[0] is add assert trace.operations[1] is guard + def 
test_vectorize_guard(self): + trace = self.parse_loop(""" + [p0,p1,i0] + i10 = getarrayitem_raw_i(p0,i0,descr=int32arraydescr) + i20 = int_is_true(i10) + guard_true(i20) [i20] + i1 = int_add(i0, 1) + jump(p0,p1,i1) + """) + self.vectorize(trace) + self.debug_print_operations(trace) + self.ensure_operations([ + 'v10[4xi32] = vec_getarrayitem_raw_i(p0,i0,descr=int32arraydescr)', + 'v11[4xi32] = vec_int_is_true(v10[4xi32])', + 'i100 = vec_unpack_i(v11[4xi32], 0, 1)', + 'guard_true(v11[4xi32]) [i100]', + ], trace) + def test_vectorize_skip(self): ops = """ [p0,i0] diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -84,6 +84,8 @@ prefix_label = self.prefix_label.copy() renamer.rename(prefix_label) oplist = [] + op1 = self.operations[2] + assert op1.getarg(0) is op1.getfailargs()[0] for op in self.operations: newop = op.copy() renamer.rename(newop) @@ -606,7 +608,7 @@ See limintations (vectorization.rst). """ if l_op.getopnum() == r_op.getopnum(): - return True + return l_op.bytesize == r_op.bytesize return False class PackSet(object): @@ -625,7 +627,10 @@ """ Check to ensure that two nodes might be packed into a Pair. """ if isomorphic(lnode.getoperation(), rnode.getoperation()): - if lnode.independent(rnode): + # even if a guard depends on the previous it is able to + lop = lnode.getoperation() + independent = lnode.independent(rnode) + if independent: if forward and origin_pack.is_accumulating(): # in this case the splitted accumulator must # be combined. 
This case is not supported @@ -734,6 +739,9 @@ return None operator = AccumPack.SUPPORTED[opnum] return AccumPack([lnode, rnode], operator, index) + is_guard = left.is_guard() and left.getopnum() in (rop.GUARD_TRUE, rop.GUARD_FALSE) + if is_guard: + return AccumPack([lnode, rnode], 'g', 0) return None @@ -748,6 +756,9 @@ for pack in self.packs: if not pack.is_accumulating(): continue + if pack.leftmost().is_guard(): + # guard breaks dependencies, thus it is an accumulation pack + continue for i,node in enumerate(pack.operations): op = node.getoperation() state.accumulation[op] = pack diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -266,10 +266,6 @@ # -------------- def copy(self): - if self.is_guard(): - op = self.copy_and_change(self.opnum) - op.setfailargs(self.getfailargs()[:]) - return op return self.copy_and_change(self.opnum) def copy_and_change(self, opnum, args=None, descr=None): @@ -284,6 +280,7 @@ if descr is DONT_CHANGE: descr = None newop = ResOperation(opnum, args, descr) + newop.datatype = self.datatype newop.count = self.count newop.bytesize = self.bytesize newop.signed = self.signed @@ -1600,8 +1597,10 @@ def inputarg_from_tp(tp): if tp == 'i': return InputArgInt() - elif tp == 'r': + elif tp == 'r' or tp == 'p': return InputArgRef() + elif tp == 'v': + return InputArgVector() else: assert tp == 'f' return InputArgFloat() diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation, \ InputArgInt, InputArgRef, InputArgFloat, InputArgVector, \ - ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes + ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes, OpHelpers class ParseError(Exception): pass @@ -132,42 +132,25 @@ else: raise - def box_for_var(self, 
elem): - xxx + def inputarg_for_var(self, elem): try: return self._cache[self.type_system, elem] except KeyError: pass - if elem.startswith('i'): - # integer - box = self.model.BoxInt() - _box_counter_more_than(self.model, elem[1:]) - elif elem.startswith('f'): - box = self.model.BoxFloat() - _box_counter_more_than(self.model, elem[1:]) - elif elem.startswith('p'): - # pointer - ts = getattr(self.cpu, 'ts', self.model.llhelper) - box = ts.BoxRef() - _box_counter_more_than(self.model, elem[1:]) - elif elem.startswith('v'): - pattern = re.compile('.*\[(u?)(i|f)(\d+)(#|\|)(\d+)\]') - match = pattern.match(elem) - if match: - item_type = match.group(2)[0] - item_size = int(match.group(3)) // 8 - item_count = int(match.group(5)) - item_signed = not (match.group(1) == 'u') - if item_type == 'f': - item_signed = False - box = self.model.BoxVector(item_type, item_count, item_size, item_signed) - lbracket = elem.find('[') - number = elem[1:lbracket] - else: - box = self.model.BoxVector('f',-1,-1,False) - number = elem[1:] - _box_counter_more_than(self.model, number) + if elem[0] in 'ifrp': + box = OpHelpers.inputarg_from_tp(elem[0]) + number = elem[1:] + if elem.startswith('v'): + pattern = re.compile('.*\[(\d+)x(i|f)(\d+)\]') + match = pattern.match(elem) + if match: + box.datatype = match.group(2)[0] + box.bytesize = int(match.group(3)) // 8 + box.count = int(match.group(1)) + box.signed == item_type == 'i' + number = elem[1:elem.find('[')] else: + number = elem[1:] for prefix, boxclass in self.boxkinds.iteritems(): if elem.startswith(prefix): box = boxclass() @@ -204,7 +187,7 @@ return v def newvar(self, elem): - box = self.box_for_var(elem) + box = self.inputarg_for_var(elem) self.vars[elem] = box return box @@ -366,15 +349,16 @@ Internally you will see the same variable names as in the trace as string. 
""" - regex = re.compile("[prifv](\d+)") - match = regex.match(name) - if match: - counter = int(match.group(1)) - countdict = val._repr_memo - assert val not in countdict._d - countdict._d[val] = counter - if countdict.counter < counter: - countdict.counter = counter + pass + #regex = re.compile("[prifv](\d+)") + #match = regex.match(name) + #if match: + # counter = int(match.group(1)) + # countdict = val._repr_memo + # assert val not in countdict._d + # countdict._d[val] = counter + # if countdict.counter < counter: + # countdict.counter = counter def update_vector(self, resop, var): pattern = re.compile('.*\[(\d+)x(u?)(i|f)(\d+)\]') @@ -501,4 +485,4 @@ def _box_counter_more_than(model, s): if s.isdigit(): - model.Box._counter = max(model.Box._counter, int(s)+1) + model._counter = max(model._counter, int(s)+1) From noreply at buildbot.pypy.org Sat Oct 10 20:09:58 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 10 Oct 2015 20:09:58 +0200 (CEST) Subject: [pypy-commit] pypy llconst: fix test Message-ID: <20151010180958.1211B1C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80103:08389d8b7079 Date: 2015-10-10 19:10 +0100 http://bitbucket.org/pypy/pypy/changeset/08389d8b7079/ Log: fix test diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -1,5 +1,6 @@ from rpython.flowspace.model import (Variable, Constant, Block, Link, SpaceOperation, c_last_exception, checkgraph) +from rpython.rtyper.rmodel import ll_const def varoftype(concretetype, name=None): @@ -101,7 +102,7 @@ " containing an int or str or instance is actually" " known to be constant, e.g. always 42." 
% ( v, v.concretetype)) - c = Constant(None, lltype.Void) + c = ll_const(None) w = varmap[v] newop = SpaceOperation('same_as', [c], w) i = 0 From noreply at buildbot.pypy.org Sat Oct 10 23:04:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 10 Oct 2015 23:04:37 +0200 (CEST) Subject: [pypy-commit] pypy default: A failing test based on issue #2132. Message-ID: <20151010210437.0D1231C0EFC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80104:004a5d649ed9 Date: 2015-10-10 23:04 +0200 http://bitbucket.org/pypy/pypy/changeset/004a5d649ed9/ Log: A failing test based on issue #2132. diff --git a/rpython/jit/metainterp/test/test_exception.py b/rpython/jit/metainterp/test/test_exception.py --- a/rpython/jit/metainterp/test/test_exception.py +++ b/rpython/jit/metainterp/test/test_exception.py @@ -2,6 +2,7 @@ from rpython.jit.metainterp.test.support import LLJitMixin from rpython.rlib.jit import JitDriver, dont_look_inside from rpython.rlib.rarithmetic import ovfcheck, LONG_BIT, intmask +from rpython.rlib.objectmodel import keepalive_until_here from rpython.jit.codewriter.policy import StopAtXPolicy from rpython.rtyper.lltypesystem import lltype, rffi @@ -656,6 +657,38 @@ res = self.interp_operations(f, [5], backendopt=True) assert res == 5 + def test_guard_no_exception_incorrectly_removed_from_bridge(self): + myjitdriver = JitDriver(greens=[], reds=['i']) + @dont_look_inside + def do(n): + if n > 7: + raise ValueError + if n > 1: + return n + raise IndexError + def f(i): + while i > 0: + myjitdriver.jit_merge_point(i=i) + f = str(i) + str(i) + # ^^^ this sticks a CALL_R in the resume data, inserted + # at the start of a bridge *before* the guard_no_exception. + # Some optimization step then thinks, correctly, that the + # CALL_R cannot raise and kills the guard_no_exception... + # As a result, the final IndexError we get for i == 1 is + # not caught here and escapes. It causes issue #2132. 
+ try: + do(i) + except ValueError: + pass + except IndexError: + pass + i -= 1 + keepalive_until_here(f) + return 10101 + assert f(14) == 10101 + res = self.meta_interp(f, [14]) + assert res == 10101 + class MyError(Exception): def __init__(self, n): From noreply at buildbot.pypy.org Sun Oct 11 02:16:11 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 11 Oct 2015 02:16:11 +0200 (CEST) Subject: [pypy-commit] pypy llconst: fixes Message-ID: <20151011001611.D12411C0EFC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80105:24ea9240be59 Date: 2015-10-11 01:16 +0100 http://bitbucket.org/pypy/pypy/changeset/24ea9240be59/ Log: fixes diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -741,7 +741,7 @@ def fix_graph_of_g(translator): from rpython.translator.translator import graphof - from rpython.flowspace.model import LLConstant + from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem import rffi layoutbuilder = cls.ensure_layoutbuilder(translator) @@ -779,7 +779,7 @@ return 0 def fix_graph_of_g(translator): from rpython.translator.translator import graphof - from rpython.flowspace.model import LLConstant + from rpython.rtyper.rmodel import LLConstant from rpython.rtyper.lltypesystem import rffi layoutbuilder = cls.ensure_layoutbuilder(translator) type_id = layoutbuilder.get_type_id(P) diff --git a/rpython/rlib/jit_hooks.py b/rpython/rlib/jit_hooks.py --- a/rpython/rlib/jit_hooks.py +++ b/rpython/rlib/jit_hooks.py @@ -5,7 +5,7 @@ cast_base_ptr_to_instance, llstr) from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import llmemory, lltype -from rpython.flowspace.model import Constant +from rpython.rtyper.rmodel import LLConstant from rpython.rtyper import rclass @@ -152,8 +152,8 @@ return resulttype def specialize_call(self, 
hop): - c_jitdriver = Constant(hop.args_s[0].const, concretetype=lltype.Void) - c_name = Constant(name, concretetype=lltype.Void) + c_jitdriver = LLConstant(hop.args_s[0].const, lltype.Void) + c_name = LLConstant(name, lltype.Void) hop.exception_cannot_occur() args_v = [hop.inputarg(arg, arg=i + 1) for i, arg in enumerate(hop.args_r[1:])] diff --git a/rpython/translator/c/test/test_database.py b/rpython/translator/c/test/test_database.py --- a/rpython/translator/c/test/test_database.py +++ b/rpython/translator/c/test/test_database.py @@ -1,9 +1,9 @@ -import sys from rpython.rtyper.lltypesystem.lltype import * from rpython.translator.translator import TranslationContext from rpython.translator.c.database import LowLevelDatabase -from rpython.flowspace.model import Constant, Variable, SpaceOperation -from rpython.flowspace.model import Block, Link, FunctionGraph +from rpython.flowspace.model import ( + Variable, SpaceOperation, Block, Link, FunctionGraph) +from rpython.rtyper.rmodel import ll_const from rpython.rtyper.lltypesystem.lltype import getfunctionptr from rpython.rtyper.lltypesystem.rffi import VOIDP, INT_real, INT, CArrayPtr @@ -127,8 +127,7 @@ x.concretetype = Signed result = Variable("result") result.concretetype = Signed - one = Constant(1) - one.concretetype = Signed + one = ll_const(1) op = SpaceOperation("int_add", [x, one], result) block = Block([x]) graph = FunctionGraph("f", block) From noreply at buildbot.pypy.org Sun Oct 11 10:26:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 10:26:28 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix for 004a5d649ed9 (issue #2132). See comment in pyjitpl.py. Message-ID: <20151011082628.A3BF21C0FEC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80106:66ca1ac6c1dc Date: 2015-10-11 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/66ca1ac6c1dc/ Log: Fix for 004a5d649ed9 (issue #2132). See comment in pyjitpl.py. 
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -40,10 +40,13 @@ self.inputargs = map(mapping, inputargs) self.operations = [] for op in operations: - if op.getopnum() == rop.GUARD_VALUE: + opnum = op.getopnum() + if opnum == rop.GUARD_VALUE: # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) + elif opnum == rop.BRIDGE_EXCEPTION: + assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -890,7 +893,10 @@ # ----------------------------------------------------- - def fail_guard(self, descr, saved_data=None, extra_value=None): + def fail_guard(self, descr, saved_data=None, extra_value=None, + propagate_exception=False): + if not propagate_exception: + assert self.last_exception is None values = [] for box in self.current_op.getfailargs(): if box is not None: @@ -899,6 +905,9 @@ value = None values.append(value) if hasattr(descr, '_llgraph_bridge'): + if propagate_exception: + assert (descr._llgraph_bridge.operations[0].opnum == + rop.BRIDGE_EXCEPTION) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) @@ -977,7 +986,7 @@ def execute_guard_no_exception(self, descr): if self.last_exception is not None: - self.fail_guard(descr) + self.fail_guard(descr, propagate_exception=True) def execute_guard_exception(self, descr, excklass): lle = self.last_exception @@ -989,7 +998,7 @@ llmemory.cast_int_to_adr(excklass), rclass.CLASSTYPE) if gotklass != excklass: - self.fail_guard(descr) + self.fail_guard(descr, propagate_exception=True) # res = lle.args[1] self.last_exception = None @@ -998,7 +1007,7 @@ def execute_guard_not_forced(self, descr): if self.forced_deadframe is not None: 
saved_data = self.forced_deadframe._saved_data - self.fail_guard(descr, saved_data) + self.fail_guard(descr, saved_data, propagate_exception=True) self.force_guard_op = self.current_op execute_guard_not_forced_2 = execute_guard_not_forced @@ -1220,6 +1229,9 @@ def execute_keepalive(self, descr, x): pass + def execute_bridge_exception(self, descr): + pass + def _getdescr(op): d = op.getdescr() diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,4 +1,5 @@ from rpython.rlib import rgc +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import ovfcheck from rpython.rtyper.lltypesystem import llmemory, lltype from rpython.jit.metainterp import history @@ -14,6 +15,9 @@ FLAG_STR = 1 FLAG_UNICODE = 2 +class BridgeExceptionNotFirst(Exception): + pass + class GcRewriterAssembler(object): """ This class performs the following rewrites on the list of operations: @@ -164,6 +168,9 @@ continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: self.emit_pending_zeros() + if op.getopnum() == rop.BRIDGE_EXCEPTION: + self.remove_bridge_exception(operations, i) + continue # self.emit_op(op) return self._newops @@ -678,3 +685,14 @@ # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs size = max(size, 2 * WORD) return (size + WORD-1) & ~(WORD-1) # round up + + def remove_bridge_exception(self, operations, i): + """Check that the 'bridge_exception' operation occurs at the + start of the bridge.""" + if i == 0: + return # first operation, ok + if i == 1 and operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: + return # 2nd operation after INCREMENT_DEBUG_COUNTER, ok + # not ok! 
+ assert we_are_translated() + raise BridgeExceptionNotFirst diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -386,6 +386,7 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, + rop.BRIDGE_EXCEPTION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2480,6 +2480,21 @@ exception = self.cpu.grab_exc_value(deadframe) if (isinstance(resumedescr, compile.ResumeGuardExcDescr) or isinstance(resumedescr, compile.ResumeGuardCopiedExcDescr)): + # Add a GUARD_EXCEPTION or GUARD_NO_EXCEPTION at the start + # of the bridge---except it is not really the start, because + # the history aleady contains operations from resume.py. + # The optimizer should remove these operations. However, + # 'test_guard_no_exception_incorrectly_removed_from_bridge' + # shows a corner case in which just putting GuARD_NO_EXCEPTION + # here is a bad idea: the optimizer might remove it too. + # So we put a pair BRIDGE_EXCEPTION / GUARD_(NO)_EXCEPTION. + # The BRIDGE_EXCEPTION is meant to re-raise the exception + # caught before the bridge, but in reality it must end up + # as the first operation and thus is a no-op for the backends + # (it is removed in rewrite.py). Its real purpose is only to + # pass through the optimizer unmodified, so that the following + # GUARD_NO_EXCEPTION is not killed. 
+ self.history.record(rop.BRIDGE_EXCEPTION, [], None) if exception: self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception)) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -827,6 +827,7 @@ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', + 'BRIDGE_EXCEPTION/0/n', # pyjitpl: prepare_resume_from_failure() '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From noreply at buildbot.pypy.org Sun Oct 11 10:26:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 10:26:30 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 2a46aa80547f, it seems to create issues (buildbot, pypy-c-jit) Message-ID: <20151011082630.ED41B1C0FEC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80107:3513381b7af4 Date: 2015-10-11 08:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3513381b7af4/ Log: backout 2a46aa80547f, it seems to create issues (buildbot, pypy-c-jit) diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -507,7 +507,16 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I + def optimize_GETFIELD_GC_PURE_I(self, op): + structinfo = self.ensure_ptr_info_arg0(op) + cf = self.field_cache(op.getdescr()) + field = cf.getfield_from_cache(self, structinfo, op.getdescr()) + if field is not None: + self.make_equal_to(op, field) + return + # default case: produce the operation + self.make_nonnull(op.getarg(0)) + self.emit_operation(op) optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I optimize_GETFIELD_GC_PURE_F = 
optimize_GETFIELD_GC_PURE_I From noreply at buildbot.pypy.org Sun Oct 11 19:19:52 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 11 Oct 2015 19:19:52 +0200 (CEST) Subject: [pypy-commit] pypy default: cleanup: get_call_parameters()[0] is always a graph Message-ID: <20151011171952.8C5F01C0FEC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80108:6a6f81c1d420 Date: 2015-10-11 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/6a6f81c1d420/ Log: cleanup: get_call_parameters()[0] is always a graph diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -79,14 +79,11 @@ annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) - flowgraph, inputcells = self.get_call_parameters(function, args_s, policy) - if not isinstance(flowgraph, FunctionGraph): - assert isinstance(flowgraph, annmodel.SomeObject) - return flowgraph + flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy) if main_entry_point: self.translator.entry_point_graph = flowgraph - return self.build_graph_types(flowgraph, inputcells, complete_now=complete_now) + return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now) def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) From noreply at buildbot.pypy.org Sun Oct 11 19:20:41 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 11 Oct 2015 19:20:41 +0200 (CEST) Subject: [pypy-commit] pypy llconst: fix, again Message-ID: <20151011172041.645861C0FEC@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: llconst Changeset: r80109:49daae873d69 Date: 2015-10-11 05:41 +0100 http://bitbucket.org/pypy/pypy/changeset/49daae873d69/ Log: fix, again diff --git a/rpython/memory/test/test_gctypelayout.py b/rpython/memory/test/test_gctypelayout.py --- a/rpython/memory/test/test_gctypelayout.py +++ 
b/rpython/memory/test/test_gctypelayout.py @@ -6,7 +6,7 @@ from rpython.rtyper import rclass from rpython.rtyper.rclass import IR_IMMUTABLE, IR_QUASIIMMUTABLE from rpython.rtyper.test.test_llinterp import get_interpreter -from rpython.flowspace.model import LLConstant +from rpython.rtyper.rmodel import LLConstant class FakeGC: object_minimal_size = 0 From noreply at buildbot.pypy.org Sun Oct 11 19:29:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 19:29:31 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: A branch to fix the lifetime of ffi.new_handle(x): currently, it becomes Message-ID: <20151011172931.1A5871C0FEC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80110:0af42d8b4142 Date: 2015-10-11 18:58 +0200 http://bitbucket.org/pypy/pypy/changeset/0af42d8b4142/ Log: A branch to fix the lifetime of ffi.new_handle(x): currently, it becomes invalid as soon as 'x' is first found as unreachable, before x.__del__() is called. There is no way to use x.__del__() at app- level to reset some fields, because there is a risk that we call ffi.from_handle() after it became dead but before x.__del__() is invoked. 
From noreply at buildbot.pypy.org Sun Oct 11 19:29:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 19:29:33 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: add the interface 'instantiate(Cls, nonmovable=True)' Message-ID: <20151011172933.4EC781C0FEC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80111:57dfab5e28fa Date: 2015-10-11 19:04 +0200 http://bitbucket.org/pypy/pypy/changeset/57dfab5e28fa/ Log: add the interface 'instantiate(Cls, nonmovable=True)' diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -290,7 +290,7 @@ return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) @analyzer_for(rpython.rlib.objectmodel.instantiate) -def robjmodel_instantiate(s_clspbc): +def robjmodel_instantiate(s_clspbc, s_nonmovable=None): assert isinstance(s_clspbc, SomePBC) clsdef = None more_than_one = len(s_clspbc.descriptions) > 1 diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -276,7 +276,7 @@ # ____________________________________________________________ -def instantiate(cls): +def instantiate(cls, nonmovable=False): "Create an empty instance of 'cls'." 
if isinstance(cls, type): return cls.__new__(cls) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -693,18 +693,24 @@ return hop.args_r[0].rtype_isinstance(hop) @typer_for(objectmodel.instantiate) -def rtype_instantiate(hop): +def rtype_instantiate(hop, i_nonmovable=None): hop.exception_cannot_occur() s_class = hop.args_s[0] assert isinstance(s_class, annmodel.SomePBC) + v_nonmovable, = parse_kwds(hop, (i_nonmovable, None)) + nonmovable = (i_nonmovable is not None and v_nonmovable.value) if len(s_class.descriptions) != 1: # instantiate() on a variable class + if nonmovable: + raise TyperError("instantiate(x, nonmovable=True) cannot be used " + "if x is not a constant class") vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) r_class = hop.args_r[0] return r_class._instantiate_runtime_class(hop, vtypeptr, hop.r_result.lowleveltype) classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops, + nonmovable=nonmovable) @typer_for(hasattr) diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -684,10 +684,12 @@ rbase = rbase.rbase return False - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): """Build a new instance, without calling __init__.""" flavor = self.gcflavor flags = {'flavor': flavor} + if nonmovable: + flags['nonmovable'] = True ctype = inputconst(Void, self.object_type) cflags = inputconst(Void, flags) vlist = [ctype, cflags] @@ -1031,9 +1033,10 @@ # ____________________________________________________________ -def rtype_new_instance(rtyper, classdef, llops, classcallhop=None): +def rtype_new_instance(rtyper, classdef, llops, classcallhop=None, + nonmovable=False): rinstance = 
getinstancerepr(rtyper, classdef) - return rinstance.new_instance(llops, classcallhop) + return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable) def ll_inst_hash(ins): if not ins: diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -432,6 +432,14 @@ res = self.interpret(f, [2]) assert self.class_name(res) == 'B' + def test_instantiate_nonmovable(self): + class A: + pass + def f(): + return instantiate(A, nonmovable=True) # no effect before GC + res = self.interpret(f, []) + assert self.class_name(res) == 'A' + def test_os_path_join(self): def fn(a, b): return os.path.join(a, b) From noreply at buildbot.pypy.org Sun Oct 11 19:29:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 19:29:35 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Implement nonmovable=True Message-ID: <20151011172935.807FE1C0FEC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80112:235bc9c680e7 Date: 2015-10-11 19:29 +0200 http://bitbucket.org/pypy/pypy/changeset/235bc9c680e7/ Log: Implement nonmovable=True diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -915,10 +915,13 @@ return [op0, op1] def rewrite_op_malloc(self, op): - if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value + if d.get('nonmovable', False): + raise UnsupportedMallocFlags(d) + if d.value['flavor'] == 'raw': return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) # - if op.args[1].value.get('zero', False): + if d.value.get('zero', False): zero = True else: zero = False diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -170,7 +170,10 @@ def 
malloc_fixedsize_clear(self, typeid, size, has_finalizer=False, is_finalizer_light=False, - contains_weakptr=False): + contains_weakptr=False, + nonmovable=False): + if nonmovable: + raise MemoryError if (has_finalizer or (raw_malloc_usage(size) > self.lb_young_fixedsize and raw_malloc_usage(size) > self.largest_young_fixedsize)): diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -587,7 +587,8 @@ def malloc_fixedsize(self, typeid, size, needs_finalizer=False, is_finalizer_light=False, - contains_weakptr=False): + contains_weakptr=False, + nonmovable=False): size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size rawtotalsize = raw_malloc_usage(totalsize) @@ -603,7 +604,7 @@ # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a rawmalloc. The following check # should be constant-folded. - elif rawtotalsize > self.nonlarge_max: + elif rawtotalsize > self.nonlarge_max or nonmovable: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") obj = self.external_malloc(typeid, 0) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -509,7 +509,8 @@ def malloc_fixedsize_clear(self, typeid, size, needs_finalizer=False, is_finalizer_light=False, - contains_weakptr=False): + contains_weakptr=False, + nonmovable=False): size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size rawtotalsize = raw_malloc_usage(totalsize) @@ -525,7 +526,7 @@ # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a rawmalloc. The following check # should be constant-folded. 
- elif rawtotalsize > self.nonlarge_max: + elif rawtotalsize > self.nonlarge_max or nonmovable: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") obj = self.external_malloc(typeid, 0) diff --git a/rpython/memory/gc/semispace.py b/rpython/memory/gc/semispace.py --- a/rpython/memory/gc/semispace.py +++ b/rpython/memory/gc/semispace.py @@ -98,7 +98,10 @@ def malloc_fixedsize_clear(self, typeid16, size, has_finalizer=False, is_finalizer_light=False, - contains_weakptr=False): + contains_weakptr=False, + nonmovable=False): + if nonmovable: + raise MemoryError size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size result = self.free diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -305,6 +305,7 @@ annmodel.SomeInteger(nonneg=True), annmodel.SomeBool(), annmodel.SomeBool(), + annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) self.malloc_varsize_ptr = getfn( @@ -320,6 +321,7 @@ annmodel.SomeInteger(nonneg=True), annmodel.SomeBool(), annmodel.SomeBool(), + annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) self.malloc_varsize_ptr = getfn( @@ -363,7 +365,7 @@ raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") # in some GCs we can inline the common case of - # malloc_fixedsize(typeid, size, False, False, False) + # malloc_fixedsize(typeid, size, False, False, False, False) if getattr(GCClass, 'inline_simple_malloc', False): # make a copy of this function so that it gets annotated # independently and the constants are folded inside @@ -382,7 +384,7 @@ malloc_fast, [s_gc, s_typeid16, annmodel.SomeInteger(nonneg=True), - s_False, s_False, s_False], s_gcref, + s_False, s_False, s_False, s_False], s_gcref, inline = True) else: self.malloc_fast_ptr = None @@ -759,15 +761,19 @@ if not 
op.opname.endswith('_varsize') and not flags.get('varsize'): zero = flags.get('zero', False) + c_nonmovable = rmodel.inputconst(lltype.Bool, + flags.get('nonmovable', False)) if (self.malloc_fast_ptr is not None and not c_has_finalizer.value and + not c_nonmovable.value and (self.malloc_fast_is_clearing or not zero)): malloc_ptr = self.malloc_fast_ptr else: malloc_ptr = self.malloc_fixedsize_ptr args = [self.c_const_gc, c_type_id, c_size, c_has_finalizer, c_has_light_finalizer, - rmodel.inputconst(lltype.Bool, False)] + rmodel.inputconst(lltype.Bool, False), + c_nonmovable] else: assert not c_has_finalizer.value info_varsize = self.layoutbuilder.get_info_varsize(type_id) @@ -908,11 +914,12 @@ [v_typeid, v_size, v_has_finalizer, v_has_light_finalizer, v_contains_weakptr] = op.args livevars = self.push_roots(hop) + c_nonmovable = rmodel.inputconst(lltype.Bool, False) hop.genop("direct_call", [self.malloc_fixedsize_ptr, self.c_const_gc, v_typeid, v_size, v_has_finalizer, v_has_light_finalizer, - v_contains_weakptr], + v_contains_weakptr, c_nonmovable], resultvar=op.result) self.pop_roots(hop, livevars) @@ -1018,8 +1025,9 @@ malloc_ptr = self.malloc_fixedsize_ptr c_false = rmodel.inputconst(lltype.Bool, False) c_has_weakptr = rmodel.inputconst(lltype.Bool, True) + c_nonmovable = rmodel.inputconst(lltype.Bool, False) args = [self.c_const_gc, c_type_id, c_size, - c_false, c_false, c_has_weakptr] + c_false, c_false, c_has_weakptr, c_nonmovable] # push and pop the current live variables *including* the argument # to the weakref_create operation, which must be kept alive and diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1247,6 +1247,21 @@ res = self.runner('nursery_hash_base') assert res([]) >= 195 + def define_instantiate_nonmovable(cls): + from rpython.rlib import objectmodel + from rpython.rtyper import annlowlevel + 
class A: + pass + def fn(): + a = objectmodel.instantiate(A, nonmovable=True) + return rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + return fn + + def test_instantiate_nonmovable(self): + res = self.runner('instantiate_nonmovable') + assert res([]) == 0 + + class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" From noreply at buildbot.pypy.org Sun Oct 11 19:41:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 19:41:50 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Adapt _cffi_backend.handle Message-ID: <20151011174150.8DE7A1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80113:252c59f68289 Date: 2015-10-11 19:42 +0200 http://bitbucket.org/pypy/pypy/changeset/252c59f68289/ Log: Adapt _cffi_backend.handle diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,8 +1,10 @@ +import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rweaklist +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib import rweaklist, objectmodel, jit +from rpython.rtyper import annlowlevel class CffiHandles(rweaklist.RWeakListMixin): @@ -14,11 +16,15 @@ # ____________________________________________________________ + at jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - index = get_handles(space).reserve_next_handle_index() - _cdata = rffi.cast(rffi.CCHARP, index + 1) - new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get_handles(space).store_handle(index, new_cdataobj) + if not objectmodel.we_are_translated(): + py.test.skip("can't test handles untranslated for now") + new_cdataobj = 
objectmodel.instantiate(cdataobj.W_CDataHandle, + nonmovable=True) + gcref = annlowlevel.cast_instance_to_gcref(new_cdataobj) + _cdata = rffi.cast(rffi.CCHARP, gcref) + cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -29,6 +35,10 @@ "needs 'void *', got '%s'", w_ctype.name) return _newp_handle(space, w_ctype, w_x) + at jit.dont_look_inside +def reveal_gcref(ptr): + return rffi.cast(llmemory.GCREF, ptr) + @unwrap_spec(w_cdata=cdataobj.W_CData) def from_handle(space, w_cdata): ctype = w_cdata.ctype @@ -38,14 +48,10 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get_handles(space).fetch_handle(index - 1) + gcref = reveal_gcref(ptr) # - if isinstance(original_cdataobj, cdataobj.W_CDataHandle): - return original_cdataobj.w_keepalive - else: - if index == 0: - msg = "cannot use from_handle() on NULL pointer" - else: - msg = "'void *' value does not correspond to any object" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + if not gcref: + raise oefmt(space.w_RuntimeError, + "cannot use from_handle() on NULL pointer") + cd = annlowlevel.cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + return cd.w_keepalive From noreply at buildbot.pypy.org Sun Oct 11 21:41:40 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 11 Oct 2015 21:41:40 +0200 (CEST) Subject: [pypy-commit] buildbot default: run numpy tests in the documented way (ronan) Message-ID: <20151011194140.D65891C0726@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r963:8e092f35a054 Date: 2015-10-11 22:42 +0300 http://bitbucket.org/pypy/buildbot/changeset/8e092f35a054/ Log: run numpy tests in the documented way (ronan) diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ 
-1024,13 +1024,10 @@ self.addStep(ShellCmd( description="test numpy", - command=[sep.join(['bin', 'nosetests'])] + ['site-packages/numpy', - # XXX enable '-with-doctest', - ], + command=[sep.join(['..', 'install', 'bin', 'pypy'])] + ['runtests.py'], #logfiles={'pytestLog': 'pytest-numpy.log'}, timeout=4000, - workdir='install', - #env={"PYTHONPATH": ['download']}, # shouldn't be needed, but what if it is set externally? + workdir='numpy_src', )) if platform != 'win32': self.addStep(ShellCmd( From noreply at buildbot.pypy.org Sun Oct 11 21:57:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 21:57:51 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Test and fix Message-ID: <20151011195751.E2F881C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80114:cd5334105e34 Date: 2015-10-11 21:58 +0200 http://bitbucket.org/pypy/pypy/changeset/cd5334105e34/ Log: Test and fix diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -598,7 +598,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -607,7 +607,7 @@ elif rawtotalsize > self.nonlarge_max or nonmovable: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -660,7 +660,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. 
We also # go there if 'length' is actually negative. - obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -809,7 +809,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -863,7 +863,9 @@ # we should get a MemoryError from major_collection_step(). # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -875,10 +877,6 @@ # Allocate from the ArenaCollection. Don't clear it. result = self.ac.malloc(totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -898,11 +896,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. 
- if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -940,7 +938,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -520,7 +520,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -529,7 +529,7 @@ elif rawtotalsize > self.nonlarge_max or nonmovable: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -582,7 +582,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -672,7 +672,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -712,7 +712,9 @@ self.major_collection(raw_malloc_usage(totalsize)) # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -725,10 +727,6 @@ result = self.ac.malloc(totalsize) llmemory.raw_memclear(result, totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -748,11 +746,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. 
- if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -788,7 +786,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1253,8 +1253,13 @@ class A: pass def fn(): + a1 = A() a = objectmodel.instantiate(A, nonmovable=True) - return rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + a.next = a1 # 'a' is known young here, so no write barrier emitted + res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + rgc.collect() + objectmodel.keepalive_until_here(a) + return res return fn def test_instantiate_nonmovable(self): From noreply at buildbot.pypy.org Sun Oct 11 22:57:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 22:57:58 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: translation fix Message-ID: <20151011205758.987741C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80115:ff20fcc9fd50 Date: 2015-10-11 21:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ff20fcc9fd50/ Log: translation fix diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -918,10 +918,10 @@ d = op.args[1].value if d.get('nonmovable', False): raise UnsupportedMallocFlags(d) - if d.value['flavor'] == 'raw': + if d['flavor'] == 'raw': return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) # - if 
d.value.get('zero', False): + if d.get('zero', False): zero = True else: zero = False From noreply at buildbot.pypy.org Sun Oct 11 23:22:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 11 Oct 2015 23:22:33 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: crash cleanly if we manage to detect a bogus or dead object Message-ID: <20151011212233.BF3461C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80116:7ba53dd4f845 Date: 2015-10-11 22:26 +0100 http://bitbucket.org/pypy/pypy/changeset/7ba53dd4f845/ Log: crash cleanly if we manage to detect a bogus or dead object diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,6 +1,7 @@ import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib import rweaklist, objectmodel, jit @@ -53,5 +54,10 @@ if not gcref: raise oefmt(space.w_RuntimeError, "cannot use from_handle() on NULL pointer") - cd = annlowlevel.cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + cd = annlowlevel.cast_gcref_to_instance(W_Root, gcref) + # force an 'isinstance', to crash clearly if the handle is + # dead or bogus + if not isinstance(cd, cdataobj.W_CDataHandle): + raise oefmt(space.w_SystemError, + "ffi.from_handle(): dead or bogus object handle") return cd.w_keepalive From noreply at buildbot.pypy.org Mon Oct 12 09:28:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 09:28:53 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Port callbacks to the new model, and make it run untranslated too. 
Message-ID: <20151012072853.44B141C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80117:9a7cc64655b1 Date: 2015-10-12 09:29 +0200 http://bitbucket.org/pypy/pypy/changeset/9a7cc64655b1/ Log: Port callbacks to the new model, and make it run untranslated too. diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,14 +1,14 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc, handle +from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -19,6 +19,22 @@ # ____________________________________________________________ +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). 
+ cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -37,7 +53,8 @@ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -72,8 +89,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) # - handle_index = handle.get_handles(space).reserve_next_handle_index() - # cif_descr = self.getfunctype().cif_descr if not cif_descr: raise oefmt(space.w_NotImplementedError, @@ -81,16 +96,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, handle_index) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - _current_space.space = space - handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -221,12 +233,6 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) -class CurrentSpace: - def _cleanup_(self): - if hasattr(self, 'space'): - del self.space -_current_space = 
CurrentSpace() - def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care @@ -236,10 +242,8 @@ (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - space = _current_space.space - callback = handle.get_handles(space).fetch_handle(unique_id) - if callback is None or not isinstance(callback, W_CDataCallback): + callback = reveal_callback(ll_userdata) + if callback is None: # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -251,6 +255,7 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False try: must_leave = space.threadlocals.try_enter_thread(space) diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -294,9 +294,9 @@ CONSIDER_FN_AS_FNPTR) space = self.space if not space.is_none(w_python_callable): - return ccallback.W_CDataCallback(space, w_ctype, - w_python_callable, w_error, - w_onerror) + return ccallback.make_callback(space, w_ctype, + w_python_callable, w_error, + w_onerror) else: # decorator mode: returns a single-argument function return space.appexec([w_ctype, w_error, w_onerror], diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -24,8 +24,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType) def callback(space, w_ctype, w_callable, w_error=None, w_onerror=None): - from pypy.module._cffi_backend.ccallback import W_CDataCallback - return W_CDataCallback(space, w_ctype, w_callable, w_error, w_onerror) + from pypy.module._cffi_backend.ccallback import make_callback + return make_callback(space, w_ctype, w_callable, w_error, w_onerror) # ____________________________________________________________ 
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -4,27 +4,20 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rlib import rweaklist, objectmodel, jit -from rpython.rtyper import annlowlevel - - -class CffiHandles(rweaklist.RWeakListMixin): - def __init__(self, space): - self.initialize() - -def get_handles(space): - return space.fromcache(CffiHandles) +from rpython.rlib import rgc, objectmodel, jit # ____________________________________________________________ @jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - if not objectmodel.we_are_translated(): - py.test.skip("can't test handles untranslated for now") + # Allocate a handle as a nonmovable W_CDataHandle instance, which + # we can cast to a plain CCHARP. As long as the object is not freed, + # we can cast the CCHARP back to a W_CDataHandle with reveal_gcref(). 
new_cdataobj = objectmodel.instantiate(cdataobj.W_CDataHandle, nonmovable=True) - gcref = annlowlevel.cast_instance_to_gcref(new_cdataobj) - _cdata = rffi.cast(rffi.CCHARP, gcref) + gcref = rgc.cast_instance_to_gcref(new_cdataobj) + _cdata = rgc.hide_nonmovable_gcref(gcref) + _cdata = rffi.cast(rffi.CCHARP, _cdata) cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @@ -36,10 +29,6 @@ "needs 'void *', got '%s'", w_ctype.name) return _newp_handle(space, w_ctype, w_x) - at jit.dont_look_inside -def reveal_gcref(ptr): - return rffi.cast(llmemory.GCREF, ptr) - @unwrap_spec(w_cdata=cdataobj.W_CData) def from_handle(space, w_cdata): ctype = w_cdata.ctype @@ -49,15 +38,14 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - gcref = reveal_gcref(ptr) + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) # if not gcref: raise oefmt(space.w_RuntimeError, "cannot use from_handle() on NULL pointer") - cd = annlowlevel.cast_gcref_to_instance(W_Root, gcref) - # force an 'isinstance', to crash clearly if the handle is - # dead or bogus - if not isinstance(cd, cdataobj.W_CDataHandle): + cd = rgc.try_cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + if cd is None: raise oefmt(space.w_SystemError, "ffi.from_handle(): dead or bogus object handle") return cd.w_keepalive diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py deleted file mode 100644 --- a/pypy/module/_cffi_backend/test/test_handle.py +++ /dev/null @@ -1,44 +0,0 @@ -import random -from pypy.module._cffi_backend.handle import CffiHandles - - -class PseudoWeakRef(object): - _content = 42 - - def __call__(self): - return self._content - - -def test_cffi_handles_1(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert 
ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - assert len(ch.handles) <= 16384 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr - -def test_cffi_handles_2(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - # - if len(expected_content) > 20: - r = random.choice(list(expected_content)) - pwr = expected_content.pop(r) - pwr._content = None - # - assert len(ch.handles) < 100 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -480,7 +480,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation - __slots__ = ['_x'] + __slots__ = ['_x', '_handle'] def __init__(self, x): self._x = x def __hash__(self): @@ -529,6 +529,48 @@ return None try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' +_ffi_cache = None +def _fetch_ffi(): + global _ffi_cache + if _ffi_cache is None: + try: + import _cffi_backend + _ffi_cache = _cffi_backend.FFI() + except (ImportError, AttributeError): + import py + py.test.skip("need CFFI >= 1.0") + return _ffi_cache + + at jit.dont_look_inside +def hide_nonmovable_gcref(gcref): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + if we_are_translated(): + assert lltype.typeOf(gcref) == llmemory.GCREF + assert not can_move(gcref) + return rffi.cast(llmemory.Address, gcref) + else: + assert isinstance(gcref, _GcRef) + x = gcref._x + ffi = _fetch_ffi() + if not hasattr(x, '__handle'): + x.__handle = ffi.new_handle(x) + addr = int(ffi.cast("intptr_t", x.__handle)) + return rffi.cast(llmemory.Address, addr) + + at jit.dont_look_inside +def 
reveal_gcref(addr): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + assert lltype.typeOf(addr) == llmemory.Address + if we_are_translated(): + return rffi.cast(llmemory.GCREF, addr) + else: + addr = rffi.cast(lltype.Signed, addr) + if addr == 0: + return lltype.nullptr(llmemory.GCREF.TO) + ffi = _fetch_ffi() + x = ffi.from_handle(ffi.cast("void *", addr)) + return _GcRef(x) + # ------------------- implementation ------------------- _cache_s_list_of_gcrefs = None From noreply at buildbot.pypy.org Mon Oct 12 11:04:42 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 11:04:42 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed some debug code Message-ID: <20151012090442.E1F331C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80118:16e3439165bd Date: 2015-10-12 08:53 +0200 http://bitbucket.org/pypy/pypy/changeset/16e3439165bd/ Log: removed some debug code diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -844,7 +844,7 @@ def test_where(self): result = self.run("where") assert result == -40 - self.check_vectorized(1, 1) + self.check_vectorized(1, 0) def define_searchsorted(): return """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -288,6 +288,7 @@ def test_vectorize_guard(self): trace = self.parse_loop(""" [p0,p1,i0] + i100 = getarrayitem_raw_i(p0,i0,descr=int16arraydescr) i10 = getarrayitem_raw_i(p0,i0,descr=int32arraydescr) i20 = int_is_true(i10) guard_true(i20) [i20] diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ 
b/rpython/jit/metainterp/optimizeopt/vector.py @@ -85,7 +85,6 @@ renamer.rename(prefix_label) oplist = [] op1 = self.operations[2] - assert op1.getarg(0) is op1.getfailargs()[0] for op in self.operations: newop = op.copy() renamer.rename(newop) From noreply at buildbot.pypy.org Mon Oct 12 11:04:45 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 11:04:45 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: fixed the test suite (dependency, vecopt) Message-ID: <20151012090445.145EC1C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80119:d847518f11de Date: 2015-10-12 09:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d847518f11de/ Log: fixed the test suite (dependency, vecopt) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -686,7 +686,7 @@ assert pendingfields is not None if op.getdescr() is not None: descr = op.getdescr() - assert isinstance(descr, compile.ResumeAtPositionDescr) + assert isinstance(descr, compile.ResumeGuardDescr) else: descr = compile.invent_fail_descr_for_op(op.getopnum(), self) op.setdescr(descr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -7,7 +7,8 @@ IndexVar, MemoryRef, Node) from rpython.jit.metainterp.optimizeopt.vector import VectorLoop from rpython.jit.metainterp.optimizeopt.test.test_util import ( - LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets) + LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets, + FakeJitDriverStaticData) from rpython.jit.metainterp.resoperation import rop, ResOperation from 
rpython.jit.backend.llgraph.runner import ArrayDescr from rpython.jit.tool.oparser import OpParser @@ -106,9 +107,14 @@ jump = loop.operations[-1] loop = VectorLoop(label, loop.operations[0:-1], jump) loop.jump.setdescr(token) + class Optimizer(object): + metainterp_sd = FakeMetaInterpStaticData(self.cpu) + jitdriver_sd = FakeJitDriverStaticData() + opt = Optimizer() + opt.jitdriver_sd.vec = True for op in loop.operations: if op.is_guard() and not op.getdescr(): - descr = invent_fail_descr_for_op(op.getopnum(), None) + descr = invent_fail_descr_for_op(op.getopnum(), opt) op.setdescr(descr) return loop diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1315,118 +1315,6 @@ except NotAVectorizeableLoop: pass - def test_axis_sum(self): - # TODO - trace = """ - [i1, p10, i11, p8, i12, p3, p4, p13, i14, i15, p6, p9, i16, i17, i18, i19, i20, i21, i22, i23] - f24 = raw_load_f(i16, i12, descr=floatarraydescr) - guard_not_invalidated() [i1, p9, p8, p6, p4, p3, f24, i11, i15, p13, i12, i14, p10] - i26 = int_add(i12, 8) - i27 = getarrayitem_gc_f(p13, i1, descr=floatarraydescr) - i28 = int_is_zero(i27) - guard_false(i28) [i1, p9, p8, p6, p4, p3, f24, i26, i11, i15, p13, None, i14, p10] - f30 = raw_load_f(i17, i15, descr=floatarraydescr) - f31 = float_add(f30, f24) - raw_store(i18, i15, f31, descr=floatarraydescr) - i33 = int_add(i14, 1) - i34 = getarrayitem_gc_f(p13, i19, descr=floatarraydescr) - i35 = int_lt(i34, i20) - guard_true(i35) [i1, p9, p8, p6, p4, p3, i21, i34, i15, i33, i19, p13, f31, None, i26, i11, None, None, None, i14, p10] - i37 = int_add(i34, 1) - setarrayitem_gc(p13, i19, i37, descr=floatarraydescr) - i38 = int_add(i15, i22) - i39 = int_ge(i33, i23) - guard_false(i39) [i1, p9, p8, p6, p4, p3, i33, i38, None, None, i26, i11, None, p13, None, None, p10] - jump(i1, 
p10, i11, p8, i26, p3, p4, p13, i33, i38, p6, p9, i16, i17, i18, i19, i20, i21, i22, i23) - """ - loop = self.parse_loop(trace) - self.vectorize(loop) - - def test_cast_1(self): - # TODO - trace = self.parse_loop(""" - [i9, i10, p2, p11, i12, i13, p4, p5, p14, i15, p8, i16, p17, i18, i19, i20, i21, i22, i23] - i24 = raw_load_i(i20, i16, descr=float32arraydescr) - guard_not_invalidated() [p8, p5, p4, p2, i24, p17, i13, i12, i10, i19, p14, p11, i18, i15, i16, None] - i27 = int_add(i16, 4) - i28 = raw_load_i(i21, i19, descr=float32arraydescr) - i30 = int_add(i19, 4) - f31 = cast_singlefloat_to_float(i24) - f32 = cast_singlefloat_to_float(i28) - f33 = float_add(f31, f32) - i34 = cast_float_to_singlefloat(f33) - raw_store(i22, i13, i34, descr=float32arraydescr) - i36 = int_add(i12, 1) - i38 = int_add(i13, 4) - i39 = int_ge(i36, i23) - guard_false(i39) [p8, p5, p4, p2, i27, i28, i30, i24, i38, i36, p17, None, None, None, None, p14, p11, i18, i15, None, None] - jump(i24, i28, p2, p11, i36, i38, p4, p5, p14, i15, p8, i27, p17, i18, i30, i20, i21, i22, i23) - """) - opt = self.vectorize(trace) - self.debug_print_operations(trace) - - def test_truediv_abs_neg_float(self): - # TODO - trace = self.parse_loop(""" - [f9,p10,i11,p4,i12,p2,p5,p13,i14,p7,i15,p8,i16,f17,i18,i19] - f20 = raw_load_f(i16, i12, descr=floatarraydescr) - guard_not_invalidated() [p8, p7, p5, p4, p2, f20, None, i12, i11, p10, i15, i14, p13] - i23 = int_add(i12, 8) - f24 = float_truediv(f20, f17) - f25 = float_abs(f20) - f26 = float_neg(f20) - raw_store(i18, i15, f24, descr=floatarraydescr) - i26 = int_add(i14, 1) - i28 = int_add(i15, 8) - i29 = int_ge(i26, i19) - guard_false(i29) [p8, p7, p5, p4, p2, f20, i23, i28, None, p13] - jump(f20, p10, i11, p4, i23, p2, p5, p13, i26, p7, i28, p8, i16, f17, i18, i19) - """) - opt = self.vectorize(trace) - self.debug_print_operations(trace) - - - def test_all_guard(self): - # TODO - trace = """ - [p0, p3, i4, i5, i6, i7] - f8 = raw_load_f(i6, i5, descr=floatarraydescr) 
- guard_not_invalidated() [p0, f8, p3, i5, i4] - i9 = cast_float_to_int(f8) - i11 = int_and(i9, 255) - guard_false(i11) [p0, p3, i5, i4] - i13 = int_add(i4, 1) - i15 = int_add(i5, 8) - i16 = int_ge(i13, i7) - guard_false(i16) [p0, i13, i15, p3, None, None] - jump(p0, p3, i13, i15, i6, i7) - """ - loop = self.parse_loop(trace) - opt = self.vectorize(loop) - self.debug_print_operations(loop) - - def test_111(self): - trace = """ - [p0, p1, p2, p3, i4, p5, p6, p7, i8, p9, i10, p11] - guard_not_invalidated(descr=) [p1, p0, p2, p3, p5, p6, i4] - i12 = int_lt(i4, i8) - guard_true(i12, descr=) [p1, p0, p2, p3, p5, p6, i8, i4] - i13 = uint_ge(i4, i10) - guard_false(i13, descr=) [p1, p0, i10, i4, p9, p2, p3, p5, p6, None, None] - i15 = getarrayitem_gc_i(p11, i4, descr=arraydescr) - i17 = int_add_ovf(i15, 1) - guard_no_overflow(descr=) [p1, p0, i17, p2, p3, p5, p6, i15, None, i4] - setarrayitem_gc(p11, i4, i17, descr=arraydescr) - i19 = int_add(i4, 1) - i21 = getfield_raw_i(139972894828928, descr=) - i23 = int_lt(i21, 0) - guard_false(i23, descr=) [p1, p0, p2, p3, p5, p6, i19, None, None, None] - i24 = arraylen_gc(p11, descr=arraydescr) - jump(p0, p1, p2, p3, i19, p5, p6, p7, i8, p9, i10, p11) - """ - loop = self.parse_loop(trace) - opt = self.schedule(loop, with_guard_opt=True) - self.debug_print_operations(loop) class TestLLtype(BaseTestVectorize, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -84,7 +84,6 @@ prefix_label = self.prefix_label.copy() renamer.rename(prefix_label) oplist = [] - op1 = self.operations[2] for op in self.operations: newop = op.copy() renamer.rename(newop) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -81,6 +81,10 @@ def 
is_vector(self): return False + def returns_void(self): + return False + + def ResOperation(opnum, args, descr=None): cls = opclasses[opnum] op = cls() From noreply at buildbot.pypy.org Mon Oct 12 11:04:47 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 11:04:47 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: fixed costmodel tests Message-ID: <20151012090447.350241C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80120:a3b94268d07c Date: 2015-10-12 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a3b94268d07c/ Log: fixed costmodel tests diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -753,6 +753,7 @@ arg = self.ensure_unpacked(i, argument) if argument is not arg: fail_args[i] = arg + op.setfailargs(fail_args) def ensure_unpacked(self, index, arg): if arg in self.seen or arg.is_vector(): @@ -786,7 +787,6 @@ assert 0, "not allowed to rename void resop" assert off < vector.count assert not var.is_vector() - print "rename", var, off, "=>", vector self.box_to_vbox[var] = (off, vector) def remember_args_in_vector(self, pack, index, box): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py @@ -3,7 +3,7 @@ from rpython.jit.metainterp.history import TargetToken, JitCellToken, TreeLoop from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.optimizeopt.vector import (Pack, X86_CostModel, - NotAProfitableLoop, VectorizingOptimizer) + NotAProfitableLoop, VectorizingOptimizer, CostModel) from rpython.jit.metainterp.optimizeopt.schedule import VecScheduleState from 
rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin @@ -29,9 +29,60 @@ # i1 and i0 ... # but not i0, i2 # ... - print iv, 'is after', ov, "?", val == 1 + #print iv, 'is after', ov, "?", val == 1 return val == 1 +def prepost_savings(orig_func): + def func(self, *args): + f = getattr(self.proxy, orig_func.__name__) + before_savings = self.proxy.savings + r = f(*args) + after_savings = self.proxy.savings + print " CM %s (%d -> %d, diff: %d) " % (orig_func.__name__, + before_savings, after_savings, + (after_savings - before_savings),) + print " args: ", args + return r + return func + +class FakeCostModel(CostModel): + def __init__(self, proxy): + self.proxy = proxy + + def getsavings(self): + return self.proxy.savings + + @prepost_savings + def reset_savings(self): + raise NotImplementedError + + @prepost_savings + def record_cast_int(self, op): + raise NotImplementedError + + @prepost_savings + def record_pack_savings(self, pack, times): + raise NotImplementedError + + @prepost_savings + def record_vector_pack(self, box, index, count): + raise NotImplementedError + + @prepost_savings + def record_vector_unpack(self, box, index, count): + raise NotImplementedError + + @prepost_savings + def unpack_cost(self, op, index, count): + raise NotImplementedError + + @prepost_savings + def savings_for_pack(self, pack, times): + raise NotImplementedError + + def profitable(self): + return self.proxy.savings >= 0 + class CostModelBaseTest(SchedulerBaseTest): def savings(self, loop): @@ -50,10 +101,11 @@ print "pack: \n ", print '\n '.join([str(op.getoperation()) for op in pack.operations]) print - costmodel = X86_CostModel(self.cpu, 0) + costmodel = FakeCostModel(X86_CostModel(self.cpu, 0)) + costmodel.reset_savings() state = VecScheduleState(graph, opt.packset, self.cpu, costmodel) opt.schedule(state) - return costmodel.savings + return costmodel.getsavings() def 
assert_operations_match(self, loop_a, loop_b): assert equaloplists(loop_a.operations, loop_b.operations) @@ -91,8 +143,10 @@ f11 = raw_load_f(p0, i1, descr=double) guard_true(i0) [f10] """) + assert loop1.operations[2].getfailargs()[0] is loop1.operations[0] savings = self.savings(loop1) assert savings == 0 + assert loop1.operations[2].getfailargs()[0] is loop1.operations[-2] def test_load_2_unpack_1_index1(self): loop1 = self.parse_trace(""" diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -140,6 +140,9 @@ fmt = (indent, joinedargs, source, indent, joinedargs) src = "%s[%s]\n%s\n%sjump(%s)" % fmt loop = self.parse_loop(src) + # needed to assign the right number to the input + # arguments + [str(arg) for arg in loop.inputargs] loop.graph = FakeDependencyGraph(loop) return loop diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -559,6 +559,9 @@ def getfailargs(self): return self._fail_args + def getfailargs_copy(self): + return self._fail_args[:] + def setfailargs(self, fail_args): self._fail_args = fail_args From noreply at buildbot.pypy.org Mon Oct 12 11:17:53 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 11:17:53 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: merged default Message-ID: <20151012091753.4AA371C146A@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80121:ea700f0ebd08 Date: 2015-10-12 11:16 +0200 http://bitbucket.org/pypy/pypy/changeset/ea700f0ebd08/ Log: merged default diff too long, truncating to 2000 out of 5166 lines diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ 
-310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,6 +57,18 @@ .. branch: cffi-stdcall Win32: support '__stdcall' in CFFI. +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + .. branch: vecopt .. 
branch: vecopt-merge diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1058,6 +1058,14 @@ args = Arguments.frompacked(self, w_args, w_kwds) return self.call_args(w_callable, args) + def _try_fetch_pycode(self, w_func): + from pypy.interpreter.function import Function, Method + if isinstance(w_func, Method): + w_func = w_func.w_function + if isinstance(w_func, Function): + return w_func.code + return None + def call_function(self, w_func, *args_w): nargs = len(args_w) # used for pruning funccall versions if not self.config.objspace.disable_call_speedhacks and nargs < 5: diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -47,6 +47,7 @@ 'string': 'func.string', 'buffer': 'cbuffer.buffer', + 'memmove': 'func.memmove', 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -3,12 +3,12 @@ """ import sys, os -from rpython.rlib import clibffi, rweakref, jit, jit_libffi -from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here +from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc +from pypy.module._cffi_backend import cerrno, misc, handle from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -34,7 +34,7 @@ class W_CDataCallback(W_CData): - #_immutable_fields_ 
= ... + _immutable_fields_ = ['key_pycode'] w_onerror = None def __init__(self, space, ctype, w_callable, w_error, w_onerror): @@ -46,6 +46,7 @@ raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable + self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -64,8 +65,14 @@ convert_from_object_fficallback(fresult, self._closure.ll_error, w_error) # - self.unique_id = compute_unique_id(self) - global_callback_mapping.set(self.unique_id, self) + # We must setup the GIL here, in case the callback is invoked in + # some other non-Pythonic thread. This is the same as cffi on + # CPython. + if space.config.translation.thread: + from pypy.module.thread.os_thread import setup_threads + setup_threads(space) + # + handle_index = handle.get_handles(space).reserve_next_handle_index() # cif_descr = self.getfunctype().cif_descr if not cif_descr: @@ -74,7 +81,7 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, self.unique_id) + unique_id = rffi.cast(rffi.VOIDP, handle_index) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) @@ -82,12 +89,8 @@ raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) # - # We must setup the GIL here, in case the callback is invoked in - # some other non-Pythonic thread. This is the same as cffi on - # CPython. 
- if space.config.translation.thread: - from pypy.module.thread.os_thread import setup_threads - setup_threads(space) + _current_space.space = space + handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -105,6 +108,7 @@ def invoke(self, ll_args): space = self.space ctype = self.getfunctype() + ctype = jit.promote(ctype) args_w = [] for i, farg in enumerate(ctype.fargs): ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) @@ -127,9 +131,6 @@ keepalive_until_here(self) # to keep self._closure.ll_error alive -global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) - - def convert_from_object_fficallback(fresult, ll_res, w_res): space = fresult.space small_result = fresult.size < SIZE_OF_FFI_ARG @@ -200,8 +201,18 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") - at jit.jit_callback("CFFI") +def get_printable_location(key_pycode): + if key_pycode is None: + return 'cffi_callback ' + return 'cffi_callback ' + key_pycode.get_repr() + +jitdriver = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location) + def py_invoke_callback(callback, ll_res, ll_args): + jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) extra_line = '' try: w_res = callback.invoke(ll_args) @@ -210,6 +221,12 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) +class CurrentSpace: + def _cleanup_(self): + if hasattr(self, 'space'): + del self.space +_current_space = CurrentSpace() + def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care @@ -220,8 +237,9 @@ """ ll_res = rffi.cast(rffi.CCHARP, ll_res) unique_id = rffi.cast(lltype.Signed, ll_userdata) - callback = global_callback_mapping.get(unique_id) - if callback is None: + space = _current_space.space + callback = handle.get_handles(space).fetch_handle(unique_id) + if callback is None or not isinstance(callback, W_CDataCallback): # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -234,7 +252,6 @@ return # must_leave = False - space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) py_invoke_callback(callback, ll_res, ll_args) diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -18,6 +18,7 @@ _attrs_ = ['ctptr'] _immutable_fields_ = ['ctptr'] kind = "array" + is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -21,6 +21,7 @@ cast_anything = False is_primitive_integer = False + is_nonfunc_pointer_or_array = False kind = "?" 
def __init__(self, space, size, name, name_position): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -172,6 +172,7 @@ _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None + is_nonfunc_pointer_or_array = True def __init__(self, space, ctitem): from pypy.module._cffi_backend import ctypearray diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -391,6 +391,25 @@ return cerrno.getwinerror(self.space, code) + @unwrap_spec(n=int) + def descr_memmove(self, w_dest, w_src, n): + """\ +ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + +Like the C function memmove(), the memory areas may overlap; +apart from that it behaves like the C function memcpy(). + +'src' can be any cdata ptr or array, or any Python buffer object. +'dest' can be any cdata ptr or array, or a writable Python buffer +object. The size to copy, 'n', is always measured in bytes. 
+ +Unlike other methods, this one supports all Python buffer including +byte strings and bytearrays---but it still does not support +non-contiguous buffers.""" + # + return func.memmove(self.space, w_dest, w_src, n) + + @unwrap_spec(w_init=WrappedDefault(None)) def descr_new(self, w_arg, w_init): """\ @@ -623,6 +642,7 @@ gc = interp2app(W_FFIObject.descr_gc), getctype = interp2app(W_FFIObject.descr_getctype), integer_const = interp2app(W_FFIObject.descr_integer_const), + memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -1,3 +1,8 @@ +from rpython.rtyper.annlowlevel import llstr +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw +from rpython.rlib.objectmodel import keepalive_until_here + from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.module._cffi_backend import ctypeobj, cdataobj, allocator @@ -79,6 +84,26 @@ # ____________________________________________________________ +def _fetch_as_read_buffer(space, w_x): + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? 
+ try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + return buf + +def _fetch_as_write_buffer(space, w_x): + try: + buf = space.writebuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_WRITABLE) + return buf + @unwrap_spec(w_ctype=ctypeobj.W_CType) def from_buffer(space, w_ctype, w_x): from pypy.module._cffi_backend import ctypearray, ctypeprim @@ -88,14 +113,7 @@ raise oefmt(space.w_TypeError, "needs 'char[]', got '%s'", w_ctype.name) # - # xxx do we really need to implement the same mess as in CPython 2.7 - # w.r.t. buffers and memoryviews?? - try: - buf = space.readbuf_w(w_x) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - buf = space.buffer_w(w_x, space.BUF_SIMPLE) + buf = _fetch_as_read_buffer(space, w_x) try: _cdata = buf.get_raw_address() except ValueError: @@ -106,6 +124,76 @@ # return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) + +def unsafe_escaping_ptr_for_ptr_or_array(w_cdata): + if not w_cdata.ctype.is_nonfunc_pointer_or_array: + raise oefmt(w_cdata.space.w_TypeError, + "expected a pointer or array ctype, got '%s'", + w_cdata.ctype.name) + return w_cdata.unsafe_escaping_ptr() + +c_memmove = rffi.llexternal('memmove', [rffi.CCHARP, rffi.CCHARP, + rffi.SIZE_T], lltype.Void, + _nowrapper=True) + + at unwrap_spec(n=int) +def memmove(space, w_dest, w_src, n): + if n < 0: + raise oefmt(space.w_ValueError, "negative size") + + # cases... 
+ src_buf = None + src_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_src, cdataobj.W_CData): + src_data = unsafe_escaping_ptr_for_ptr_or_array(w_src) + src_is_ptr = True + else: + src_buf = _fetch_as_read_buffer(space, w_src) + try: + src_data = src_buf.get_raw_address() + src_is_ptr = True + except ValueError: + src_is_ptr = False + + if src_is_ptr: + src_string = None + else: + if n == src_buf.getlength(): + src_string = src_buf.as_str() + else: + src_string = src_buf.getslice(0, n, 1, n) + + dest_buf = None + dest_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_dest, cdataobj.W_CData): + dest_data = unsafe_escaping_ptr_for_ptr_or_array(w_dest) + dest_is_ptr = True + else: + dest_buf = _fetch_as_write_buffer(space, w_dest) + try: + dest_data = dest_buf.get_raw_address() + dest_is_ptr = True + except ValueError: + dest_is_ptr = False + + if dest_is_ptr: + if src_is_ptr: + c_memmove(dest_data, src_data, rffi.cast(rffi.SIZE_T, n)) + else: + copy_string_to_raw(llstr(src_string), dest_data, 0, n) + else: + if src_is_ptr: + for i in range(n): + dest_buf.setitem(i, src_data[i]) + else: + for i in range(n): + dest_buf.setitem(i, src_string[i]) + + keepalive_until_here(src_buf) + keepalive_until_here(dest_buf) + keepalive_until_here(w_src) + keepalive_until_here(w_dest) + # ____________________________________________________________ @unwrap_spec(w_cdata=cdataobj.W_CData) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -9,16 +9,16 @@ def __init__(self, space): self.initialize() -def get(space): +def get_handles(space): return space.fromcache(CffiHandles) # ____________________________________________________________ def _newp_handle(space, w_ctype, w_x): - index = get(space).reserve_next_handle_index() + index = get_handles(space).reserve_next_handle_index() _cdata = rffi.cast(rffi.CCHARP, index + 1) new_cdataobj = 
cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get(space).store_handle(index, new_cdataobj) + get_handles(space).store_handle(index, new_cdataobj) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -39,7 +39,7 @@ "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get(space).fetch_handle(index - 1) + original_cdataobj = get_handles(space).fetch_handle(index - 1) # if isinstance(original_cdataobj, cdataobj.W_CDataHandle): return original_cdataobj.w_keepalive diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3393,6 +3393,78 @@ check(4 | 8, "CHB", "GTB") check(4 | 16, "CHB", "ROB") +def test_memmove(): + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + Char = new_primitive_type("char") + CharA = new_array_type(new_pointer_type(Char), None) + p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678]) + memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + memmove(p + 4, newp(CharA, b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + a = array.array('H', [10000, 20000, 30000]) + p = newp(ShortA, 5) + memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 
999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, BufferError), memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + memmove(ba, b"EFGH", 4) + assert ba == bytearray(b"EFGHx") + +def test_memmove_sign_check(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault + +def test_memmove_bad_cdata(): + BInt = new_primitive_type("int") + p = cast(BInt, 42) + py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1) + py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1) + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -247,6 +247,63 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + def test_memmove(self): + import sys + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 
+ ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + + def test_memmove_buffer(self): + import _cffi_backend as _cffi1_backend + import array + ffi = _cffi1_backend.FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + + def test_memmove_readonly_readwrite(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_ffi_types(self): import _cffi_backend as _cffi1_backend CData = _cffi1_backend.FFI.CData diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -118,7 +118,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES: + if 
'st_blksize' in STAT_FIELD_TYPES: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -12,6 +12,7 @@ from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy import ufuncs +import pypy.module.micronumpy.constants as NPY from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root @@ -203,12 +204,12 @@ return shape, dtype def simple_new(space, nd, dims, typenum, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) return W_NDimArray.from_shape(space, shape, dtype) def simple_new_from_data(space, nd, dims, typenum, data, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, @@ -238,7 +239,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("strides must be NULL")) - order = 'C' if flags & NPY_C_CONTIGUOUS else 'F' + order = NPY.CORDER if flags & NPY_C_CONTIGUOUS else NPY.FORTRANORDER owning = True if flags & NPY_OWNDATA else False w_subtype = None diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -4,16 +4,17 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.descriptor import get_dtype_cache +import 
pypy.module.micronumpy.constants as NPY def scalar(space): dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.new_scalar(space, dtype, space.wrap(10.)) -def array(space, shape, order='C'): +def array(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) -def iarray(space, shape, order='C'): +def iarray(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_int64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) @@ -32,8 +33,8 @@ def test_FLAGS(self, space, api): s = array(space, [10]) - c = array(space, [10, 5, 3], order='C') - f = array(space, [10, 5, 3], order='F') + c = array(space, [10, 5, 3], order=NPY.CORDER) + f = array(space, [10, 5, 3], order=NPY.FORTRANORDER) assert api._PyArray_FLAGS(s) & 0x0001 assert api._PyArray_FLAGS(s) & 0x0002 assert api._PyArray_FLAGS(c) & 0x0001 diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -108,7 +108,8 @@ w_axis = space.wrap(0) if space.is_none(w_axis): args_w = [w_arg.reshape(space, - space.newlist([w_arg.descr_get_size(space)])) + space.newlist([w_arg.descr_get_size(space)]), + w_arg.get_order()) for w_arg in args_w] w_axis = space.wrap(0) dtype = args_w[0].get_dtype() @@ -140,7 +141,7 @@ dtype = find_result_type(space, args_w, []) # concatenate does not handle ndarray subtypes, it always returns a ndarray - res = W_NDimArray.from_shape(space, shape, dtype, 'C') + res = W_NDimArray.from_shape(space, shape, dtype, NPY.CORDER) chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,8 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', 
w_instance=None, zero=True): + def from_shape(space, shape, dtype, order=NPY.CORDER, + w_instance=None, zero=True): from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides if len(shape) > NPY.MAXDIMS: @@ -59,8 +60,9 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1, - order='C', owning=False, w_subtype=None, - w_base=None, writable=True, strides=None, start=0): + order=NPY.CORDER, owning=False, w_subtype=None, + w_base=None, writable=True, strides=None, + start=0): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import (calc_strides, calc_backstrides) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -56,6 +56,9 @@ jit.hint(len(backstrides), promote=True) return backstrides + def get_flags(self): + return self.flags + def getitem(self, index): return self.dtype.read(self, index, 0) @@ -89,17 +92,18 @@ def get_storage_size(self): return self.size - def reshape(self, orig_array, new_shape): + def reshape(self, orig_array, new_shape, order=NPY.ANYORDER): # Since we got to here, prod(new_shape) == self.size + order = support.get_order_as_CF(self.order, order) new_strides = None if self.size == 0: - new_strides, _ = calc_strides(new_shape, self.dtype, self.order) + new_strides, _ = calc_strides(new_shape, self.dtype, order) else: if len(self.get_shape()) == 0: new_strides = [self.dtype.elsize] * len(new_shape) else: new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) + self.get_strides(), order) if new_strides is None or len(new_strides) != len(new_shape): return None if new_strides is not None: @@ -303,10 +307,11 @@ return SliceArray(self.start, strides, backstrides, shape, self, orig_array) - def copy(self, space): + def copy(self, space, order=NPY.ANYORDER): + order = 
support.get_order_as_CF(self.order, order) strides, backstrides = calc_strides(self.get_shape(), self.dtype, - self.order) - impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, + order) + impl = ConcreteArray(self.get_shape(), self.dtype, order, strides, backstrides) return loop.setslice(space, self.get_shape(), impl, self) @@ -360,12 +365,12 @@ # but make the array storage contiguous in memory shape = self.get_shape() strides = self.get_strides() - if order not in ('C', 'F'): - raise oefmt(space.w_ValueError, "Unknown order %s in astype", order) + if order not in (NPY.KEEPORDER, NPY.FORTRANORDER, NPY.CORDER): + raise oefmt(space.w_ValueError, "Unknown order %d in astype", order) if len(strides) == 0: t_strides = [] backstrides = [] - elif order != self.order: + elif order in (NPY.FORTRANORDER, NPY.CORDER): t_strides, backstrides = calc_strides(shape, dtype, order) else: indx_array = range(len(strides)) @@ -378,6 +383,7 @@ t_strides[i] = base base *= shape[i] backstrides = calc_backstrides(t_strides, shape) + order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl @@ -429,6 +435,8 @@ self.shape = shape # already tested for overflow in from_shape_and_storage self.size = support.product(shape) * dtype.elsize + if order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "ConcreteArrayNotOwning but order is not 0,1 rather %d", order) self.order = order self.dtype = dtype self.strides = strides @@ -562,6 +570,8 @@ self.parent = parent self.storage = parent.storage self.gcstruct = parent.gcstruct + if parent.order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "SliceArray but parent order is not 0,1 rather %d", parent.order) self.order = parent.order self.dtype = dtype try: @@ -602,13 +612,13 @@ s = self.get_strides()[0] // dtype.elsize except 
IndexError: s = 1 - if self.order == 'C': + if self.order != NPY.FORTRANORDER: new_shape.reverse() for sh in new_shape: strides.append(s * dtype.elsize) backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) - if self.order == 'C': + if self.order != NPY.FORTRANORDER: strides.reverse() backstrides.reverse() new_shape.reverse() diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -77,9 +77,8 @@ elif order.startswith('K') or order.startswith('k'): return NPY.KEEPORDER else: - raise OperationError(space.w_TypeError, space.wrap( - "order not understood")) - + raise oefmt(space.w_TypeError, "Unknown order: '%s'", order) + return -1 def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -5,10 +5,10 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop, support -from pypy.module.micronumpy.base import ( +from pypy.module.micronumpy.base import (wrap_impl, W_NDimArray, convert_to_array, W_NumpyObject) -from pypy.module.micronumpy.converters import shape_converter -from . 
import constants as NPY +from pypy.module.micronumpy.converters import shape_converter, order_converter +import pypy.module.micronumpy.constants as NPY from .casting import scalar2dtype @@ -101,13 +101,8 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order == 'K': - order = 'C' - if order != 'C': # or order != 'F': - raise oefmt(space.w_ValueError, "Unknown order: %s", order) + w_order = space.wrap('C') + npy_order = order_converter(space, w_order, NPY.CORDER) if isinstance(w_object, W_NDimArray): if (dtype is None or w_object.get_dtype() is dtype): @@ -126,7 +121,7 @@ copy = True if copy: shape = w_object.get_shape() - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, w_object.implementation.getitem(0))) @@ -151,7 +146,7 @@ if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: # safe from overflow since from_shape checks w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) else: @@ -268,6 +263,7 @@ def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): + order = order_converter(space, w_order, NPY.CORDER) dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: @@ -281,7 +277,7 @@ support.product_check(shape) except OverflowError: raise oefmt(space.w_ValueError, "array is too big.") - return W_NDimArray.from_shape(space, shape, dtype=dtype, zero=zero) + return W_NDimArray.from_shape(space, shape, dtype, order, zero=zero) def empty(space, w_shape, w_dtype=None, 
w_order=None): return _zeros_or_empty(space, w_shape, w_dtype, w_order, zero=False) @@ -293,6 +289,7 @@ @unwrap_spec(subok=bool) def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): w_a = convert_to_array(space, w_a) + npy_order = order_converter(space, w_order, w_a.get_order()) if space.is_none(w_dtype): dtype = w_a.get_dtype() else: @@ -300,7 +297,16 @@ space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = descriptor.variable_dtype(space, dtype.char + '1') + if npy_order in (NPY.KEEPORDER, NPY.ANYORDER): + # Try to copy the stride pattern + impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER) + if subok: + w_type = space.type(w_a) + else: + w_type = None + return wrap_impl(space, w_type, w_a, impl) return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + order=npy_order, w_instance=w_a if subok else None, zero=False) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -777,7 +777,7 @@ def tostring(space, arr): builder = StringBuilder() iter, state = arr.create_iter() - w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') + w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype()) itemsize = arr.get_dtype().elsize with w_res_str.implementation as storage: res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -97,11 +97,15 @@ self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): - order = order_converter(space, w_order, NPY.CORDER) - if order == NPY.FORTRANORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) - return space.wrap(loop.tostring(space, self)) + try: + 
order = order_converter(space, w_order, NPY.CORDER) + except: + raise oefmt(space.w_TypeError, "order not understood") + order = support.get_order_as_CF(self.get_order(), order) + arr = self + if order != arr.get_order(): + arr = W_NDimArray(self.implementation.transpose(self, None)) + return space.wrap(loop.tostring(space, arr)) def getitem_filter(self, space, arr): if arr.ndims() > 1 and arr.get_shape() != self.get_shape(): @@ -365,11 +369,13 @@ return self.implementation.getitem(self.implementation.start) def descr_copy(self, space, w_order=None): - order = order_converter(space, w_order, NPY.KEEPORDER) - if order == NPY.FORTRANORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) - copy = self.implementation.copy(space) + if w_order is None: + order = NPY.KEEPORDER + elif space.isinstance_w(w_order, space.w_int): + order = space.int_w(w_order) + else: + order = order_converter(space, w_order, NPY.KEEPORDER) + copy = self.implementation.copy(space, order) w_subtype = space.type(self) return wrap_impl(space, w_subtype, self, copy) @@ -392,15 +398,15 @@ 'array does not have imaginary part to set') self.implementation.set_imag(space, self, w_value) - def reshape(self, space, w_shape): + def reshape(self, space, w_shape, order): new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) - new_impl = self.implementation.reshape(self, new_shape) + new_impl = self.implementation.reshape(self, new_shape, order) if new_impl is not None: return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data - arr = self.descr_copy(space) + arr = self.descr_copy(space, space.wrap(order)) if arr.get_size() > 0: - new_implementation = arr.implementation.reshape(self, new_shape) + new_implementation = arr.implementation.reshape(self, new_shape, order) if new_implementation is None: raise oefmt(space.w_ValueError, 'could not reshape array of size %d to shape %s', @@ -434,16 +440,13 @@ if order 
== NPY.KEEPORDER: raise OperationError(space.w_ValueError, space.wrap( "order 'K' is not permitted for reshaping")) - if order != NPY.CORDER and order != NPY.ANYORDER: - raise OperationError(space.w_NotImplementedError, space.wrap( - "unsupported value for order")) if len(args_w) == 1: if space.is_none(args_w[0]): return self.descr_view(space) w_shape = args_w[0] else: w_shape = space.newtuple(args_w) - return self.reshape(space, w_shape) + return self.reshape(space, w_shape, order) def descr_get_transpose(self, space, axes=None): return W_NDimArray(self.implementation.transpose(self, axes)) @@ -514,20 +517,8 @@ return space.newlist(l_w) def descr_ravel(self, space, w_order=None): - if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order == 'K' and is_c_contiguous(self.implementation): - for s in self.implementation.get_strides(): - if s < 0: - break - else: - order = 'C' - if order != 'C': - raise OperationError(space.w_NotImplementedError, space.wrap( - "order != 'C' only partially implemented")) - return self.reshape(space, space.wrap(-1)) + order = order_converter(space, w_order, self.get_order()) + return self.reshape(space, space.wrap(-1), order) @unwrap_spec(w_axis=WrappedDefault(None), w_out=WrappedDefault(None), @@ -541,14 +532,15 @@ space.wrap("axis unsupported for compress")) arr = self else: - arr = self.reshape(space, space.wrap(-1)) + arr = self.reshape(space, space.wrap(-1), self.get_order()) index = convert_to_array(space, w_obj) return arr.getitem_filter(space, index) def descr_flatten(self, space, w_order=None): + order = order_converter(space, w_order, self.get_order()) if self.is_scalar(): # scalars have no storage - return self.reshape(space, space.wrap(1)) + return self.reshape(space, space.wrap(1), order) w_res = self.descr_ravel(space, w_order) if w_res.implementation.storage == self.implementation.storage: return w_res.descr_copy(space) @@ -631,7 +623,7 @@ space.newtuple([space.wrap(addr), space.w_False])) 
space.setitem_str(w_d, 'shape', self.descr_get_shape(space)) space.setitem_str(w_d, 'typestr', self.get_dtype().descr_get_str(space)) - if self.implementation.order == 'C': + if self.implementation.order == NPY.CORDER: # Array is contiguous, no strides in the interface. strides = space.w_None else: @@ -690,8 +682,9 @@ "according to the rule %s", space.str_w(self.get_dtype().descr_repr(space)), space.str_w(new_dtype.descr_repr(space)), casting) - order = support.get_order_as_CF(self.get_order(), order) - if (not copy and new_dtype == self.get_dtype() and order == self.get_order() + order = order_converter(space, space.wrap(order), self.get_order()) + if (not copy and new_dtype == self.get_dtype() + and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order()) and (subok or type(self) is W_NDimArray)): return self impl = self.implementation @@ -972,7 +965,7 @@ raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) # Adapt the smallest dim to the new itemsize - if self.get_order() == 'F': + if self.get_order() == NPY.FORTRANORDER: minstride = strides[0] mini = 0 else: @@ -1136,7 +1129,7 @@ matches = True if dtype != out.get_dtype(): matches = False - elif not out.implementation.order == "C": + elif not out.implementation.order == NPY.CORDER: matches = False elif out.ndims() != len(out_shape): matches = False @@ -1195,7 +1188,7 @@ out = out_converter(space, w_out) if space.is_none(w_axis): w_axis = space.wrap(0) - arr = self.reshape(space, space.wrap(-1)) + arr = self.reshape(space, space.wrap(-1), self.get_order()) else: arr = self ufunc = getattr(ufuncs.get(space), ufunc_name) @@ -1408,10 +1401,6 @@ strides=strides) order = order_converter(space, w_order, NPY.CORDER) - if order == NPY.CORDER: - order = 'C' - else: - order = 'F' if space.is_w(w_subtype, space.gettypefor(W_NDimArray)): return W_NDimArray.from_shape(space, shape, dtype, order) strides, backstrides = calc_strides(shape, dtype.base, order) @@ -1448,7 
+1437,7 @@ raise OperationError(space.w_ValueError, space.wrap( "subtype must be a subtype of ndarray, not a class instance")) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - buf_len, 'C', False, w_subtype, + buf_len, NPY.CORDER, False, w_subtype, strides=strides) else: return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -11,6 +11,8 @@ shape_agreement, shape_agreement_multiple) from pypy.module.micronumpy.casting import (find_binop_result_dtype, can_cast_array, can_cast_type) +import pypy.module.micronumpy.constants as NPY +from pypy.module.micronumpy.converters import order_converter def parse_op_arg(space, name, w_op_flags, n, parse_one_arg): @@ -142,14 +144,13 @@ 'Iterator flag EXTERNAL_LOOP cannot be used if an index or ' 'multi-index is being tracked') - -def is_backward(imp, order): - if order == 'K' or (order == 'C' and imp.order == 'C'): +def is_backward(imp_order, order): + if imp_order == order: return False - elif order == 'F' and imp.order == 'C': + if order == NPY.KEEPORDER: + return False + else: return True - else: - raise NotImplementedError('not implemented yet') class OperandIter(ArrayIter): @@ -234,7 +235,7 @@ continue assert isinstance(op_it, ArrayIter) indx = len(op_it.strides) - if it.order == 'F': + if it.order == NPY.FORTRANORDER: indx = len(op_it.array.strides) - indx assert indx >=0 astrides = op_it.array.strides[indx:] @@ -250,7 +251,7 @@ it.order) it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: - if it.order == 'F': + if it.order == NPY.FORTRANORDER: it.shape = it.shape[1:] else: it.shape = it.shape[:-1] @@ -261,10 +262,10 @@ break # Always coalesce at least one for i in range(len(it.iters)): - new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], it, 'C') + new_iter = coalesce_iter(it.iters[i][0], it.op_flags[i], 
it, NPY.CORDER) it.iters[i] = (new_iter, new_iter.reset()) if len(it.shape) > 1: - if it.order == 'F': + if it.order == NPY.FORTRANORDER: it.shape = it.shape[1:] else: it.shape = it.shape[:-1] @@ -287,7 +288,7 @@ return old_iter strides = old_iter.strides backstrides = old_iter.backstrides - if order == 'F': + if order == NPY.FORTRANORDER: new_shape = shape[1:] new_strides = strides[1:] new_backstrides = backstrides[1:] @@ -346,8 +347,8 @@ class W_NDIter(W_NumpyObject): _immutable_fields_ = ['ndim', ] def __init__(self, space, w_seq, w_flags, w_op_flags, w_op_dtypes, - w_casting, w_op_axes, w_itershape, buffersize=0, order='K'): - self.order = order + w_casting, w_op_axes, w_itershape, buffersize=0, + order=NPY.KEEPORDER): self.external_loop = False self.buffered = False self.tracked_index = '' @@ -375,7 +376,25 @@ for w_elem in w_seq_as_list] else: self.seq = [convert_to_array(space, w_seq)] - + if order == NPY.ANYORDER: + # 'A' means "'F' order if all the arrays are Fortran contiguous, + # 'C' order otherwise" + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_flags() & NPY.ARRAY_F_CONTIGUOUS): + break + else: + order = NPY.FORTRANORDER + elif order == NPY.KEEPORDER: + # 'K' means "as close to the order the array elements appear in + # memory as possible", so match self.order to seq.order + order = NPY.CORDER + for s in self.seq: + if s and not(s.get_order() == NPY.FORTRANORDER): + break + else: + order = NPY.FORTRANORDER + self.order = order parse_func_flags(space, self, w_flags) self.op_flags = parse_op_arg(space, 'op_flags', w_op_flags, len(self.seq), parse_op_flag) @@ -439,12 +458,15 @@ str(self.shape)) if self.tracked_index != "": - if self.order == "K": - self.order = self.seq[0].implementation.order + order = self.order + if order == NPY.KEEPORDER: + order = self.seq[0].implementation.order if self.tracked_index == "multi": backward = False else: - backward = self.order != self.tracked_index + backward = (( + order == NPY.CORDER and 
self.tracked_index != 'C') or ( + order == NPY.FORTRANORDER and self.tracked_index != 'F')) self.index_iter = IndexIterator(self.shape, backward=backward) # handle w_op_dtypes part 2: copy where needed if possible @@ -456,7 +478,6 @@ self.dtypes[i] = seq_d elif self_d != seq_d: impl = self.seq[i].implementation - order = support.get_order_as_CF(impl.order, self.order) if self.buffered or 'r' in self.op_flags[i].tmp_copy: if not can_cast_array( space, self.seq[i], self_d, self.casting): @@ -466,7 +487,7 @@ space.str_w(seq_d.descr_repr(space)), space.str_w(self_d.descr_repr(space)), self.casting) - + order = support.get_order_as_CF(impl.order, self.order) new_impl = impl.astype(space, self_d, order).copy(space) self.seq[i] = W_NDimArray(new_impl) else: @@ -484,7 +505,7 @@ space.str_w(self_d.descr_repr(space)), space.str_w(seq_d.descr_repr(space)), i, self.casting) - elif self.buffered: + elif self.buffered and not (self.external_loop and len(self.seq)<2): for i in range(len(self.seq)): if i not in outargs: self.seq[i] = self.seq[i].descr_copy(space, @@ -506,12 +527,19 @@ def get_iter(self, space, i): arr = self.seq[i] - dtype = self.dtypes[i] - shape = self.shape imp = arr.implementation - backward = is_backward(imp, self.order) if arr.is_scalar(): return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) + shape = self.shape + if (self.external_loop and len(self.seq)<2 and self.buffered): + # Special case, always return a memory-ordered iterator + stride = imp.dtype.elsize + backstride = imp.size * stride - stride + return ConcreteIter(imp, imp.get_size(), + [support.product(shape)], [stride], [backstride], + self.op_flags[i], self) + backward = imp.order != self.order + # XXX cleanup needed if (abs(imp.strides[0]) < abs(imp.strides[-1]) and not backward) or \ (abs(imp.strides[0]) > abs(imp.strides[-1]) and backward): # flip the strides. Is this always true for multidimension? 
@@ -704,13 +732,15 @@ @unwrap_spec(w_flags=WrappedDefault(None), w_op_flags=WrappedDefault(None), - w_op_dtypes=WrappedDefault(None), order=str, + w_op_dtypes=WrappedDefault(None), w_order=WrappedDefault(None), w_casting=WrappedDefault(None), w_op_axes=WrappedDefault(None), - w_itershape=WrappedDefault(None), buffersize=int) + w_itershape=WrappedDefault(None), w_buffersize=WrappedDefault(0)) def descr_new_nditer(space, w_subtype, w_seq, w_flags, w_op_flags, w_op_dtypes, - w_casting, w_op_axes, w_itershape, buffersize=0, order='K'): + w_casting, w_op_axes, w_itershape, w_buffersize, w_order): + npy_order = order_converter(space, w_order, NPY.KEEPORDER) + buffersize = space.int_w(w_buffersize) return W_NDIter(space, w_seq, w_flags, w_op_flags, w_op_dtypes, w_casting, w_op_axes, - w_itershape, buffersize, order) + w_itershape, buffersize, npy_order) W_NDIter.typedef = TypeDef('numpy.nditer', __new__ = interp2app(descr_new_nditer), diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -311,14 +311,14 @@ backstrides = [] s = 1 shape_rev = shape[:] - if order == 'C': + if order in [NPY.CORDER, NPY.ANYORDER]: shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) strides.append(s * dtype.elsize) backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit - if order == 'C': + if order in [NPY.CORDER, NPY.ANYORDER]: strides.reverse() backstrides.reverse() return strides, backstrides @@ -346,7 +346,7 @@ last_step = 1 oldI = 0 new_strides = [] - if order == 'F': + if order == NPY.FORTRANORDER: for i in range(len(old_shape)): steps.append(old_strides[i] / last_step) last_step *= old_shape[i] @@ -366,7 +366,7 @@ if oldI < len(old_shape): cur_step = steps[oldI] n_old_elems_to_use *= old_shape[oldI] - elif order == 'C': + else: for i in range(len(old_shape) - 1, -1, -1): steps.insert(0, old_strides[i] / last_step) last_step *= old_shape[i] diff --git 
a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -7,6 +7,7 @@ from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objspace import StdObjSpace +from pypy.module.micronumpy import constants as NPY def issequence_w(space, w_obj): from pypy.module.micronumpy.base import W_NDimArray @@ -176,15 +177,11 @@ return space.is_true(space.gt(w_priority_r, w_priority_l)) def get_order_as_CF(proto_order, req_order): - if req_order == 'C': - return 'C' - elif req_order == 'F': - return 'F' - elif req_order == 'K': - return proto_order - elif req_order == 'A': - return proto_order - + if req_order == NPY.CORDER: + return NPY.CORDER + elif req_order == NPY.FORTRANORDER: + return NPY.FORTRANORDER + return proto_order def descr_set_docstring(space, w_obj, w_docstring): if not isinstance(space, StdObjSpace): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -6,6 +6,7 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.strides import Chunk, new_view, EllipsisChunk from pypy.module.micronumpy.ndarray import W_NDimArray +import pypy.module.micronumpy.constants as NPY from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest @@ -45,20 +46,20 @@ return self.space.newtuple(args_w) def test_strides_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) assert a.strides == [1, 10, 50] assert a.backstrides == [9, 40, 100] def test_strides_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) assert a.strides == [15, 
3, 1] assert a.backstrides == [135, 12, 2] - a = create_array(self.space, [1, 0, 7], MockDtype(), order='C') + a = create_array(self.space, [1, 0, 7], MockDtype(), order=NPY.CORDER) assert a.strides == [7, 7, 1] assert a.backstrides == [0, 0, 6] def test_create_slice_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 3 assert s.strides == [10, 50] @@ -77,7 +78,7 @@ assert s.shape == [10, 3] def test_create_slice_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(3, 0, 0, 1)]) assert s.start == 45 assert s.strides == [3, 1] @@ -97,7 +98,7 @@ assert s.shape == [10, 3] def test_slice_of_slice_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 5 s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) @@ -114,7 +115,7 @@ assert s2.start == 1 * 15 + 2 * 3 def test_slice_of_slice_c(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(5, 0, 0, 1)]) assert s.start == 15 * 5 s2 = create_slice(self.space, s, [Chunk(3, 0, 0, 1)]) @@ -131,14 +132,14 @@ assert s2.start == 1 * 15 + 2 * 3 def test_negative_step_f(self): - a = create_array(self.space, [10, 5, 3], MockDtype(), order='F') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.FORTRANORDER) s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 9 assert s.strides == [-2, 10, 50] assert s.backstrides == [-8, 40, 100] def test_negative_step_c(self): - a = create_array(self.space, 
[10, 5, 3], MockDtype(), order='C') + a = create_array(self.space, [10, 5, 3], MockDtype(), order=NPY.CORDER) s = create_slice(self.space, a, [Chunk(9, -1, -2, 5)]) assert s.start == 135 assert s.strides == [-30, 3, 1] @@ -155,17 +156,17 @@ def test_calc_new_strides(self): from pypy.module.micronumpy.strides import calc_new_strides - assert calc_new_strides([2, 4], [4, 2], [4, 2], "C") == [8, 2] - assert calc_new_strides([2, 4, 3], [8, 3], [1, 16], 'F') == [1, 2, 16] - assert calc_new_strides([2, 3, 4], [8, 3], [1, 16], 'F') is None - assert calc_new_strides([24], [2, 4, 3], [48, 6, 1], 'C') is None - assert calc_new_strides([24], [2, 4, 3], [24, 6, 2], 'C') == [2] - assert calc_new_strides([105, 1], [3, 5, 7], [35, 7, 1],'C') == [1, 1] - assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],'C') == [105, 1] - assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],'F') is None - assert calc_new_strides([1, 1, 1, 105, 1], [15, 7], [7, 1],'C') == \ + assert calc_new_strides([2, 4], [4, 2], [4, 2], NPY.CORDER) == [8, 2] + assert calc_new_strides([2, 4, 3], [8, 3], [1, 16], NPY.FORTRANORDER) == [1, 2, 16] + assert calc_new_strides([2, 3, 4], [8, 3], [1, 16], NPY.FORTRANORDER) is None + assert calc_new_strides([24], [2, 4, 3], [48, 6, 1], NPY.CORDER) is None + assert calc_new_strides([24], [2, 4, 3], [24, 6, 2], NPY.CORDER) == [2] + assert calc_new_strides([105, 1], [3, 5, 7], [35, 7, 1],NPY.CORDER) == [1, 1] + assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],NPY.CORDER) == [105, 1] + assert calc_new_strides([1, 105], [3, 5, 7], [35, 7, 1],NPY.FORTRANORDER) is None + assert calc_new_strides([1, 1, 1, 105, 1], [15, 7], [7, 1],NPY.CORDER) == \ [105, 105, 105, 1, 1] - assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],'F') == \ + assert calc_new_strides([1, 1, 105, 1, 1], [7, 15], [1, 7],NPY.FORTRANORDER) == \ [1, 1, 1, 105, 105] def test_find_shape(self): @@ -444,6 +445,8 @@ b = np.empty_like(A((2, 3)), subok=False) assert b.shape == (2, 3) assert 
type(b) is np.ndarray + b = np.empty_like(np.array(3.0), order='A') + assert type(b) is np.ndarray def test_size(self): from numpy import array,arange,cos @@ -534,10 +537,10 @@ assert (b == a).all() b = a.copy(order='A') assert (b == a).all() - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.copy, order='F') - raises(NotImplementedError, a.copy, order=True) + b = a.copy(order='F') + assert (b == a).all() + b = a.copy(order=True) + assert (b == a).all() def test_iterator_init(self): from numpy import array @@ -918,9 +921,11 @@ assert a.reshape((0,), order='A').shape == (0,) raises(TypeError, a.reshape, (0,), badarg="C") raises(ValueError, a.reshape, (0,), order="K") - import sys - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.reshape, (0,), order='F') + b = a.reshape((0,), order='F') + assert b.shape == (0,) + a = array(range(24), 'uint8') + assert a.reshape([2, 3, 4], order=True).strides ==(1, 2, 6) + assert a.reshape([2, 3, 4], order=False).strides ==(12, 4, 1) def test_slice_reshape(self): from numpy import zeros, arange @@ -2676,11 +2681,11 @@ assert a[1][2][1] == 15 def test_create_order(self): - import sys, numpy as np + import numpy as np for order in [False, True, 'C', 'F']: a = np.empty((2, 3), float, order=order) assert a.shape == (2, 3) - if order in [True, 'F'] and '__pypy__' not in sys.builtin_module_names: + if order in [True, 'F']: assert a.flags['F'] assert not a.flags['C'] else: @@ -3577,10 +3582,7 @@ assert a.tostring(order) == '\x01\x02\x03\x04' import sys for order in (True, 'F'): - if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.tostring, order) - else: - assert a.tostring(order) == '\x01\x03\x02\x04' + assert a.tostring(order) == '\x01\x03\x02\x04' assert array(2.2-1.1j, dtype='>c16').tostring() == \ '@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a' assert array(2.2-1.1j, dtype=' Author: Richard Plangger Branch: vecopt-merge Changeset: 
r80122:4bbebd20bbe3 Date: 2015-10-12 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/4bbebd20bbe3/ Log: removed parameter diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -60,7 +60,7 @@ return True def ensure_operations(self, opstrlist, trace, inthatorder=True): - oparse = OpParser('', self.cpu, self.namespace, 'lltype', None, + oparse = OpParser('', self.cpu, self.namespace, None, None, True, None) oplist = [] for op_str in opstrlist: From noreply at buildbot.pypy.org Mon Oct 12 11:19:23 2015 From: noreply at buildbot.pypy.org (fijal) Date: Mon, 12 Oct 2015 11:19:23 +0200 (CEST) Subject: [pypy-commit] pypy default: mac support for vmprof Message-ID: <20151012091923.DFCAB1C146A@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80123:4444053b6b16 Date: 2015-10-12 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/4444053b6b16/ Log: mac support for vmprof diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -124,15 +125,17 @@ # that don't start with \t are silently ignored (: WAT!?) 
target.write("""\ \t.text +\t.section\t__TEXT,__text,regular,pure_instructions \t.globl\t%(tramp_name)s +\t.align\t4, 0x90 %(type_decl)s %(tramp_name)s: \t.cfi_startproc \tpushq\t%(reg)s \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s +%(extra_align)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; 
#define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end 
of file From noreply at buildbot.pypy.org Mon Oct 12 11:28:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 11:28:31 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Simplify again the code by moving the special logic into its own function Message-ID: <20151012092831.DEF571C146A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80124:75e8da1995d8 Date: 2015-10-12 11:28 +0200 http://bitbucket.org/pypy/pypy/changeset/75e8da1995d8/ Log: Simplify again the code by moving the special logic into its own function diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -172,6 +172,9 @@ def can_move(self, addr): return False + def malloc_fixedsize_nonmovable(self, typeid): + raise MemoryError + def pin(self, addr): return False diff --git a/rpython/memory/gc/generation.py b/rpython/memory/gc/generation.py --- a/rpython/memory/gc/generation.py +++ b/rpython/memory/gc/generation.py @@ -170,10 +170,7 @@ def malloc_fixedsize_clear(self, typeid, size, has_finalizer=False, is_finalizer_light=False, - contains_weakptr=False, - nonmovable=False): - if nonmovable: - raise MemoryError + contains_weakptr=False): if (has_finalizer or (raw_malloc_usage(size) > self.lb_young_fixedsize and raw_malloc_usage(size) > self.largest_young_fixedsize)): diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -587,8 +587,7 @@ def malloc_fixedsize(self, typeid, size, needs_finalizer=False, is_finalizer_light=False, - contains_weakptr=False, - nonmovable=False): + contains_weakptr=False): size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size rawtotalsize = raw_malloc_usage(totalsize) @@ -604,7 +603,7 @@ # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a 
rawmalloc. The following check # should be constant-folded. - elif rawtotalsize > self.nonlarge_max or nonmovable: + elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") obj = self.external_malloc(typeid, 0, alloc_young=True) @@ -693,6 +692,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -509,8 +509,7 @@ def malloc_fixedsize_clear(self, typeid, size, needs_finalizer=False, is_finalizer_light=False, - contains_weakptr=False, - nonmovable=False): + contains_weakptr=False): size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size rawtotalsize = raw_malloc_usage(totalsize) @@ -526,7 +525,7 @@ # If totalsize is greater than nonlarge_max (which should never be # the case in practice), ask for a rawmalloc. The following check # should be constant-folded. 
- elif rawtotalsize > self.nonlarge_max or nonmovable: + elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") obj = self.external_malloc(typeid, 0, alloc_young=True) @@ -615,6 +614,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" self.minor_collection() diff --git a/rpython/memory/gc/semispace.py b/rpython/memory/gc/semispace.py --- a/rpython/memory/gc/semispace.py +++ b/rpython/memory/gc/semispace.py @@ -98,10 +98,7 @@ def malloc_fixedsize_clear(self, typeid16, size, has_finalizer=False, is_finalizer_light=False, - contains_weakptr=False, - nonmovable=False): - if nonmovable: - raise MemoryError + contains_weakptr=False): size_gc_header = self.gcheaderbuilder.size_gc_header totalsize = size_gc_header + size result = self.free diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -305,7 +305,6 @@ annmodel.SomeInteger(nonneg=True), annmodel.SomeBool(), annmodel.SomeBool(), - annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) self.malloc_varsize_ptr = getfn( @@ -321,7 +320,6 @@ annmodel.SomeInteger(nonneg=True), annmodel.SomeBool(), annmodel.SomeBool(), - annmodel.SomeBool(), annmodel.SomeBool()], s_gcref, inline = False) self.malloc_varsize_ptr = getfn( @@ -365,7 +363,7 @@ raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality") # in some GCs we can inline the common case of - # malloc_fixedsize(typeid, size, False, False, False, False) + # malloc_fixedsize(typeid, size, False, False, False) if getattr(GCClass, 'inline_simple_malloc', False): 
# make a copy of this function so that it gets annotated # independently and the constants are folded inside @@ -384,7 +382,7 @@ malloc_fast, [s_gc, s_typeid16, annmodel.SomeInteger(nonneg=True), - s_False, s_False, s_False, s_False], s_gcref, + s_False, s_False, s_False], s_gcref, inline = True) else: self.malloc_fast_ptr = None @@ -533,6 +531,9 @@ getfn(func, [SomeAddress()], annmodel.s_None) + self.malloc_nonmovable_ptr = getfn(GCClass.malloc_fixedsize_nonmovable, + [s_gc, s_typeid16], + s_gcref) def create_custom_trace_funcs(self, gc, rtyper): custom_trace_funcs = tuple(rtyper.custom_trace_funcs) @@ -759,21 +760,22 @@ c_has_light_finalizer = rmodel.inputconst(lltype.Bool, has_light_finalizer) - if not op.opname.endswith('_varsize') and not flags.get('varsize'): + if flags.get('nonmovable'): + assert op.opname == 'malloc' + assert not flags.get('varsize') + malloc_ptr = self.malloc_nonmovable_ptr + args = [self.c_const_gc, c_type_id] + elif not op.opname.endswith('_varsize') and not flags.get('varsize'): zero = flags.get('zero', False) - c_nonmovable = rmodel.inputconst(lltype.Bool, - flags.get('nonmovable', False)) if (self.malloc_fast_ptr is not None and not c_has_finalizer.value and - not c_nonmovable.value and (self.malloc_fast_is_clearing or not zero)): malloc_ptr = self.malloc_fast_ptr else: malloc_ptr = self.malloc_fixedsize_ptr args = [self.c_const_gc, c_type_id, c_size, c_has_finalizer, c_has_light_finalizer, - rmodel.inputconst(lltype.Bool, False), - c_nonmovable] + rmodel.inputconst(lltype.Bool, False)] else: assert not c_has_finalizer.value info_varsize = self.layoutbuilder.get_info_varsize(type_id) @@ -914,12 +916,11 @@ [v_typeid, v_size, v_has_finalizer, v_has_light_finalizer, v_contains_weakptr] = op.args livevars = self.push_roots(hop) - c_nonmovable = rmodel.inputconst(lltype.Bool, False) hop.genop("direct_call", [self.malloc_fixedsize_ptr, self.c_const_gc, v_typeid, v_size, v_has_finalizer, v_has_light_finalizer, - v_contains_weakptr, 
c_nonmovable], + v_contains_weakptr], resultvar=op.result) self.pop_roots(hop, livevars) @@ -1025,9 +1026,8 @@ malloc_ptr = self.malloc_fixedsize_ptr c_false = rmodel.inputconst(lltype.Bool, False) c_has_weakptr = rmodel.inputconst(lltype.Bool, True) - c_nonmovable = rmodel.inputconst(lltype.Bool, False) args = [self.c_const_gc, c_type_id, c_size, - c_false, c_false, c_has_weakptr, c_nonmovable] + c_false, c_false, c_has_weakptr] # push and pop the current live variables *including* the argument # to the weakref_create operation, which must be kept alive and From noreply at buildbot.pypy.org Mon Oct 12 11:31:10 2015 From: noreply at buildbot.pypy.org (jerith) Date: Mon, 12 Oct 2015 11:31:10 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: All the rest of the unrecursion. Message-ID: <20151012093110.766AB1C146A@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r80125:9945934d8515 Date: 2015-10-12 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/9945934d8515/ Log: All the rest of the unrecursion. 
diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -26,7 +26,10 @@ for arg in op.getarglist(): self.optimizer.force_box(arg, self) - self.emit_operation(op) + return self.emit(op) + + def propagate_postprocess(self, op, oldop): + pass def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -144,7 +144,7 @@ if a is optheap.postponed_op: optheap.emit_postponed_op() break - optheap.next_optimization.propagate_forward(op) + optheap.emit_extra(op, emit=False) if not can_cache: return # Once it is done, we can put at least one piece of information @@ -228,7 +228,7 @@ if self.postponed_op: postponed_op = self.postponed_op self.postponed_op = None - self.next_optimization.propagate_forward(postponed_op) + self.emit_extra(postponed_op, emit=False) def produce_potential_short_preamble_ops(self, sb): descrkeys = self.cached_fields.keys() @@ -288,14 +288,14 @@ cf = submap[index] = ArrayCachedField(index) return cf - def emit_operation(self, op): + def emit(self, op, callback_func=None, *callback_args): self.emitting_operation(op) self.emit_postponed_op() if (op.is_comparison() or op.is_call_may_force() or op.is_ovf()): self.postponed_op = op else: - Optimization.emit_operation(self, op) + return Optimization.emit(self, op, callback_func, *callback_args) def emitting_operation(self, op): if op.has_no_side_effect(): @@ -344,7 +344,7 @@ if oopspecindex == EffectInfo.OS_DICT_LOOKUP: if self._optimize_CALL_DICT_LOOKUP(op): return - self.emit_operation(op) + return self.emit(op) optimize_CALL_F = optimize_CALL_I optimize_CALL_R = optimize_CALL_I optimize_CALL_N = optimize_CALL_I @@ -402,7 +402,7 @@ def 
optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: return - self.emit_operation(op) + return self.emit(op) optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION @@ -499,15 +499,20 @@ return # default case: produce the operation self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - self.optimize_GETFIELD_GC_I_callback(op, structinfo, cf) + # return self.emit(op) + return self.emit(op) - def optimize_GETFIELD_GC_I_callback(self, op, structinfo, cf): + def postprocess_GETFIELD_GC_I(self, op, oldop): # then remember the result of reading the field + structinfo = self.ensure_ptr_info_arg0(op) + cf = self.field_cache(op.getdescr()) structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I + postprocess_GETFIELD_GC_R = postprocess_GETFIELD_GC_I + postprocess_GETFIELD_GC_F = postprocess_GETFIELD_GC_I + def optimize_GETFIELD_GC_PURE_I(self, op): structinfo = self.ensure_ptr_info_arg0(op) cf = self.field_cache(op.getdescr()) @@ -517,7 +522,7 @@ return # default case: produce the operation self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return self.emit(op) optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I @@ -554,12 +559,16 @@ self.getintbound(op.getarg(1))) # default case: produce the operation self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - self.optimize_GETARRAYITEM_GC_I_callback(op, cf, arrayinfo, indexb) + # return self.emit(op) + return self.emit(op) - def optimize_GETARRAYITEM_GC_I_callback(self, op, cf, arrayinfo, indexb): + def postprocess_GETARRAYITEM_GC_I(self, op, oldop): # the remember the result of reading the array item - if cf is not None: + arrayinfo = self.ensure_ptr_info_arg0(op) + indexb = self.getintbound(op.getarg(1)) + if indexb.is_constant(): + index = indexb.getint() + cf = self.arrayitem_cache(op.getdescr(), index) 
arrayinfo.setitem(op.getdescr(), indexb.getint(), self.get_box_replacement(op.getarg(0)), self.get_box_replacement(op), cf, @@ -567,6 +576,9 @@ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I + postprocess_GETARRAYITEM_GC_R = postprocess_GETARRAYITEM_GC_I + postprocess_GETARRAYITEM_GC_F = postprocess_GETARRAYITEM_GC_I + def optimize_GETARRAYITEM_GC_PURE_I(self, op): arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) @@ -585,7 +597,7 @@ self.force_lazy_setarrayitem(op.getdescr(), self.getintbound(op.getarg(1))) # default case: produce the operation self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return self.emit(op) optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_PURE_I optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_PURE_I @@ -609,7 +621,7 @@ # variable index, so make sure the lazy setarrayitems are done self.force_lazy_setarrayitem(op.getdescr(), indexb, can_cache=False) # and then emit the operation - self.emit_operation(op) + return self.emit(op) def optimize_QUASIIMMUT_FIELD(self, op): # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr) @@ -647,9 +659,11 @@ if self._seen_guard_not_invalidated: return self._seen_guard_not_invalidated = True - self.emit_operation(op) + return self.emit(op) dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_', - default=OptHeap.emit_operation) + default=OptHeap.emit) OptHeap.propagate_forward = dispatch_opt +dispatch_postprocess = make_dispatcher_method(OptHeap, 'postprocess_') +OptHeap.propagate_postprocess = dispatch_postprocess diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -141,7 +141,7 @@ return constptr # op.set_forwarded(None) - optforce.emit_operation(op) + optforce.emit_extra(op) newop = optforce.getlastop() op.set_forwarded(newop) 
newop.set_forwarded(self) @@ -218,7 +218,7 @@ setfieldop = ResOperation(rop.SETFIELD_GC, [op, subbox], descr=flddescr) self._fields[i] = None - optforce.emit_operation(setfieldop) + optforce.emit_extra(setfieldop) def _force_at_the_end_of_preamble(self, op, optforce, rec): if self._fields is None: @@ -406,7 +406,7 @@ itembox = buffer.values[i] setfield_op = ResOperation(rop.RAW_STORE, [op, ConstInt(offset), itembox], descr=descr) - optforce.emit_operation(setfield_op) + optforce.emit_extra(setfield_op) def _visitor_walk_recursive(self, op, visitor, optimizer): itemboxes = [optimizer.get_box_replacement(box) @@ -519,7 +519,7 @@ [op, ConstInt(i), subbox], descr=descr) self._items[i] = None - optforce.emit_operation(setop) + optforce.emit_extra(setop) optforce.pure_from_args(rop.ARRAYLEN_GC, [op], ConstInt(len(self._items))) def setitem(self, descr, index, struct, op, cf=None, optheap=None): @@ -632,7 +632,7 @@ setfieldop = ResOperation(rop.SETINTERIORFIELD_GC, [op, ConstInt(index), subbox], descr=flddescr) - optforce.emit_operation(setfieldop) + optforce.emit_extra(setfieldop) # heapcache does not work for interiorfields # if it does, we would need a fix here i += 1 diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -42,9 +42,6 @@ """Keeps track of the bounds placed on integers by guards and remove redundant guards""" - def opt_default(self, op): - return op - def propagate_forward(self, op): return dispatch_opt(self, op) @@ -63,7 +60,7 @@ dispatch_bounds_ops(self, box) def _optimize_guard_true_false_value(self, op): - return op + return self.emit(op) def _postprocess_guard_true_false_value(self, op, oldop): if op.getarg(0).type == 'i': @@ -86,7 +83,7 @@ else: self.make_constant_int(op, 0) return None - return op + return self.emit(op) def postprocess_INT_OR_or_XOR(self, op, oldop): v1 = 
self.get_box_replacement(op.getarg(0)) @@ -106,7 +103,7 @@ postprocess_INT_XOR = postprocess_INT_OR_or_XOR def optimize_INT_AND(self, op): - return op + return self.emit(op) def postprocess_INT_AND(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -125,7 +122,7 @@ r.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): - return op + return self.emit(op) def postprocess_INT_SUB(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -138,7 +135,7 @@ arg1 = self.get_box_replacement(op.getarg(0)) arg2 = self.get_box_replacement(op.getarg(1)) if self.is_raw_ptr(arg1) or self.is_raw_ptr(arg2): - return op + return self.emit(op) v1 = self.getintbound(arg1) v2 = self.getintbound(arg2) @@ -172,7 +169,7 @@ arg2 = ConstInt(sum) op = self.replace_op_with(op, rop.INT_ADD, args=[arg1, arg2]) - return op + return self.emit(op) def postprocess_INT_ADD(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -183,7 +180,7 @@ r.intersect(b) def optimize_INT_MUL(self, op): - return op + return self.emit(op) def postprocess_INT_MUL(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -194,7 +191,7 @@ r.intersect(b) def optimize_INT_FLOORDIV(self, op): - return op + return self.emit(op) def postprocess_INT_FLOORDIV(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -215,7 +212,7 @@ arg2 = ConstInt(val-1) op = self.replace_op_with(op, rop.INT_AND, args=[arg1, arg2]) - return op + return self.emit(op) def postprocess_INT_MOD(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -236,7 +233,7 @@ r.make_lt(IntBound(val, val)) def optimize_INT_LSHIFT(self, op): - return op + return self.emit(op) def postprocess_INT_LSHIFT(self, op, oldop): arg0 = self.get_box_replacement(op.getarg(0)) @@ -262,7 +259,7 @@ # constant result (likely 0, for rshifts that kill all bits) self.make_constant_int(op, b.lower) return None - return op + return self.emit(op) def postprocess_INT_RSHIFT(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -295,7 +292,7 @@ 
self.pure_from_args(rop.INT_SUB, [args[0], result], args[1]) #elif opnum == rop.INT_MUL_OVF: # self.pure(rop.INT_MUL, args[:], result) - return op + return self.emit(op) def optimize_GUARD_OVERFLOW(self, op): # If INT_xxx_OVF was replaced by INT_xxx, *but* we still see @@ -308,7 +305,7 @@ raise InvalidLoop('An INT_xxx_OVF was proven not to overflow but' + 'guarded with GUARD_OVERFLOW') - return op + return self.emit(op) def optimize_INT_ADD_OVF(self, op): b1 = self.getintbound(op.getarg(0)) @@ -319,7 +316,7 @@ # by optimize_GUARD_NO_OVERFLOW; if we see instead an # optimize_GUARD_OVERFLOW, then InvalidLoop. op = self.replace_op_with(op, rop.INT_ADD) - return op + return self.emit(op) def postprocess_INT_ADD_OVF(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -339,7 +336,7 @@ resbound = b0.sub_bound(b1) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_SUB) - return op + return self.emit(op) def postprocess_INT_SUB_OVF(self, op, oldop): arg0 = self.get_box_replacement(op.getarg(0)) @@ -356,7 +353,7 @@ resbound = b1.mul_bound(b2) if resbound.bounded(): op = self.replace_op_with(op, rop.INT_MUL) - return op + return self.emit(op) def postprocess_INT_MUL_OVF(self, op, oldop): b1 = self.getintbound(op.getarg(0)) @@ -375,7 +372,7 @@ elif b1.known_ge(b2) or arg1 is arg2: self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_GT(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -387,7 +384,7 @@ elif b1.known_le(b2) or arg1 is arg2: self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_LE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -399,7 +396,7 @@ elif b1.known_gt(b2): self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_GE(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -411,7 +408,7 @@ elif b1.known_lt(b2): self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_EQ(self, op): arg0 = 
self.get_box_replacement(op.getarg(0)) @@ -425,7 +422,7 @@ elif arg0.same_box(arg1): self.make_constant_int(op, 1) else: - return op + return self.emit(op) def optimize_INT_NE(self, op): arg0 = self.get_box_replacement(op.getarg(0)) @@ -439,14 +436,14 @@ elif arg0 is arg1: self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_FORCE_GE_ZERO(self, op): b = self.getintbound(op.getarg(0)) if b.known_ge(IntBound(0, 0)): self.make_equal_to(op, op.getarg(0)) else: - return op + return self.emit(op) def optimize_INT_SIGNEXT(self, op): b = self.getintbound(op.getarg(0)) @@ -457,7 +454,7 @@ if bounds.contains_bound(b): self.make_equal_to(op, op.getarg(0)) else: - return op + return self.emit(op) def postprocess_INT_SIGNEXT(self, op, oldop): numbits = op.getarg(1).getint() * 8 @@ -468,14 +465,14 @@ bres.intersect(bounds) def optimize_ARRAYLEN_GC(self, op): - return op + return self.emit(op) def postprocess_ARRAYLEN_GC(self, op, oldop): array = self.ensure_ptr_info_arg0(op) self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): - return op + return self.emit(op) def postprocess_STRLEN(self, op, oldop): self.make_nonnull_str(op.getarg(0), vstring.mode_string) @@ -483,7 +480,7 @@ self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) def optimize_UNICODELEN(self, op): - return op + return self.emit(op) def postprocess_UNICODELEN(self, op, oldop): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) @@ -491,7 +488,7 @@ self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) def optimize_STRGETITEM(self, op): - return op + return self.emit(op) def postprocess_STRGETITEM(self, op, oldop): v1 = self.getintbound(op) @@ -505,7 +502,7 @@ v1.make_lt(IntUpperBound(256)) def optimize_GETFIELD_RAW_I(self, op): - return op + return self.emit(op) def postprocess_GETFIELD_RAW_I(self, op, oldop): descr = op.getdescr() @@ -535,7 +532,7 @@ postprocess_GETINTERIORFIELD_GC_F = 
postprocess_GETFIELD_RAW_I def optimize_GETARRAYITEM_RAW_I(self, op): - return op + return self.emit(op) def postprocess_GETARRAYITEM_RAW_I(self, op, oldop): descr = op.getdescr() @@ -555,7 +552,7 @@ postprocess_GETARRAYITEM_GC_R = postprocess_GETARRAYITEM_RAW_I def optimize_UNICODEGETITEM(self, op): - return op + return self.emit(op) def postprocess_UNICODEGETITEM(self, op, oldop): b1 = self.getintbound(op) @@ -722,6 +719,6 @@ dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_', - default=OptIntBounds.opt_default) + default=OptIntBounds.emit) dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_') dispatch_postprocess = make_dispatcher_method(OptIntBounds, 'postprocess_') diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -34,6 +34,19 @@ return True +class OptimizationResult(object): + def __init__(self, opt, op, callback_func=None, *callback_args): + self.opt = opt + self.op = op + if callback_func is None: + callback_func = opt.propagate_postprocess + self.callback_func = callback_func + self.callback_args = callback_args + + def callback(self, oldop): + self.callback_func(self.op, oldop, *self.callback_args) + + class Optimization(object): next_optimization = None potential_extra_ops = None @@ -41,15 +54,26 @@ def __init__(self): pass # make rpython happy - def send_extra_operation(self, op): - self.optimizer.send_extra_operation(op) + def send_extra_operation(self, op, opt=None): + self.optimizer.send_extra_operation(op, opt) def propagate_forward(self, op): raise NotImplementedError + def propagate_postprocess(self, op): + raise NotImplementedError + def emit_operation(self, op): + assert False, "This should never be called." 
+ + def emit(self, op, callback_func=None, *callback_args): self.last_emitted_operation = op - self.next_optimization.propagate_forward(op) + return OptimizationResult(self, op, callback_func, *callback_args) + + def emit_extra(self, op, emit=True): + if emit: + self.emit(op) + self.send_extra_operation(op, self.next_optimization) def getintbound(self, op): assert op.type == 'i' @@ -272,7 +296,7 @@ def set_optimizations(self, optimizations): if optimizations: - self.first_optimization = optimizations[3] + self.first_optimization = optimizations[0] for i in range(1, len(optimizations)): optimizations[i - 1].next_optimization = optimizations[i] optimizations[-1].next_optimization = self @@ -284,7 +308,7 @@ optimizations = [] self.first_optimization = self - self.optimizations = optimizations + self.optimizations = optimizations def force_op_from_preamble(self, op): return op @@ -537,21 +561,34 @@ if op.get_forwarded() is not None: op.set_forwarded(None) - def send_extra_operation(self, op): + def send_extra_operation(self, op, opt=None): + if opt is None: + opt = self.first_optimization oldop = op - for optimization in self.optimizations[:3]: - op = optimization.propagate_forward(op) - if op is None: - return - optimization.last_emitted_operation = op - self.first_optimization.propagate_forward(op) - for optimization in reversed(self.optimizations[:3]): - optimization.propagate_postprocess(op, oldop) + opt_results = [] + while opt is not None: + opt_result = opt.propagate_forward(op) + if opt_result is None: + op = None + break + opt_results.append(opt_result) + op = opt_result.op + opt = opt.next_optimization + for opt_result in reversed(opt_results): + opt_result.callback(oldop) def propagate_forward(self, op): dispatch_opt(self, op) - def emit_operation(self, op): + def propagate_postprocess(self, op): + pass + + def emit_extra(self, op): + # no forwarding, because we're at the end of the chain + self.emit(op) + + def emit(self, op, callback_func=None, 
*callback_args): + # this actually emits the operation instead of forwarding it if op.returns_bool_result(): self.getintbound(op).make_bool() self._emit_operation(op) @@ -725,7 +762,7 @@ return op def optimize_default(self, op): - self.emit_operation(op) + self.emit(op) def constant_fold(self, op): argboxes = [self.get_constant_box(op.getarg(i)) @@ -803,14 +840,14 @@ #def optimize_GUARD_NO_OVERFLOW(self, op): # # otherwise the default optimizer will clear fields, which is unwanted # # in this case - # self.emit_operation(op) + # self.emit(op) # FIXME: Is this still needed? def optimize_DEBUG_MERGE_POINT(self, op): - self.emit_operation(op) + self.emit(op) def optimize_JIT_DEBUG(self, op): - self.emit_operation(op) + self.emit(op) def optimize_STRGETITEM(self, op): indexb = self.getintbound(op.getarg(1)) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -71,7 +71,10 @@ self.extra_call_pure = [] def propagate_forward(self, op): - dispatch_opt(self, op) + return dispatch_opt(self, op) + + def propagate_postprocess(self, op, oldop): + dispatch_postprocess(self, op, oldop) def optimize_default(self, op): canfold = op.is_always_pure() @@ -108,17 +111,17 @@ return # otherwise, the operation remains - self.emit_operation(op) - self.optimize_default_callback(op, save, nextop) + return self.emit(op, self.postprocess_default, save, nextop) - def optimize_default_callback(self, op, save, nextop): + def postprocess_default(self, op, oldop, save, nextop): + # postprocessor for optimize_default, not default postprocessor if op.returns_bool_result(): self.getintbound(op).make_bool() if save: recentops = self.getrecentops(op.getopnum()) recentops.add(op) if nextop: - self.emit_operation(nextop) + self.emit_extra(nextop) def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: @@ -161,10 +164,9 @@ # replace 
CALL_PURE with just CALL opnum = OpHelpers.call_for_descr(op.getdescr()) newop = self.optimizer.replace_op_with(op, opnum) - self.emit_operation(newop) - self.optimize_CALL_PURE_I_callback(op) + return self.emit(newop, self.postprocess_call_pure) - def optimize_CALL_PURE_I_callback(self, op): + def postprocess_call_pure(self, op, oldop): self.call_pure_positions.append( len(self.optimizer._newoperations) - 1) @@ -196,7 +198,7 @@ # it was a CALL_PURE that was killed; so we also kill the # following GUARD_NO_EXCEPTION return - self.emit_operation(op) + return self.emit(op) def flush(self): assert self.postponed_op is None @@ -242,3 +244,4 @@ dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', default=OptPure.optimize_default) +dispatch_postprocess = make_dispatcher_method(OptPure, 'postprocess_') diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -24,9 +24,6 @@ self.loop_invariant_results = {} self.loop_invariant_producer = {} - def opt_default(self, op): - return op - def setup(self): self.optimizer.optrewrite = self @@ -103,7 +100,7 @@ self.make_equal_to(op, op.getarg(1)) return - return op + return self.emit(op) def optimize_INT_OR(self, op): b1 = self.getintbound(op.getarg(0)) @@ -113,7 +110,7 @@ elif b2.equal(0): self.make_equal_to(op, op.getarg(0)) else: - return op + return self.emit(op) def optimize_INT_SUB(self, op): arg1 = self.get_box_replacement(op.getarg(0)) @@ -124,18 +121,18 @@ self.make_equal_to(op, arg1) elif b1.equal(0): op = self.replace_op_with(op, rop.INT_NEG, args=[arg2]) - return op + return self.emit(op) elif arg1.same_box(arg2): self.make_constant_int(op, 0) else: - return op + return self.emit(op) def postprocess_INT_SUB(self, op, oldop): self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): if self.is_raw_ptr(op.getarg(0)) or self.is_raw_ptr(op.getarg(1)): - 
return op + return self.emit(op) arg1 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg1) arg2 = self.get_box_replacement(op.getarg(1)) @@ -147,7 +144,7 @@ elif b2.equal(0): self.make_equal_to(op, arg1) else: - return op + return self.emit(op) def postprocess_INT_ADD(self, op, oldop): self.optimizer.pure_reverse(op) @@ -175,7 +172,7 @@ new_rhs = ConstInt(highest_bit(lh_info.getint())) op = self.replace_op_with(op, rop.INT_LSHIFT, args=[rhs, new_rhs]) break - return op + return self.emit(op) def optimize_UINT_FLOORDIV(self, op): b2 = self.getintbound(op.getarg(1)) @@ -183,7 +180,7 @@ if b2.is_constant() and b2.getint() == 1: self.make_equal_to(op, op.getarg(0)) else: - return op + return self.emit(op) def optimize_INT_LSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) @@ -194,7 +191,7 @@ elif b1.is_constant() and b1.getint() == 0: self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_RSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) @@ -205,7 +202,7 @@ elif b1.is_constant() and b1.getint() == 0: self.make_constant_int(op, 0) else: - return op + return self.emit(op) def optimize_INT_XOR(self, op): b1 = self.getintbound(op.getarg(0)) @@ -216,7 +213,7 @@ elif b2.equal(0): self.make_equal_to(op, op.getarg(0)) else: - return op + return self.emit(op) def optimize_FLOAT_MUL(self, op): arg1 = op.getarg(0) @@ -234,8 +231,8 @@ return elif v1.getfloat() == -1.0: newop = self.replace_op_with(op, rop.FLOAT_NEG, args=[rhs]) - return newop - return op + return self.emit(newop) + return self.emit(op) def postprocess_FLOAT_MUL(self, op, oldop): self.optimizer.pure_reverse(op) @@ -259,10 +256,10 @@ c = ConstFloat(longlong.getfloatstorage(reciprocal)) newop = self.replace_op_with(op, rop.FLOAT_MUL, args=[arg1, c]) - return newop + return self.emit(newop) def optimize_FLOAT_NEG(self, op): - return op + return self.emit(op) def postprocess_FLOAT_NEG(self, op, oldop): self.optimizer.pure_reverse(op) @@ -288,7 +285,7 @@ 'was 
proven to always fail' % r) return - return op + return self.emit(op) def optimize_GUARD_ISNULL(self, op): info = self.getptrinfo(op.getarg(0)) @@ -299,7 +296,7 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_ISNULL (%s) was proven to always ' 'fail' % r) - return op + return self.emit(op) def postprocess_GUARD_ISNULL(self, op, oldop): self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL) @@ -318,7 +315,7 @@ return if info.is_precise(): raise InvalidLoop() - return op + return self.emit(op) def optimize_GUARD_GC_TYPE(self, op): info = self.getptrinfo(op.getarg(0)) @@ -332,7 +329,7 @@ if info.get_descr().get_type_id() != op.getarg(1).getint(): raise InvalidLoop("wrong GC types passed around!") return - return op + return self.emit(op) def _check_subclass(self, vtable1, vtable2): # checks that vtable1 is a subclass of vtable2 @@ -366,7 +363,7 @@ if self._check_subclass(info.get_descr().get_vtable(), op.getarg(1).getint()): return - return op + return self.emit(op) def optimize_GUARD_NONNULL(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -377,7 +374,7 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_NONNULL (%s) was proven to always ' 'fail' % r) - return op + return self.emit(op) def postprocess_GUARD_NONNULL(self, op, oldop): self.make_nonnull(op.getarg(0)) @@ -486,8 +483,8 @@ # not put in short preambles guard_nonnull and guard_class # on the same box. 
self.optimizer.replace_guard(op, info) - return op - return op + return self.emit(op) + return self.emit(op) def postprocess_GUARD_CLASS(self, op, oldop): expectedclassbox = op.getarg(1) @@ -525,7 +522,7 @@ # there is no reason to have a separate operation for this newop = self.replace_op_with(op, OpHelpers.call_for_descr(op.getdescr())) - return op + return self.emit(op) def postprocess_CALL_LOOPINVARIANT_I(self, op, oldop): key = make_hashable_int(op.getarg(0).getint()) @@ -549,7 +546,7 @@ return opnum = OpHelpers.call_for_type(op.type) op = op.copy_and_change(opnum, args=op.getarglist()[1:]) - return op + return self.emit(op) def _optimize_nullness(self, op, box, expect_nonnull): info = self.getnullness(box) @@ -558,7 +555,7 @@ elif info == INFO_NULL: self.make_constant_int(op, not expect_nonnull) else: - return op + return self.emit(op) def optimize_INT_IS_TRUE(self, op): if (not self.is_raw_ptr(op.getarg(0)) and @@ -605,7 +602,7 @@ # class is different self.make_constant_int(op, expect_isnot) return - return op + return self.emit(op) def optimize_PTR_EQ(self, op): return self._optimize_oois_ooisnot(op, False, False) @@ -627,7 +624,7 @@ oopspecindex = effectinfo.oopspecindex if oopspecindex == EffectInfo.OS_ARRAYCOPY: return self._optimize_CALL_ARRAYCOPY(op) - return op + return self.emit(op) def _optimize_CALL_ARRAYCOPY(self, op): length = self.get_constant_box(op.getarg(5)) @@ -648,7 +645,7 @@ dest_start = dest_start_box.getint() arraydescr = extrainfo.write_descrs_arrays[0] if arraydescr.is_array_of_structs(): - return op # not supported right now + return self.emit(op) # not supported right now # XXX fish fish fish for index in range(length.getint()): @@ -676,7 +673,7 @@ descr=arraydescr) self.optimizer.send_extra_operation(newop) return None - return op + return self.emit(op) def optimize_CALL_PURE_I(self, op): # this removes a CALL_PURE with all constant arguments. 
@@ -686,7 +683,7 @@ self.make_constant(op, result) self.last_emitted_operation = REMOVED return - return op + return self.emit(op) optimize_CALL_PURE_R = optimize_CALL_PURE_I optimize_CALL_PURE_F = optimize_CALL_PURE_I @@ -697,7 +694,7 @@ # it was a CALL_PURE or a CALL_LOOPINVARIANT that was killed; # so we also kill the following GUARD_NO_EXCEPTION return - return op + return self.emit(op) def optimize_GUARD_FUTURE_CONDITION(self, op): self.optimizer.notice_guard_future_condition(op) @@ -719,15 +716,15 @@ if val & (val - 1) == 0 and val > 0: # val == 2**shift op = self.replace_op_with(op, rop.INT_RSHIFT, args = [op.getarg(0), ConstInt(highest_bit(val))]) - return op + return self.emit(op) def optimize_CAST_PTR_TO_INT(self, op): self.optimizer.pure_reverse(op) - return op + return self.emit(op) def optimize_CAST_INT_TO_PTR(self, op): self.optimizer.pure_reverse(op) - return op + return self.emit(op) def optimize_SAME_AS_I(self, op): self.make_equal_to(op, op.getarg(0)) @@ -735,6 +732,6 @@ optimize_SAME_AS_F = optimize_SAME_AS_I dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_', - default=OptRewrite.opt_default) + default=OptRewrite.emit) optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD') dispatch_postprocess = make_dispatcher_method(OptRewrite, 'postprocess_') diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -15,9 +15,6 @@ _last_guard_not_forced_2 = None _finish_guard_op = None - def opt_default(self, op): - return op - def make_virtual(self, known_class, source_op, descr): opinfo = info.InstancePtrInfo(descr, known_class, is_virtual=True) opinfo.init_fields(descr, 0) @@ -59,19 +56,19 @@ def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: return - return op + return self.emit(op) def optimize_GUARD_NOT_FORCED(self, op): if 
self.last_emitted_operation is REMOVED: return - return op + return self.emit(op) def optimize_GUARD_NOT_FORCED_2(self, op): self._last_guard_not_forced_2 = op def optimize_FINISH(self, op): self._finish_guard_op = self._last_guard_not_forced_2 - return op + return self.emit(op) def postprocess_FINISH(self, op, oldop): guard_op = self._finish_guard_op @@ -87,7 +84,7 @@ if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL: if self._optimize_JIT_FORCE_VIRTUAL(op): return - return op + return self.emit(op) optimize_CALL_MAY_FORCE_R = optimize_CALL_MAY_FORCE_I optimize_CALL_MAY_FORCE_F = optimize_CALL_MAY_FORCE_I optimize_CALL_MAY_FORCE_N = optimize_CALL_MAY_FORCE_I @@ -99,7 +96,7 @@ opinfo = self.getptrinfo(op.getarg(2)) if opinfo and opinfo.is_virtual(): return - return op + return self.emit(op) def optimize_VIRTUAL_REF(self, op): # get some constants @@ -120,7 +117,7 @@ vrefvalue.setfield(descr_virtual_token, newop, token) vrefvalue.setfield(descr_forced, newop, self.optimizer.cpu.ts.CONST_NULLREF) - return token + return self.emit(token) def optimize_VIRTUAL_REF_FINISH(self, op): # This operation is used in two cases. 
In normal cases, it @@ -183,7 +180,7 @@ self.make_equal_to(op, fieldop) else: self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I @@ -201,7 +198,7 @@ self.get_box_replacement(op.getarg(1))) else: self.make_nonnull(struct) - return op + return self.emit(op) def optimize_NEW_WITH_VTABLE(self, op): known_class = ConstInt(op.getdescr().get_vtable()) @@ -215,14 +212,14 @@ if sizebox is not None: self.make_varray(op.getdescr(), sizebox.getint(), op) else: - return op + return self.emit(op) def optimize_NEW_ARRAY_CLEAR(self, op): sizebox = self.get_constant_box(op.getarg(0)) if sizebox is not None: self.make_varray(op.getdescr(), sizebox.getint(), op, clear=True) else: - return op + return self.emit(op) def optimize_CALL_N(self, op): effectinfo = op.getdescr().get_extra_info() @@ -236,14 +233,14 @@ if info and info.is_virtual(): return else: - return op + return self.emit(op) optimize_CALL_R = optimize_CALL_N optimize_CALL_I = optimize_CALL_N def do_RAW_MALLOC_VARSIZE_CHAR(self, op): sizebox = self.get_constant_box(op.getarg(1)) if sizebox is None: - return op + return self.emit(op) self.make_virtual_raw_memory(sizebox.getint(), op) self.last_emitted_operation = REMOVED @@ -251,7 +248,7 @@ opinfo = self.getrawptrinfo(op.getarg(1)) if opinfo and opinfo.is_virtual(): return - return op + return self.emit(op) def optimize_INT_ADD(self, op): opinfo = self.getrawptrinfo(op.getarg(0), create=False) @@ -264,7 +261,7 @@ isinstance(opinfo, info.RawSlicePtrInfo)): self.make_virtual_raw_slice(offset, opinfo, op) return - return op + return self.emit(op) def optimize_ARRAYLEN_GC(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -272,7 +269,7 @@ self.make_constant_int(op, opinfo.getlength()) else: self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) def optimize_GETARRAYITEM_GC_I(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -286,7 +283,7 @@ 
self.make_equal_to(op, item) return self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I @@ -306,7 +303,7 @@ self.get_box_replacement(op.getarg(2))) return self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) def _unpack_arrayitem_raw_op(self, op, indexbox): index = indexbox.getint() @@ -331,7 +328,7 @@ self.make_equal_to(op, itemvalue) return self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I def optimize_SETARRAYITEM_RAW(self, op): @@ -347,7 +344,7 @@ except InvalidRawOperation: pass self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) def _unpack_raw_load_store_op(self, op, offsetbox): offset = offsetbox.getint() @@ -369,7 +366,7 @@ else: self.make_equal_to(op, itemop) return - return op + return self.emit(op) optimize_RAW_LOAD_F = optimize_RAW_LOAD_I def optimize_RAW_STORE(self, op): @@ -383,7 +380,7 @@ return except InvalidRawOperation: pass - return op + return self.emit(op) def optimize_GETINTERIORFIELD_GC_I(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -399,7 +396,7 @@ self.make_equal_to(op, fld) return self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I @@ -413,11 +410,11 @@ self.get_box_replacement(op.getarg(2))) return self.make_nonnull(op.getarg(0)) - return op + return self.emit(op) dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_', - default=OptVirtualize.opt_default) + default=OptVirtualize.emit) OptVirtualize.propagate_forward = dispatch_opt dispatch_postprocess = make_dispatcher_method(OptVirtualize, 'postprocess_') diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- 
a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -97,7 +97,7 @@ newop = ResOperation(self.mode.NEWSTR, [lengthbox]) if not we_are_translated(): newop.name = 'FORCE' - optforce.emit_operation(newop) + optforce.emit_extra(newop) newop = optforce.getlastop() newop.set_forwarded(self) op = optforce.get_box_replacement(op) @@ -120,7 +120,7 @@ lengthop = ResOperation(mode.STRLEN, [op]) lengthop.set_forwarded(self.getlenbound(mode)) self.lgtop = lengthop - string_optimizer.emit_operation(lengthop) + string_optimizer.emit_extra(lengthop) return lengthop def make_guards(self, op, short, optimizer): @@ -204,7 +204,7 @@ op = ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox]) - string_optimizer.emit_operation(op) + string_optimizer.emit_extra(op) offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) return offsetbox @@ -356,7 +356,7 @@ mode) srcoffsetbox = _int_add(string_optimizer, srcoffsetbox, CONST_1) assert not isinstance(targetbox, Const)# ConstPtr never makes sense - string_optimizer.emit_operation(ResOperation(mode.STRSETITEM, + string_optimizer.emit_extra(ResOperation(mode.STRSETITEM, [targetbox, offsetbox, charbox])) offsetbox = _int_add(string_optimizer, offsetbox, CONST_1) else: @@ -368,7 +368,7 @@ op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox, srcoffsetbox, offsetbox, lengthbox]) - string_optimizer.emit_operation(op) + string_optimizer.emit_extra(op) offsetbox = nextoffsetbox return offsetbox @@ -412,7 +412,7 @@ else: resbox = string_optimizer.replace_op_with(resbox, mode.STRGETITEM, [strbox, indexbox]) - string_optimizer.emit_operation(resbox) + string_optimizer.emit_extra(resbox) return resbox @@ -422,6 +422,12 @@ def setup(self): self.optimizer.optstring = self + def propagate_forward(self, op): + return dispatch_opt(self, op) + + def propagate_postprocess(self, op, oldop): + return dispatch_postprocess(self, op, oldop) + def make_vstring_plain(self, op, mode, length): vvalue = 
VStringPlainInfo(mode, True, length) op = self.replace_op_with(op, op.getopnum()) @@ -441,9 +447,9 @@ return vvalue def optimize_NEWSTR(self, op): - self._optimize_NEWSTR(op, mode_string) + return self._optimize_NEWSTR(op, mode_string) def optimize_NEWUNICODE(self, op): - self._optimize_NEWSTR(op, mode_unicode) + return self._optimize_NEWSTR(op, mode_unicode) def _optimize_NEWSTR(self, op, mode): length_box = self.get_constant_box(op.getarg(0)) @@ -452,11 +458,13 @@ self.make_vstring_plain(op, mode, length_box.getint()) else: self.make_nonnull_str(op, mode) - self.emit_operation(op) - self._optimize_NEWSTR_callback(op, mode) + return self.emit(op) - def _optimize_NEWSTR_callback(self, op, mode): - self.pure_from_args(mode.STRLEN, [op], op.getarg(0)) + def postprocess_NEWSTR(self, op, oldop): + self.pure_from_args(mode_string.STRLEN, [op], op.getarg(0)) + + def postprocess_NEWUNICODE(self, op, oldop): + self.pure_from_args(mode_unicode.STRLEN, [op], op.getarg(0)) def optimize_STRSETITEM(self, op): opinfo = self.getptrinfo(op.getarg(0)) @@ -467,17 +475,17 @@ indexbox = self.get_constant_box(op.getarg(1)) if indexbox is not None: opinfo.strsetitem(indexbox.getint(), - self.get_box_replacement(op.getarg(2))) + self.get_box_replacement(op.getarg(2))) return self.make_nonnull(op.getarg(0)) - self.emit_operation(op) + return self.emit(op) optimize_UNICODESETITEM = optimize_STRSETITEM def optimize_STRGETITEM(self, op): - self._optimize_STRGETITEM(op, mode_string) + return self._optimize_STRGETITEM(op, mode_string) def optimize_UNICODEGETITEM(self, op): - self._optimize_STRGETITEM(op, mode_unicode) + return self._optimize_STRGETITEM(op, mode_unicode) def _optimize_STRGETITEM(self, op, mode): self.strgetitem(op, op.getarg(0), op.getarg(1), mode) @@ -517,9 +525,9 @@ return _strgetitem(self, s, index, mode, op) def optimize_STRLEN(self, op): - self._optimize_STRLEN(op, mode_string) + return self._optimize_STRLEN(op, mode_string) def optimize_UNICODELEN(self, op): - 
self._optimize_STRLEN(op, mode_unicode) + return self._optimize_STRLEN(op, mode_unicode) def _optimize_STRLEN(self, op, mode): opinfo = self.getptrinfo(op.getarg(0)) @@ -528,13 +536,13 @@ if lgtop is not None: self.make_equal_to(op, lgtop) return - self.emit_operation(op) + return self.emit(op) def optimize_COPYSTRCONTENT(self, op): - self._optimize_COPYSTRCONTENT(op, mode_string) + return self._optimize_COPYSTRCONTENT(op, mode_string) def optimize_COPYUNICODECONTENT(self, op): - self._optimize_COPYSTRCONTENT(op, mode_unicode) + return self._optimize_COPYSTRCONTENT(op, mode_unicode) def _optimize_COPYSTRCONTENT(self, op, mode): # args: src dst srcstart dststart length @@ -569,7 +577,7 @@ op.getarg(1), ConstInt(index + dst_start), vresult, ]) - self.emit_operation(new_op) + self.emit_extra(new_op) else: copy_str_content(self, op.getarg(0), op.getarg(1), op.getarg(2), op.getarg(3), op.getarg(4), mode, @@ -584,13 +592,15 @@ if oopspecindex != EffectInfo.OS_NONE: for value, meth in opt_call_oopspec_ops: if oopspecindex == value: # a match with the OS_STR_xxx - if meth(self, op, mode_string): - return + handled, newop = meth(self, op, mode_string) + if handled: + return newop break if oopspecindex == value + EffectInfo._OS_offset_uni: # a match with the OS_UNI_xxx - if meth(self, op, mode_unicode): - return + handled, newop = meth(self, op, mode_unicode) + if handled: + return newop break if oopspecindex == EffectInfo.OS_STR2UNICODE: if self.opt_call_str_STR2UNICODE(op): @@ -598,7 +608,7 @@ if oopspecindex == EffectInfo.OS_SHRINK_ARRAY: if self.opt_call_SHRINK_ARRAY(op): return - self.emit_operation(op) + return self.emit(op) optimize_CALL_R = optimize_CALL_I optimize_CALL_F = optimize_CALL_I optimize_CALL_N = optimize_CALL_I @@ -610,7 +620,7 @@ def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: return - self.emit_operation(op) + return self.emit(op) def opt_call_str_STR2UNICODE(self, op): # Constant-fold unicode("constant string"). 
@@ -638,7 +648,7 @@ self.get_box_replacement(op.getarg(1)), self.get_box_replacement(op.getarg(2))) self.last_emitted_operation = REMOVED - return True + return True, None def opt_call_stroruni_STR_SLICE(self, op, mode): self.make_nonnull_str(op.getarg(1), mode) @@ -651,7 +661,7 @@ value = self.make_vstring_plain(op, mode, -1) value.setup_slice(vstr._chars, vstart.getint(), vstop.getint()) - return True + return True, None # startbox = op.getarg(2) strbox = op.getarg(1) @@ -664,7 +674,7 @@ # self.make_vstring_slice(op, strbox, startbox, mode, lengthbox) self.last_emitted_operation = REMOVED - return True + return True, None @specialize.arg(2) def opt_call_stroruni_STR_EQUAL(self, op, mode): @@ -687,25 +697,28 @@ l1box.value != l2box.value): # statically known to have a different length self.make_constant(op, CONST_0) - return True + return True, None # - if self.handle_str_equal_level1(arg1, arg2, op, mode): - return True - if self.handle_str_equal_level1(arg2, arg1, op, mode): - return True - if self.handle_str_equal_level2(arg1, arg2, op, mode): - return True - if self.handle_str_equal_level2(arg2, arg1, op, mode): - return True + handled, result = self.handle_str_equal_level1(arg1, arg2, op, mode) + if handled: + return True, result + handled, result = self.handle_str_equal_level1(arg2, arg1, op, mode) + if handled: + return True, result + handled, result = self.handle_str_equal_level2(arg1, arg2, op, mode) + if handled: + return True, result + handled, result = self.handle_str_equal_level2(arg2, arg1, op, mode) + if handled: + return True, result # if i1 and i1.is_nonnull() and i2 and i2.is_nonnull(): if l1box is not None and l2box is not None and l1box.same_box(l2box): do = EffectInfo.OS_STREQ_LENGTHOK else: do = EffectInfo.OS_STREQ_NONNULL - self.generate_modified_call(do, [arg1, arg2], op, mode) - return True - return False + return True, self.generate_modified_call(do, [arg1, arg2], op, mode) + return False, None def handle_str_equal_level1(self, arg1, 
arg2, resultop, mode): i1 = self.getptrinfo(arg1) @@ -728,7 +741,7 @@ [lengthbox, CONST_0], descr=DONT_CHANGE) seo(op) - return True + return True, None if l2box.value == 1: if i1: l1box = i1.getstrlen(arg1, self, mode, False) @@ -742,30 +755,28 @@ op = self.optimizer.replace_op_with(resultop, rop.INT_EQ, [vchar1, vchar2], descr=DONT_CHANGE) seo(op) - return True + return True, None if isinstance(i1, VStringSliceInfo): vchar = self.strgetitem(None, arg2, optimizer.CONST_0, mode) do = EffectInfo.OS_STREQ_SLICE_CHAR - self.generate_modified_call(do, [i1.s, i1.start, - i1.lgtop, vchar], - resultop, mode) - return True + return True, self.generate_modified_call(do, [i1.s, i1.start, + i1.lgtop, vchar], + resultop, mode) # if i2 and i2.is_null(): if i1 and i1.is_nonnull(): self.make_constant(resultop, CONST_0) - return True + return True, None if i1 and i1.is_null(): self.make_constant(resultop, CONST_1) - return True + return True, None op = self.optimizer.replace_op_with(resultop, rop.PTR_EQ, [arg1, llhelper.CONST_NULL], descr=DONT_CHANGE) - self.emit_operation(op) - return True + return True, self.emit(op) # - return False + return False, None def handle_str_equal_level2(self, arg1, arg2, resultbox, mode): i1 = self.getptrinfo(arg1) @@ -782,25 +793,23 @@ do = EffectInfo.OS_STREQ_NONNULL_CHAR else: do = EffectInfo.OS_STREQ_CHECKNULL_CHAR - self.generate_modified_call(do, [arg1, vchar], - resultbox, mode) - return True + return True, self.generate_modified_call(do, [arg1, vchar], + resultbox, mode) # if isinstance(i1, VStringSliceInfo) and i1.is_virtual(): if i2 and i2.is_nonnull(): do = EffectInfo.OS_STREQ_SLICE_NONNULL else: do = EffectInfo.OS_STREQ_SLICE_CHECKNULL - self.generate_modified_call(do, [i1.s, i1.start, i1.lgtop, - arg2], resultbox, mode) - return True - return False + return True, self.generate_modified_call(do, [i1.s, i1.start, i1.lgtop, + arg2], resultbox, mode) + return False, None def opt_call_stroruni_STR_CMP(self, op, mode): i1 = 
self.getptrinfo(op.getarg(1)) i2 = self.getptrinfo(op.getarg(2)) if not i1 or not i2: - return False + return False, None l1box = i1.getstrlen(None, self, mode, False) l2box = i2.getstrlen(None, self, mode, False) if (l1box is not None and l2box is not None and @@ -814,8 +823,8 @@ op = self.replace_op_with(op, rop.INT_SUB, [char1, char2], descr=DONT_CHANGE) seo(op) - return True - return False + return True, None + return False, None def opt_call_SHRINK_ARRAY(self, op): i1 = self.getptrinfo(op.getarg(1)) @@ -838,14 +847,12 @@ op = self.optimizer.replace_op_with(result, rop.CALL_I, [ConstInt(func)] + args, descr=calldescr) - self.emit_operation(op) - - def propagate_forward(self, op): - dispatch_opt(self, op) + return self.emit(op) dispatch_opt = make_dispatcher_method(OptString, 'optimize_', - default=OptString.emit_operation) + default=OptString.emit) +dispatch_postprocess = make_dispatcher_method(OptString, 'postprocess_') def _findall_call_oopspec(): From noreply at buildbot.pypy.org Mon Oct 12 11:31:12 2015 From: noreply at buildbot.pypy.org (jerith) Date: Mon, 12 Oct 2015 11:31:12 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Kill unnecessary oldop arg. Message-ID: <20151012093112.95CB11C146A@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r80126:0239a15d98b6 Date: 2015-10-12 11:13 +0200 http://bitbucket.org/pypy/pypy/changeset/0239a15d98b6/ Log: Kill unnecessary oldop arg. 
diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,7 +28,7 @@ self.optimizer.force_box(arg, self) return self.emit(op) - def propagate_postprocess(self, op, oldop): + def propagate_postprocess(self, op): pass def setup(self): diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -502,7 +502,7 @@ # return self.emit(op) return self.emit(op) - def postprocess_GETFIELD_GC_I(self, op, oldop): + def postprocess_GETFIELD_GC_I(self, op): # then remember the result of reading the field structinfo = self.ensure_ptr_info_arg0(op) cf = self.field_cache(op.getdescr()) @@ -562,7 +562,7 @@ # return self.emit(op) return self.emit(op) - def postprocess_GETARRAYITEM_GC_I(self, op, oldop): + def postprocess_GETARRAYITEM_GC_I(self, op): # the remember the result of reading the array item arrayinfo = self.ensure_ptr_info_arg0(op) indexb = self.getintbound(op.getarg(1)) diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -45,8 +45,8 @@ def propagate_forward(self, op): return dispatch_opt(self, op) - def propagate_postprocess(self, op, oldop): - return dispatch_postprocess(self, op, oldop) + def propagate_postprocess(self, op): + return dispatch_postprocess(self, op) def propagate_bounds_backward(self, box): # FIXME: This takes care of the instruction where box is the reuslt @@ -62,7 +62,7 @@ def _optimize_guard_true_false_value(self, op): return self.emit(op) - def _postprocess_guard_true_false_value(self, op, oldop): + def _postprocess_guard_true_false_value(self, op): if op.getarg(0).type == 'i': 
self.propagate_bounds_backward(op.getarg(0)) @@ -85,7 +85,7 @@ return None return self.emit(op) - def postprocess_INT_OR_or_XOR(self, op, oldop): + def postprocess_INT_OR_or_XOR(self, op): v1 = self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(v1) v2 = self.get_box_replacement(op.getarg(1)) @@ -105,7 +105,7 @@ def optimize_INT_AND(self, op): return self.emit(op) - def postprocess_INT_AND(self, op, oldop): + def postprocess_INT_AND(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -124,7 +124,7 @@ def optimize_INT_SUB(self, op): return self.emit(op) - def postprocess_INT_SUB(self, op, oldop): + def postprocess_INT_SUB(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.sub_bound(b2) @@ -171,7 +171,7 @@ return self.emit(op) - def postprocess_INT_ADD(self, op, oldop): + def postprocess_INT_ADD(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -182,7 +182,7 @@ def optimize_INT_MUL(self, op): return self.emit(op) - def postprocess_INT_MUL(self, op, oldop): + def postprocess_INT_MUL(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -193,7 +193,7 @@ def optimize_INT_FLOORDIV(self, op): return self.emit(op) - def postprocess_INT_FLOORDIV(self, op, oldop): + def postprocess_INT_FLOORDIV(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) r = self.getintbound(op) @@ -214,7 +214,7 @@ args=[arg1, arg2]) return self.emit(op) - def postprocess_INT_MOD(self, op, oldop): + def postprocess_INT_MOD(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) known_nonneg = (b1.known_ge(IntBound(0, 0)) and @@ -235,7 +235,7 @@ def optimize_INT_LSHIFT(self, op): return self.emit(op) - def postprocess_INT_LSHIFT(self, op, oldop): + def postprocess_INT_LSHIFT(self, op): arg0 = 
self.get_box_replacement(op.getarg(0)) b1 = self.getintbound(arg0) arg1 = self.get_box_replacement(op.getarg(1)) @@ -261,7 +261,7 @@ return None return self.emit(op) - def postprocess_INT_RSHIFT(self, op, oldop): + def postprocess_INT_RSHIFT(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) b = b1.rshift_bound(b2) @@ -318,7 +318,7 @@ op = self.replace_op_with(op, rop.INT_ADD) return self.emit(op) - def postprocess_INT_ADD_OVF(self, op, oldop): + def postprocess_INT_ADD_OVF(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.add_bound(b2) @@ -338,7 +338,7 @@ op = self.replace_op_with(op, rop.INT_SUB) return self.emit(op) - def postprocess_INT_SUB_OVF(self, op, oldop): + def postprocess_INT_SUB_OVF(self, op): arg0 = self.get_box_replacement(op.getarg(0)) arg1 = self.get_box_replacement(op.getarg(1)) b0 = self.getintbound(arg0) @@ -355,7 +355,7 @@ op = self.replace_op_with(op, rop.INT_MUL) return self.emit(op) - def postprocess_INT_MUL_OVF(self, op, oldop): + def postprocess_INT_MUL_OVF(self, op): b1 = self.getintbound(op.getarg(0)) b2 = self.getintbound(op.getarg(1)) resbound = b1.mul_bound(b2) @@ -456,7 +456,7 @@ else: return self.emit(op) - def postprocess_INT_SIGNEXT(self, op, oldop): + def postprocess_INT_SIGNEXT(self, op): numbits = op.getarg(1).getint() * 8 start = -(1 << (numbits - 1)) stop = 1 << (numbits - 1) @@ -467,14 +467,14 @@ def optimize_ARRAYLEN_GC(self, op): return self.emit(op) - def postprocess_ARRAYLEN_GC(self, op, oldop): + def postprocess_ARRAYLEN_GC(self, op): array = self.ensure_ptr_info_arg0(op) self.optimizer.setintbound(op, array.getlenbound(None)) def optimize_STRLEN(self, op): return self.emit(op) - def postprocess_STRLEN(self, op, oldop): + def postprocess_STRLEN(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_string) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_string)) @@ -482,7 +482,7 @@ def 
optimize_UNICODELEN(self, op): return self.emit(op) - def postprocess_UNICODELEN(self, op, oldop): + def postprocess_UNICODELEN(self, op): self.make_nonnull_str(op.getarg(0), vstring.mode_unicode) array = self.getptrinfo(op.getarg(0)) self.optimizer.setintbound(op, array.getlenbound(vstring.mode_unicode)) @@ -490,7 +490,7 @@ def optimize_STRGETITEM(self, op): return self.emit(op) - def postprocess_STRGETITEM(self, op, oldop): + def postprocess_STRGETITEM(self, op): v1 = self.getintbound(op) v2 = self.getptrinfo(op.getarg(0)) intbound = self.getintbound(op.getarg(1)) @@ -504,7 +504,7 @@ def optimize_GETFIELD_RAW_I(self, op): return self.emit(op) - def postprocess_GETFIELD_RAW_I(self, op, oldop): + def postprocess_GETFIELD_RAW_I(self, op): descr = op.getdescr() if descr.is_integer_bounded(): b1 = self.getintbound(op) @@ -534,7 +534,7 @@ def optimize_GETARRAYITEM_RAW_I(self, op): return self.emit(op) - def postprocess_GETARRAYITEM_RAW_I(self, op, oldop): + def postprocess_GETARRAYITEM_RAW_I(self, op): descr = op.getdescr() if descr and descr.is_item_integer_bounded(): intbound = self.getintbound(op) @@ -554,7 +554,7 @@ def optimize_UNICODEGETITEM(self, op): return self.emit(op) - def postprocess_UNICODEGETITEM(self, op, oldop): + def postprocess_UNICODEGETITEM(self, op): b1 = self.getintbound(op) b1.make_ge(IntLowerBound(0)) v2 = self.getptrinfo(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -43,8 +43,8 @@ self.callback_func = callback_func self.callback_args = callback_args - def callback(self, oldop): - self.callback_func(self.op, oldop, *self.callback_args) + def callback(self): + self.callback_func(self.op, *self.callback_args) class Optimization(object): @@ -564,7 +564,6 @@ def send_extra_operation(self, op, opt=None): if opt is None: opt = self.first_optimization - oldop = op 
opt_results = [] while opt is not None: opt_result = opt.propagate_forward(op) @@ -575,7 +574,7 @@ op = opt_result.op opt = opt.next_optimization for opt_result in reversed(opt_results): - opt_result.callback(oldop) + opt_result.callback() def propagate_forward(self, op): dispatch_opt(self, op) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -73,8 +73,8 @@ def propagate_forward(self, op): return dispatch_opt(self, op) - def propagate_postprocess(self, op, oldop): - dispatch_postprocess(self, op, oldop) + def propagate_postprocess(self, op): + dispatch_postprocess(self, op) def optimize_default(self, op): canfold = op.is_always_pure() @@ -113,7 +113,7 @@ # otherwise, the operation remains return self.emit(op, self.postprocess_default, save, nextop) - def postprocess_default(self, op, oldop, save, nextop): + def postprocess_default(self, op, save, nextop): # postprocessor for optimize_default, not default postprocessor if op.returns_bool_result(): self.getintbound(op).make_bool() @@ -166,7 +166,7 @@ newop = self.optimizer.replace_op_with(op, opnum) return self.emit(newop, self.postprocess_call_pure) - def postprocess_call_pure(self, op, oldop): + def postprocess_call_pure(self, op): self.call_pure_positions.append( len(self.optimizer._newoperations) - 1) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -38,8 +38,8 @@ return dispatch_opt(self, op) - def propagate_postprocess(self, op, oldop): - return dispatch_postprocess(self, op, oldop) + def propagate_postprocess(self, op): + return dispatch_postprocess(self, op) def try_boolinvers(self, op, targs): oldop = self.get_pure_result(targs) @@ -127,7 +127,7 @@ else: return self.emit(op) - def 
postprocess_INT_SUB(self, op, oldop): + def postprocess_INT_SUB(self, op): self.optimizer.pure_reverse(op) def optimize_INT_ADD(self, op): @@ -146,7 +146,7 @@ else: return self.emit(op) - def postprocess_INT_ADD(self, op, oldop): + def postprocess_INT_ADD(self, op): self.optimizer.pure_reverse(op) def optimize_INT_MUL(self, op): @@ -234,7 +234,7 @@ return self.emit(newop) return self.emit(op) - def postprocess_FLOAT_MUL(self, op, oldop): + def postprocess_FLOAT_MUL(self, op): self.optimizer.pure_reverse(op) def optimize_FLOAT_TRUEDIV(self, op): @@ -261,7 +261,7 @@ def optimize_FLOAT_NEG(self, op): return self.emit(op) - def postprocess_FLOAT_NEG(self, op, oldop): + def postprocess_FLOAT_NEG(self, op): self.optimizer.pure_reverse(op) def optimize_guard(self, op, constbox): @@ -298,7 +298,7 @@ 'fail' % r) return self.emit(op) - def postprocess_GUARD_ISNULL(self, op, oldop): + def postprocess_GUARD_ISNULL(self, op): self.make_constant(op.getarg(0), self.optimizer.cpu.ts.CONST_NULL) def optimize_GUARD_IS_OBJECT(self, op): @@ -376,7 +376,7 @@ 'fail' % r) return self.emit(op) - def postprocess_GUARD_NONNULL(self, op, oldop): + def postprocess_GUARD_NONNULL(self, op): self.make_nonnull(op.getarg(0)) self.getptrinfo(op.getarg(0)).mark_last_guard(self.optimizer) @@ -399,7 +399,7 @@ assert isinstance(constbox, Const) return self.optimize_guard(op, constbox) - def postprocess_GUARD_VALUE(self, op, oldop): + def postprocess_GUARD_VALUE(self, op): box = self.get_box_replacement(op.getarg(0)) self.make_constant(box, op.getarg(1)) @@ -431,14 +431,14 @@ def optimize_GUARD_TRUE(self, op): return self.optimize_guard(op, CONST_1) - def postprocess_GUARD_TRUE(self, op, oldop): + def postprocess_GUARD_TRUE(self, op): box = self.get_box_replacement(op.getarg(0)) self.make_constant(box, CONST_1) def optimize_GUARD_FALSE(self, op): return self.optimize_guard(op, CONST_0) - def postprocess_GUARD_FALSE(self, op, oldop): + def postprocess_GUARD_FALSE(self, op): box = 
self.get_box_replacement(op.getarg(0)) self.make_constant(box, CONST_0) @@ -486,7 +486,7 @@ return self.emit(op) return self.emit(op) - def postprocess_GUARD_CLASS(self, op, oldop): + def postprocess_GUARD_CLASS(self, op): expectedclassbox = op.getarg(1) info = self.getptrinfo(op.getarg(0)) old_guard_op = info.get_last_guard(self.optimizer) @@ -522,7 +522,7 @@ # there is no reason to have a separate operation for this newop = self.replace_op_with(op, OpHelpers.call_for_descr(op.getdescr())) - return self.emit(op) + return self.emit(newop, self.postprocess_CALL_LOOPINVARIANT_I, op) def postprocess_CALL_LOOPINVARIANT_I(self, op, oldop): key = make_hashable_int(op.getarg(0).getint()) diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -70,7 +70,7 @@ self._finish_guard_op = self._last_guard_not_forced_2 return self.emit(op) - def postprocess_FINISH(self, op, oldop): + def postprocess_FINISH(self, op): guard_op = self._finish_guard_op if guard_op is not None: guard_op = self.optimizer.store_final_boxes_in_guard(guard_op, []) diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py --- a/rpython/jit/metainterp/optimizeopt/vstring.py +++ b/rpython/jit/metainterp/optimizeopt/vstring.py @@ -425,8 +425,8 @@ def propagate_forward(self, op): return dispatch_opt(self, op) - def propagate_postprocess(self, op, oldop): - return dispatch_postprocess(self, op, oldop) + def propagate_postprocess(self, op): + return dispatch_postprocess(self, op) def make_vstring_plain(self, op, mode, length): vvalue = VStringPlainInfo(mode, True, length) @@ -460,10 +460,10 @@ self.make_nonnull_str(op, mode) return self.emit(op) - def postprocess_NEWSTR(self, op, oldop): + def postprocess_NEWSTR(self, op): self.pure_from_args(mode_string.STRLEN, [op], op.getarg(0)) - def 
postprocess_NEWUNICODE(self, op, oldop): + def postprocess_NEWUNICODE(self, op): self.pure_from_args(mode_unicode.STRLEN, [op], op.getarg(0)) def optimize_STRSETITEM(self, op): From noreply at buildbot.pypy.org Mon Oct 12 11:31:14 2015 From: noreply at buildbot.pypy.org (jerith) Date: Mon, 12 Oct 2015 11:31:14 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Fix OptSimplify and clean up some things. Message-ID: <20151012093114.B6DE91C146A@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r80127:61672fcaccb5 Date: 2015-10-12 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/61672fcaccb5/ Log: Fix OptSimplify and clean up some things. diff --git a/rpython/jit/metainterp/optimizeopt/earlyforce.py b/rpython/jit/metainterp/optimizeopt/earlyforce.py --- a/rpython/jit/metainterp/optimizeopt/earlyforce.py +++ b/rpython/jit/metainterp/optimizeopt/earlyforce.py @@ -28,8 +28,5 @@ self.optimizer.force_box(arg, self) return self.emit(op) - def propagate_postprocess(self, op): - pass - def setup(self): self.optimizer.optearlyforce = self diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -61,7 +61,7 @@ raise NotImplementedError def propagate_postprocess(self, op): - raise NotImplementedError + pass def emit_operation(self, op): assert False, "This should never be called." 
@@ -579,9 +579,6 @@ def propagate_forward(self, op): dispatch_opt(self, op) - def propagate_postprocess(self, op): - pass - def emit_extra(self, op): # no forwarding, because we're at the end of the chain self.emit(op) diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py --- a/rpython/jit/metainterp/optimizeopt/simplify.py +++ b/rpython/jit/metainterp/optimizeopt/simplify.py @@ -8,16 +8,16 @@ self.last_label_descr = None self.unroll = unroll - def emit_operation(self, op): + def emit(self, op): if op.is_guard(): if self.optimizer.pendingfields is None: self.optimizer.pendingfields = [] - Optimization.emit_operation(self, op) + return Optimization.emit(self, op) def optimize_CALL_PURE_I(self, op): opnum = OpHelpers.call_for_descr(op.getdescr()) newop = self.optimizer.replace_op_with(op, opnum) - self.emit_operation(newop) + return self.emit(newop) optimize_CALL_PURE_R = optimize_CALL_PURE_I optimize_CALL_PURE_F = optimize_CALL_PURE_I optimize_CALL_PURE_N = optimize_CALL_PURE_I @@ -25,7 +25,7 @@ def optimize_CALL_LOOPINVARIANT_I(self, op): opnum = OpHelpers.call_for_descr(op.getdescr()) op = op.copy_and_change(opnum) - self.emit_operation(op) + return self.emit(op) optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I @@ -35,7 +35,7 @@ def optimize_VIRTUAL_REF(self, op): newop = self.replace_op_with(op, rop.SAME_AS_R, [op.getarg(0)]) - self.emit_operation(newop) + return self.emit(newop) def optimize_QUASIIMMUT_FIELD(self, op): # xxx ideally we could also kill the following GUARD_NOT_INVALIDATED @@ -51,7 +51,7 @@ # if isinstance(descr, JitCellToken): # return self.optimize_JUMP(op.copy_and_change(rop.JUMP)) # self.last_label_descr = op.getdescr() - # self.emit_operation(op) + # return self.emit(op) # def optimize_JUMP(self, op): # if not self.unroll: @@ -67,11 +67,11 @@ # else: # 
assert len(descr.target_tokens) == 1 # op.setdescr(descr.target_tokens[0]) - # self.emit_operation(op) + # return self.emit(op) def optimize_GUARD_FUTURE_CONDITION(self, op): self.optimizer.notice_guard_future_condition(op) dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_', - default=OptSimplify.emit_operation) + default=OptSimplify.emit) OptSimplify.propagate_forward = dispatch_opt From noreply at buildbot.pypy.org Mon Oct 12 11:33:30 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 11:33:30 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: fixed added parameter to reshape (test_zjit) Message-ID: <20151012093330.DECFF1C146A@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80128:857944df0258 Date: 2015-10-12 11:33 +0200 http://bitbucket.org/pypy/pypy/changeset/857944df0258/ Log: fixed added parameter to reshape (test_zjit) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -829,7 +829,8 @@ elif self.name == 'reshape': w_arg = self.args[1] assert isinstance(w_arg, ArrayConstant) - w_res = arr.reshape(interp.space, w_arg.wrap(interp.space)) + order = -1 + w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False else: From noreply at buildbot.pypy.org Mon Oct 12 12:01:01 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 12:01:01 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: conflict resolution for the new packed guards (this would have been needed in the future anyway) Message-ID: <20151012100101.D39101C05B6@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80129:a0a5d6b95d2b Date: 2015-10-12 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/a0a5d6b95d2b/ Log: conflict resolution for the new packed guards (this would have been needed in the future anyway) diff --git 
a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -374,7 +374,12 @@ """ NOT_RPTYHON """ op_str = str(self.op) if self.op.is_guard(): - args_str = [str(arg) for arg in self.op.getfailargs()] + args_str = [] + for arg in self.op.getfailargs(): + name = 'None' + if arg: + name = arg.repr_short(arg._repr_memo) + args_str.append(name) op_str += " " + ','.join(args_str) return "[%d] %s" % (self.opidx, op_str) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -92,6 +92,31 @@ visited += 1 return None + def try_to_trash_pack(self, state): + # one element a pack has several dependencies pointing to + # it thus we MUST skip this pack! + if len(state.worklist) > 0: + # break the first! + i = 0 + node = state.worklist[i] + i += 1 + while i < len(state.worklist) and not node.pack: + node = state.worklist[i] + i += 1 + + if not node.pack: + return False + + pack = node.pack + for n in node.pack.operations: + if n.depends_count() > 0: + pack.clear() + return True + else: + return False + + return False + def delay(self, node, state): """ Delay this operation? Only if any dependency has not been resolved """ @@ -158,6 +183,9 @@ if not state.has_more(): break + if self.try_to_trash_pack(state): + continue + raise AssertionError("schedule failed cannot continue. 
possible reason: cycle") if not we_are_translated(): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1315,6 +1315,32 @@ except NotAVectorizeableLoop: pass + def test_remove_mew(self): + trace = self.parse_loop(""" + [p0, p1, p2, p3, i4, i5, p6, p7, i8, f9, i10, i11] + f12 = raw_load_f(i8, i5, descr=floatarraydescr) + guard_not_invalidated(descr=) [p7, p6, p2, p1, p0, f12, i4, p3, i5] + f14 = float_mul(f12, 0.0) + i15 = float_eq(f14, f14) + guard_true(i15, descr=) [p7, p6, p2, p1, p0, f12, i4, p3, i5] + f17 = call_f(1234, f12, f9, descr=writearraydescr) + i20 = call_i(1234444, 232, descr=writearraydescr) + f21 = float_mul(f17, 0.0) + i22 = float_eq(f21, f21) + guard_true(i22, descr=) [p7, p6, p2, p1, p0, f9, f12, i20, f21, f17, i4, p3, i5] + i23 = int_is_true(i20) + guard_false(i23, descr=) [p7, p6, p2, p1, p0, f9, f12, i20, f21, f17, i4, p3, i5] + raw_store(i10, i5, f17, descr=floatarraydescr) + i25 = int_add(i4, 1) + i27 = int_add(i5, 8) + i28 = int_ge(i25, i11) + guard_false(i28, descr=) [i11, i25, p7, p6, p2, p1, p0, i27, None, p3, None] + debug_merge_point(0, 0, '(numpy_call2_inc_out_right: no get_printable_location)') + jump(p0, p1, p2, p3, i25, i27, p6, p7, i8, f9, i10, i11) + """) + vopt = self.schedule(trace) + self.debug_print_operations(trace) + class TestLLtype(BaseTestVectorize, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -795,7 +795,7 @@ class InputArgVector(VectorOp, AbstractInputArg): def __init__(self): - self.type = 'v' + pass def returns_vector(self): return True From noreply at buildbot.pypy.org Mon Oct 12 12:21:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 
12:21:29 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: Fix Message-ID: <20151012102129.BA4021C1186@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80130:ad4b3dabe0c9 Date: 2015-10-12 12:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ad4b3dabe0c9/ Log: Fix diff --git a/rpython/rtyper/lltypesystem/rtagged.py b/rpython/rtyper/lltypesystem/rtagged.py --- a/rpython/rtyper/lltypesystem/rtagged.py +++ b/rpython/rtyper/lltypesystem/rtagged.py @@ -27,7 +27,8 @@ self.classdef, flds)) self.specialfieldname = flds[0] - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): + assert not nonmovable if self.is_parent: raise TyperError("don't instantiate %r, it is a parent of an " "UnboxedValue class" % (self.classdef,)) From noreply at buildbot.pypy.org Mon Oct 12 12:22:51 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 12:22:51 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: added test case to stress the case while scheduling Message-ID: <20151012102251.11EB21C12E2@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80131:d1e7c2151d01 Date: 2015-10-12 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/d1e7c2151d01/ Log: added test case to stress the case while scheduling diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -844,7 +844,6 @@ def test_where(self): result = self.run("where") assert result == -40 - self.check_vectorized(1, 0) def define_searchsorted(): return """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1315,31 +1315,29 @@ except 
NotAVectorizeableLoop: pass - def test_remove_mew(self): + def test_pass(self): trace = self.parse_loop(""" - [p0, p1, p2, p3, i4, i5, p6, p7, i8, f9, i10, i11] - f12 = raw_load_f(i8, i5, descr=floatarraydescr) - guard_not_invalidated(descr=) [p7, p6, p2, p1, p0, f12, i4, p3, i5] - f14 = float_mul(f12, 0.0) - i15 = float_eq(f14, f14) - guard_true(i15, descr=) [p7, p6, p2, p1, p0, f12, i4, p3, i5] - f17 = call_f(1234, f12, f9, descr=writearraydescr) - i20 = call_i(1234444, 232, descr=writearraydescr) - f21 = float_mul(f17, 0.0) - i22 = float_eq(f21, f21) - guard_true(i22, descr=) [p7, p6, p2, p1, p0, f9, f12, i20, f21, f17, i4, p3, i5] - i23 = int_is_true(i20) - guard_false(i23, descr=) [p7, p6, p2, p1, p0, f9, f12, i20, f21, f17, i4, p3, i5] - raw_store(i10, i5, f17, descr=floatarraydescr) - i25 = int_add(i4, 1) - i27 = int_add(i5, 8) - i28 = int_ge(i25, i11) - guard_false(i28, descr=) [i11, i25, p7, p6, p2, p1, p0, i27, None, p3, None] - debug_merge_point(0, 0, '(numpy_call2_inc_out_right: no get_printable_location)') - jump(p0, p1, p2, p3, i25, i27, p6, p7, i8, f9, i10, i11) + [p0,i0] + f0 = raw_load_f(p0, i0, descr=floatarraydescr) + f1 = float_mul(f0, 0.0) + i2 = float_eq(f1, f1) + guard_true(i2) [p0, i0] + f2 = call_f(0, f0) + f21 = float_mul(f2, 0.0) + i3 = float_eq(f21, f21) + guard_true(i3) [p0, i0] + raw_store(p0, i0, f21, descr=floatarraydescr) + i4 = int_add(i0, 8) + jump(p0, i4) """) vopt = self.schedule(trace) - self.debug_print_operations(trace) + self.ensure_operations([ + 'v10[2xf64] = vec_raw_load_f(p0,i0,descr=floatarraydescr)', + 'v11[2xf64] = vec_float_mul(v10[2xf64], v9[2xf64])', + 'v12[2xf64] = vec_float_eq(v11[2xf64], v11[2xf64])', + 'i100 = vec_unpack_f(v12[4xi32], 0, 1)', + 'guard_true(i100) [p0, i0]', + ], trace) class TestLLtype(BaseTestVectorize, LLtypeMixin): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ 
b/rpython/jit/metainterp/resoperation.py @@ -722,6 +722,8 @@ class VectorOp(object): _mixin_ = True + type = 'f' + def vector_bytesize(self): assert self.count > 0 return self.byte_size * self.count diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -136,7 +136,7 @@ return self._cache[elem] except KeyError: pass - if elem[0] in 'ifrp': + if elem[0] in 'ifrpv': box = OpHelpers.inputarg_from_tp(elem[0]) number = elem[1:] if elem.startswith('v'): From noreply at buildbot.pypy.org Mon Oct 12 12:25:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 12:25:34 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: fix Message-ID: <20151012102534.2E71C1C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80132:8d9e9019c8ba Date: 2015-10-12 12:23 +0200 http://bitbucket.org/pypy/pypy/changeset/8d9e9019c8ba/ Log: fix diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -565,8 +565,8 @@ tid = self.get_type_id(VAR) largeobj_size = self.gc.nonlarge_max + 1 self.gc.next_major_collection_threshold = 99999.0 - addr_src = self.gc.external_malloc(tid, largeobj_size) - addr_dst = self.gc.external_malloc(tid, largeobj_size) + addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) + addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) hdr_src = self.gc.header(addr_src) hdr_dst = self.gc.header(addr_dst) # From noreply at buildbot.pypy.org Mon Oct 12 14:11:59 2015 From: noreply at buildbot.pypy.org (jerith) Date: Mon, 12 Oct 2015 14:11:59 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Now in RPython. 
Message-ID: <20151012121159.6BDB01C1453@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r80133:e80e2fc935ca Date: 2015-10-12 14:11 +0200 http://bitbucket.org/pypy/pypy/changeset/e80e2fc935ca/ Log: Now in RPython. diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -288,14 +288,14 @@ cf = submap[index] = ArrayCachedField(index) return cf - def emit(self, op, callback_func=None, *callback_args): + def emit(self, op): self.emitting_operation(op) self.emit_postponed_op() if (op.is_comparison() or op.is_call_may_force() or op.is_ovf()): self.postponed_op = op else: - return Optimization.emit(self, op, callback_func, *callback_args) + return Optimization.emit(self, op) def emitting_operation(self, op): if op.has_no_side_effect(): diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -35,16 +35,12 @@ class OptimizationResult(object): - def __init__(self, opt, op, callback_func=None, *callback_args): + def __init__(self, opt, op): self.opt = opt self.op = op - if callback_func is None: - callback_func = opt.propagate_postprocess - self.callback_func = callback_func - self.callback_args = callback_args def callback(self): - self.callback_func(self.op, *self.callback_args) + self.opt.propagate_postprocess(self.op) class Optimization(object): @@ -66,9 +62,12 @@ def emit_operation(self, op): assert False, "This should never be called." 
- def emit(self, op, callback_func=None, *callback_args): - self.last_emitted_operation = op - return OptimizationResult(self, op, callback_func, *callback_args) + def emit(self, op): + return self.emit_result(OptimizationResult(self, op)) + + def emit_result(self, opt_result): + self.last_emitted_operation = opt_result.op + return opt_result def emit_extra(self, op, emit=True): if emit: @@ -579,11 +578,11 @@ def propagate_forward(self, op): dispatch_opt(self, op) - def emit_extra(self, op): + def emit_extra(self, op, emit=True): # no forwarding, because we're at the end of the chain self.emit(op) - def emit(self, op, callback_func=None, *callback_args): + def emit(self, op): # this actually emits the operation instead of forwarding it if op.returns_bool_result(): self.getintbound(op).make_bool() diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -1,10 +1,36 @@ -from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED +from rpython.jit.metainterp.optimizeopt.optimizer import ( + Optimization, OptimizationResult, REMOVED) from rpython.jit.metainterp.resoperation import rop, OpHelpers, AbstractResOp,\ ResOperation from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp +class DefaultOptimizationResult(OptimizationResult): + def __init__(self, opt, op, save, nextop): + OptimizationResult.__init__(self, opt, op) + self.save = save + self.nextop = nextop + + def callback(self): + self._callback(self.op, self.save, self.nextop) + + def _callback(self, op, save, nextop): + if op.returns_bool_result(): + self.opt.getintbound(op).make_bool() + if save: + recentops = self.opt.getrecentops(op.getopnum()) + recentops.add(op) + if nextop: + self.opt.emit_extra(nextop) + + +class 
CallPureOptimizationResult(OptimizationResult): + def callback(self): + self.opt.call_pure_positions.append( + len(self.opt.optimizer._newoperations) - 1) + + class RecentPureOps(object): REMEMBER_LIMIT = 16 @@ -111,17 +137,7 @@ return # otherwise, the operation remains - return self.emit(op, self.postprocess_default, save, nextop) - - def postprocess_default(self, op, save, nextop): - # postprocessor for optimize_default, not default postprocessor - if op.returns_bool_result(): - self.getintbound(op).make_bool() - if save: - recentops = self.getrecentops(op.getopnum()) - recentops.add(op) - if nextop: - self.emit_extra(nextop) + return self.emit_result(DefaultOptimizationResult(self, op, save, nextop)) def getrecentops(self, opnum): if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: @@ -164,11 +180,7 @@ # replace CALL_PURE with just CALL opnum = OpHelpers.call_for_descr(op.getdescr()) newop = self.optimizer.replace_op_with(op, opnum) - return self.emit(newop, self.postprocess_call_pure) - - def postprocess_call_pure(self, op): - self.call_pure_positions.append( - len(self.optimizer._newoperations) - 1) + return self.emit_result(CallPureOptimizationResult(self, newop)) optimize_CALL_PURE_R = optimize_CALL_PURE_I optimize_CALL_PURE_F = optimize_CALL_PURE_I diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -5,8 +5,8 @@ ConstFloat) from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.optimizeopt.intutils import IntBound -from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, REMOVED, - CONST_0, CONST_1) +from rpython.jit.metainterp.optimizeopt.optimizer import ( + Optimization, OptimizationResult, REMOVED, CONST_0, CONST_1) from rpython.jit.metainterp.optimizeopt.info import INFO_NONNULL, INFO_NULL from rpython.jit.metainterp.optimizeopt.util import _findall, 
make_dispatcher_method from rpython.jit.metainterp.resoperation import rop, ResOperation, opclasses,\ @@ -16,6 +16,21 @@ from rpython.rtyper import rclass import math + +class CallLoopinvariantOptimizationResult(OptimizationResult): + def __init__(self, opt, op, old_op): + OptimizationResult.__init__(self, opt, op) + self.old_op = old_op + + def callback(self): + self._callback(self.op, self.old_op) + + def _callback(self, op, old_op): + key = make_hashable_int(op.getarg(0).getint()) + self.opt.loop_invariant_producer[key] = self.opt.optimizer.getlastop() + self.opt.loop_invariant_results[key] = old_op + + class OptRewrite(Optimization): """Rewrite operations into equivalent, cheaper operations. This includes already executed operations and constants. @@ -522,21 +537,12 @@ # there is no reason to have a separate operation for this newop = self.replace_op_with(op, OpHelpers.call_for_descr(op.getdescr())) - return self.emit(newop, self.postprocess_CALL_LOOPINVARIANT_I, op) - - def postprocess_CALL_LOOPINVARIANT_I(self, op, oldop): - key = make_hashable_int(op.getarg(0).getint()) - self.loop_invariant_producer[key] = self.optimizer.getlastop() - self.loop_invariant_results[key] = oldop + return self.emit_result(CallLoopinvariantOptimizationResult(self, newop, op)) optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I - postprocess_CALL_LOOPINVARIANT_R = postprocess_CALL_LOOPINVARIANT_I - postprocess_CALL_LOOPINVARIANT_F = postprocess_CALL_LOOPINVARIANT_I - postprocess_CALL_LOOPINVARIANT_N = postprocess_CALL_LOOPINVARIANT_I - def optimize_COND_CALL(self, op): arg = op.getarg(0) b = self.getintbound(arg) From noreply at buildbot.pypy.org Mon Oct 12 14:25:30 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 14:25:30 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: inputargvector has type V to satisfy the 
translation Message-ID: <20151012122530.D1D4A1C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80134:34f55fc1bf0d Date: 2015-10-12 14:25 +0200 http://bitbucket.org/pypy/pypy/changeset/34f55fc1bf0d/ Log: inputargvector has type V to satisfy the translation diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -722,8 +722,6 @@ class VectorOp(object): _mixin_ = True - type = 'f' - def vector_bytesize(self): assert self.count > 0 return self.byte_size * self.count @@ -796,6 +794,7 @@ self.setref_base(lltype.nullptr(llmemory.GCREF.TO)) class InputArgVector(VectorOp, AbstractInputArg): + type = 'V' def __init__(self): pass From noreply at buildbot.pypy.org Mon Oct 12 15:52:37 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 15:52:37 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: did not consider guard in pack load, thus a wrong pack load was computed Message-ID: <20151012135238.287FC1C05B6@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80135:b9f8f993880a Date: 2015-10-12 15:52 +0200 http://bitbucket.org/pypy/pypy/changeset/b9f8f993880a/ Log: did not consider guard in pack load, thus a wrong pack load was computed diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -891,6 +891,11 @@ descr = left.getdescr() bytesize = descr.get_item_size_in_bytes() return bytesize * self.numops() - vec_reg_size + else: + assert left.is_guard() and left.getopnum() in \ + (rop.GUARD_TRUE, rop.GUARD_FALSE) + bytesize = left.getarg(0).bytesize + return bytesize * self.numops() - vec_reg_size return 0 if self.numops() == 0: return -1 diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -1339,6 +1339,23 @@ 'guard_true(i100) [p0, i0]', ], trace) + def test_pack_too_much(self): + trace = self.parse_loop(""" + [p0, p1, i2, i3, i4, i5] + i6 = raw_load_i(i4, i3, descr=int16arraydescr) + guard_not_invalidated() [p0, i6, i3, i2, p1] + i7 = int_is_true(i6) + guard_true(i7) [p0, i6, i3, i2, p1] + i10 = getarrayitem_raw_i(139832330560762, 2, descr=chararraydescr) + guard_value(i10, 1) [p0, i6, i3, i2, p1] + i16 = int_add(i2, 1) + i18 = int_add(i3, 2) + i19 = int_ge(i16, i5) + guard_false(i19) [p0, i6, i3, i2, p1] + jump(p0, p1, i16, i18, i4, i5)""") + self.vectorize(trace) + self.debug_print_operations(trace) + class TestLLtype(BaseTestVectorize, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -794,6 +794,8 @@ def split_overloaded_packs(self): newpacks = [] for i,pack in enumerate(self.packs): + if pack.operations[0].op.is_guard(): + import pdb; pdb.set_trace() load = pack.pack_load(self.vec_reg_size) if load > Pack.FULL: pack.split(newpacks, self.vec_reg_size) From noreply at buildbot.pypy.org Mon Oct 12 15:58:06 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 15:58:06 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed debugging code and enhanced the test to check pack_load for guards Message-ID: <20151012135806.E44E31C1186@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80136:f1dec3e408e6 Date: 2015-10-12 15:57 +0200 http://bitbucket.org/pypy/pypy/changeset/f1dec3e408e6/ Log: removed debugging code and enhanced the test to check pack_load for guards diff --git 
a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -263,6 +263,9 @@ Node(ResOperation(rop.GUARD_TRUE, [vec]), 4), Node(ResOperation(rop.GUARD_TRUE, [vec]), 5), ]) + assert pack.pack_load(16) == 24-16 + assert pack.pack_load(8) == 24-8 + assert pack.pack_load(32) == 24-32 assert pack.opcount_filling_vector_register(16) == 4 ops, newops = pack.slice_operations(16) assert len(ops) == 4 @@ -1339,23 +1342,6 @@ 'guard_true(i100) [p0, i0]', ], trace) - def test_pack_too_much(self): - trace = self.parse_loop(""" - [p0, p1, i2, i3, i4, i5] - i6 = raw_load_i(i4, i3, descr=int16arraydescr) - guard_not_invalidated() [p0, i6, i3, i2, p1] - i7 = int_is_true(i6) - guard_true(i7) [p0, i6, i3, i2, p1] - i10 = getarrayitem_raw_i(139832330560762, 2, descr=chararraydescr) - guard_value(i10, 1) [p0, i6, i3, i2, p1] - i16 = int_add(i2, 1) - i18 = int_add(i3, 2) - i19 = int_ge(i16, i5) - guard_false(i19) [p0, i6, i3, i2, p1] - jump(p0, p1, i16, i18, i4, i5)""") - self.vectorize(trace) - self.debug_print_operations(trace) - class TestLLtype(BaseTestVectorize, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -794,8 +794,6 @@ def split_overloaded_packs(self): newpacks = [] for i,pack in enumerate(self.packs): - if pack.operations[0].op.is_guard(): - import pdb; pdb.set_trace() load = pack.pack_load(self.vec_reg_size) if load > Pack.FULL: pack.split(newpacks, self.vec_reg_size) From noreply at buildbot.pypy.org Mon Oct 12 17:20:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 17:20:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Hopefully fix the BridgeExceptionNotFirst problem Message-ID: 
<20151012152059.9CA081C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80137:3d1ee9e224b8 Date: 2015-10-12 16:55 +0200 http://bitbucket.org/pypy/pypy/changeset/3d1ee9e224b8/ Log: Hopefully fix the BridgeExceptionNotFirst problem diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -45,8 +45,6 @@ # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) - elif opnum == rop.BRIDGE_EXCEPTION: - assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -906,8 +904,8 @@ values.append(value) if hasattr(descr, '_llgraph_bridge'): if propagate_exception: - assert (descr._llgraph_bridge.operations[0].opnum == - rop.BRIDGE_EXCEPTION) + assert (descr._llgraph_bridge.operations[0].opnum in + (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION)) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) @@ -1229,8 +1227,32 @@ def execute_keepalive(self, descr, x): pass - def execute_bridge_exception(self, descr): - pass + def execute_save_exc_class(self, descr): + lle = self.last_exception + if lle is None: + return 0 + else: + return support.cast_to_int(lle.args[0]) + + def execute_save_exception(self, descr): + lle = self.last_exception + if lle is None: + res = lltype.nullptr(llmemory.GCREF.TO) + else: + res = lltype.cast_opaque_ptr(llmemory.GCREF, lle.args[1]) + self.last_exception = None + return res + + def execute_restore_exception(self, descr, kls, e): + kls = heaptracker.int2adr(kls) + if e: + value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, e) + assert llmemory.cast_ptr_to_adr(value.typeptr) == kls + lle = LLException(value.typeptr, e) + else: + assert kls == llmemory.NULL + lle 
= None + self.last_exception = lle def _getdescr(op): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -119,6 +119,7 @@ # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. # + operations = self.remove_bridge_exception(operations) for i in range(len(operations)): op = operations[i] assert op.get_forwarded() is None @@ -168,9 +169,6 @@ continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: self.emit_pending_zeros() - if op.getopnum() == rop.BRIDGE_EXCEPTION: - self.remove_bridge_exception(operations, i) - continue # self.emit_op(op) return self._newops @@ -686,13 +684,17 @@ size = max(size, 2 * WORD) return (size + WORD-1) & ~(WORD-1) # round up - def remove_bridge_exception(self, operations, i): - """Check that the 'bridge_exception' operation occurs at the - start of the bridge.""" - if i == 0: - return # first operation, ok - if i == 1 and operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: - return # 2nd operation after INCREMENT_DEBUG_COUNTER, ok - # not ok! 
- assert we_are_translated() - raise BridgeExceptionNotFirst + def remove_bridge_exception(self, operations): + """Check a common case: 'save_exception' immediately followed by + 'restore_exception' at the start of the bridge.""" + # XXX should check if the boxes are used later; but we just assume + # they aren't for now + start = 0 + if operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: + start = 1 + if len(operations) >= start + 3: + if (operations[start+0].getopnum() == rop.SAVE_EXC_CLASS and + operations[start+1].getopnum() == rop.SAVE_EXCEPTION and + operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): + return operations[:start] + operations[start+3:] + return operations diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2099,6 +2099,60 @@ excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue + def test_save_restore_exceptions(self): + exc_tp = None + exc_ptr = None + def func(i): + if hasattr(self.cpu, '_exception_emulator'): + assert not self.cpu._exception_emulator[0] + assert not self.cpu._exception_emulator[1] + called.append(i) + if i: + raise LLException(exc_tp, exc_ptr) + + ops = ''' + [i0] + i1 = same_as_i(1) + call_n(ConstClass(fptr), i0, descr=calldescr) + i2 = save_exc_class() + p2 = save_exception() + call_n(ConstClass(fptr), 0, descr=calldescr) + restore_exception(i2, p2) + p0 = guard_exception(ConstClass(xtp)) [i1] + finish(p0) + ''' + FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) + fptr = llhelper(FPTR, func) + calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + + xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + xtp.subclassrange_min = 1 + xtp.subclassrange_max = 3 + X = lltype.GcStruct('X', ('parent', rclass.OBJECT), + hints={'vtable': xtp._obj}) + xx = lltype.malloc(X) + xx.parent.typeptr = xtp + xptr = 
lltype.cast_opaque_ptr(llmemory.GCREF, xx) + + exc_tp = xtp + exc_ptr = xptr + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + called = [] + deadframe = self.cpu.execute_token(looptoken, 5) + assert called == [5, 0] + assert self.cpu.get_ref_value(deadframe, 0) == xptr + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + called = [] + deadframe = self.cpu.execute_token(looptoken, 0) + assert called == [0, 0] + assert self.cpu.get_int_value(deadframe, 0) == 1 + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + def test_cond_call_gc_wb(self): def func_void(a): record.append(rffi.cast(lltype.Signed, a)) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1609,6 +1609,15 @@ self.implement_guard(guard_token) self._store_and_reset_exception(self.mc, resloc) + def genop_save_exc_class(self, op, arglocs, resloc): + self.mc.MOV(resloc, heap(self.cpu.pos_exception())) + + def genop_save_exception(self, op, arglocs, resloc): + self._store_and_reset_exception(self.mc, resloc) + + def genop_discard_restore_exception(self, op, arglocs): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + def _store_and_reset_exception(self, mc, excvalloc=None, exctploc=None, tmploc=None): """ Resest the exception. 
If excvalloc is None, then store it on the diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -418,6 +418,17 @@ self.perform_guard(op, [loc, loc1], resloc) self.rm.possibly_free_var(box) + def consider_save_exception(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) + consider_save_exc_class = consider_save_exception + + def consider_restore_exception(self, op): + args = op.getarglist() + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class + loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance + self.perform_discard(op, [loc0, loc1]) + consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception consider_guard_not_forced = consider_guard_no_exception diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -386,7 +386,9 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, - rop.BRIDGE_EXCEPTION, + rop.SAVE_EXC_CLASS, + rop.SAVE_EXCEPTION, + rop.RESTORE_EXCEPTION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2487,17 +2487,28 @@ # 'test_guard_no_exception_incorrectly_removed_from_bridge' # shows a corner case in which just putting GuARD_NO_EXCEPTION # here is a bad idea: the optimizer might remove it too. - # So we put a pair BRIDGE_EXCEPTION / GUARD_(NO)_EXCEPTION. 
- # The BRIDGE_EXCEPTION is meant to re-raise the exception - # caught before the bridge, but in reality it must end up - # as the first operation and thus is a no-op for the backends - # (it is removed in rewrite.py). Its real purpose is only to - # pass through the optimizer unmodified, so that the following - # GUARD_NO_EXCEPTION is not killed. - self.history.record(rop.BRIDGE_EXCEPTION, [], None) - if exception: - self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, - exception)) + # So we put a SAVE_EXCEPTION at the start, and a + # RESTORE_EXCEPTION just before the guard. (rewrite.py will + # remove the two if they end up consecutive.) + + # XXX too much jumps between older and newer models; clean up + # by killing SAVE_EXC_CLASS, RESTORE_EXCEPTION and GUARD_EXCEPTION + + exception_obj = lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception) + if exception_obj: + exc_class = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(exception_obj.typeptr)) + else: + exc_class = 0 + i = len(self.history.operations) + op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) + op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) + assert op1 is self.history.operations[i] + assert op2 is self.history.operations[i + 1] + self.history.operations = [op1, op2] + self.history.operations[:i] + self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) + if exception_obj: + self.execute_ll_raised(exception_obj) else: self.clear_exception() try: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -696,7 +696,7 @@ 'GUARD_SUBCLASS/2d/n', # only if supports_guard_gc_type '_GUARD_FOLDABLE_LAST', 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set - 'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d/r', # XXX kill me, use only SAVE_EXCEPTION 'GUARD_NO_OVERFLOW/0d/n', 
'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set @@ -827,7 +827,9 @@ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', - 'BRIDGE_EXCEPTION/0/n', # pyjitpl: prepare_resume_from_failure() + 'SAVE_EXCEPTION/0/r', + 'SAVE_EXC_CLASS/0/i', # XXX kill me + 'RESTORE_EXCEPTION/2/n', # XXX kill me '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', From noreply at buildbot.pypy.org Mon Oct 12 17:21:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 17:21:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Untested: ARM support Message-ID: <20151012152101.C2A1B1C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80138:802959e767cc Date: 2015-10-12 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/802959e767cc/ Log: Untested: ARM support diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -508,6 +508,20 @@ self._store_and_reset_exception(self.mc, resloc) return fcond + def emit_op_save_exc_class(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self.mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) + self.load_reg(self.mc, resloc, r.ip) + return fcond + + def emit_op_save_exception(self, op, arglocs, regalloc, fcond): + self._store_and_reset_exception(self.mc, resloc) + return fcond + + def emit_op_restore_exception(self, op, arglocs, regalloc, fcond): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + return fcond + def emit_op_debug_merge_point(self, op, arglocs, regalloc, fcond): return fcond emit_op_jit_debug = emit_op_debug_merge_point diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -707,6 +707,17 @@ 
[loc, loc1, resloc, pos_exc_value, pos_exception]) return arglocs + def prepare_op_save_exception(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + prepare_op_save_exc_class = prepare_op_save_exception + + def prepare_op_restore_exception(self, op, fcond): + boxes = op.getarglist() + loc0 = self.make_sure_var_in_reg(op.getarg(0), boxes) # exc class + loc1 = self.make_sure_var_in_reg(op.getarg(1), boxes) # exc instance + return [loc0, loc1] + def prepare_op_guard_no_exception(self, op, fcond): loc = self.make_sure_var_in_reg(ConstInt(self.cpu.pos_exception())) arglocs = self._prepare_guard(op, [loc]) From noreply at buildbot.pypy.org Mon Oct 12 17:21:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 17:21:03 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20151012152103.E6C8A1C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80139:e4a32853ca92 Date: 2015-10-12 17:19 +0200 http://bitbucket.org/pypy/pypy/changeset/e4a32853ca92/ Log: fix diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -515,6 +515,7 @@ return fcond def emit_op_save_exception(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] self._store_and_reset_exception(self.mc, resloc) return fcond From noreply at buildbot.pypy.org Mon Oct 12 17:26:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 17:26:22 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20151012152622.7BA1E1C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80140:c6c94169455a Date: 2015-10-12 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/c6c94169455a/ Log: fix diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ 
-905,7 +905,8 @@ if hasattr(descr, '_llgraph_bridge'): if propagate_exception: assert (descr._llgraph_bridge.operations[0].opnum in - (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION)) + (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION, + rop.GUARD_NO_EXCEPTION)) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) From noreply at buildbot.pypy.org Mon Oct 12 17:37:33 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 17:37:33 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: hg merge default Message-ID: <20151012153733.EFAA61C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80141:a698430306bd Date: 2015-10-12 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/a698430306bd/ Log: hg merge default diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -79,14 +79,11 @@ annmodel.TLS.check_str_without_nul = ( self.translator.config.translation.check_str_without_nul) - flowgraph, inputcells = self.get_call_parameters(function, args_s, policy) - if not isinstance(flowgraph, FunctionGraph): - assert isinstance(flowgraph, annmodel.SomeObject) - return flowgraph + flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy) if main_entry_point: self.translator.entry_point_graph = flowgraph - return self.build_graph_types(flowgraph, inputcells, complete_now=complete_now) + return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now) def get_call_parameters(self, function, args_s, policy): desc = self.bookkeeper.getdesc(function) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -508,6 +508,21 @@ self._store_and_reset_exception(self.mc, resloc) return fcond + def 
emit_op_save_exc_class(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self.mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) + self.load_reg(self.mc, resloc, r.ip) + return fcond + + def emit_op_save_exception(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self._store_and_reset_exception(self.mc, resloc) + return fcond + + def emit_op_restore_exception(self, op, arglocs, regalloc, fcond): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + return fcond + def emit_op_debug_merge_point(self, op, arglocs, regalloc, fcond): return fcond emit_op_jit_debug = emit_op_debug_merge_point diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -707,6 +707,17 @@ [loc, loc1, resloc, pos_exc_value, pos_exception]) return arglocs + def prepare_op_save_exception(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + prepare_op_save_exc_class = prepare_op_save_exception + + def prepare_op_restore_exception(self, op, fcond): + boxes = op.getarglist() + loc0 = self.make_sure_var_in_reg(op.getarg(0), boxes) # exc class + loc1 = self.make_sure_var_in_reg(op.getarg(1), boxes) # exc instance + return [loc0, loc1] + def prepare_op_guard_no_exception(self, op, fcond): loc = self.make_sure_var_in_reg(ConstInt(self.cpu.pos_exception())) arglocs = self._prepare_guard(op, [loc]) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -45,8 +45,6 @@ # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) - elif opnum == rop.BRIDGE_EXCEPTION: - assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ 
-906,8 +904,9 @@ values.append(value) if hasattr(descr, '_llgraph_bridge'): if propagate_exception: - assert (descr._llgraph_bridge.operations[0].opnum == - rop.BRIDGE_EXCEPTION) + assert (descr._llgraph_bridge.operations[0].opnum in + (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION, + rop.GUARD_NO_EXCEPTION)) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) @@ -1229,8 +1228,32 @@ def execute_keepalive(self, descr, x): pass - def execute_bridge_exception(self, descr): - pass + def execute_save_exc_class(self, descr): + lle = self.last_exception + if lle is None: + return 0 + else: + return support.cast_to_int(lle.args[0]) + + def execute_save_exception(self, descr): + lle = self.last_exception + if lle is None: + res = lltype.nullptr(llmemory.GCREF.TO) + else: + res = lltype.cast_opaque_ptr(llmemory.GCREF, lle.args[1]) + self.last_exception = None + return res + + def execute_restore_exception(self, descr, kls, e): + kls = heaptracker.int2adr(kls) + if e: + value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, e) + assert llmemory.cast_ptr_to_adr(value.typeptr) == kls + lle = LLException(value.typeptr, e) + else: + assert kls == llmemory.NULL + lle = None + self.last_exception = lle def _getdescr(op): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -119,6 +119,7 @@ # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. 
# + operations = self.remove_bridge_exception(operations) for i in range(len(operations)): op = operations[i] assert op.get_forwarded() is None @@ -168,9 +169,6 @@ continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: self.emit_pending_zeros() - if op.getopnum() == rop.BRIDGE_EXCEPTION: - self.remove_bridge_exception(operations, i) - continue # self.emit_op(op) return self._newops @@ -686,13 +684,17 @@ size = max(size, 2 * WORD) return (size + WORD-1) & ~(WORD-1) # round up - def remove_bridge_exception(self, operations, i): - """Check that the 'bridge_exception' operation occurs at the - start of the bridge.""" - if i == 0: - return # first operation, ok - if i == 1 and operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: - return # 2nd operation after INCREMENT_DEBUG_COUNTER, ok - # not ok! - assert we_are_translated() - raise BridgeExceptionNotFirst + def remove_bridge_exception(self, operations): + """Check a common case: 'save_exception' immediately followed by + 'restore_exception' at the start of the bridge.""" + # XXX should check if the boxes are used later; but we just assume + # they aren't for now + start = 0 + if operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: + start = 1 + if len(operations) >= start + 3: + if (operations[start+0].getopnum() == rop.SAVE_EXC_CLASS and + operations[start+1].getopnum() == rop.SAVE_EXCEPTION and + operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): + return operations[:start] + operations[start+3:] + return operations diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2099,6 +2099,60 @@ excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue + def test_save_restore_exceptions(self): + exc_tp = None + exc_ptr = None + def func(i): + if hasattr(self.cpu, '_exception_emulator'): + assert not self.cpu._exception_emulator[0] + assert not 
self.cpu._exception_emulator[1] + called.append(i) + if i: + raise LLException(exc_tp, exc_ptr) + + ops = ''' + [i0] + i1 = same_as_i(1) + call_n(ConstClass(fptr), i0, descr=calldescr) + i2 = save_exc_class() + p2 = save_exception() + call_n(ConstClass(fptr), 0, descr=calldescr) + restore_exception(i2, p2) + p0 = guard_exception(ConstClass(xtp)) [i1] + finish(p0) + ''' + FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) + fptr = llhelper(FPTR, func) + calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + + xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + xtp.subclassrange_min = 1 + xtp.subclassrange_max = 3 + X = lltype.GcStruct('X', ('parent', rclass.OBJECT), + hints={'vtable': xtp._obj}) + xx = lltype.malloc(X) + xx.parent.typeptr = xtp + xptr = lltype.cast_opaque_ptr(llmemory.GCREF, xx) + + exc_tp = xtp + exc_ptr = xptr + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + called = [] + deadframe = self.cpu.execute_token(looptoken, 5) + assert called == [5, 0] + assert self.cpu.get_ref_value(deadframe, 0) == xptr + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + called = [] + deadframe = self.cpu.execute_token(looptoken, 0) + assert called == [0, 0] + assert self.cpu.get_int_value(deadframe, 0) == 1 + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + def test_cond_call_gc_wb(self): def func_void(a): record.append(rffi.cast(lltype.Signed, a)) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1609,6 +1609,15 @@ self.implement_guard(guard_token) self._store_and_reset_exception(self.mc, resloc) + def genop_save_exc_class(self, op, arglocs, resloc): + self.mc.MOV(resloc, heap(self.cpu.pos_exception())) + + def 
genop_save_exception(self, op, arglocs, resloc): + self._store_and_reset_exception(self.mc, resloc) + + def genop_discard_restore_exception(self, op, arglocs): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + def _store_and_reset_exception(self, mc, excvalloc=None, exctploc=None, tmploc=None): """ Resest the exception. If excvalloc is None, then store it on the diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -418,6 +418,17 @@ self.perform_guard(op, [loc, loc1], resloc) self.rm.possibly_free_var(box) + def consider_save_exception(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) + consider_save_exc_class = consider_save_exception + + def consider_restore_exception(self, op): + args = op.getarglist() + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class + loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance + self.perform_discard(op, [loc0, loc1]) + consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception consider_guard_not_forced = consider_guard_no_exception diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -386,7 +386,9 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, - rop.BRIDGE_EXCEPTION, + rop.SAVE_EXC_CLASS, + rop.SAVE_EXCEPTION, + rop.RESTORE_EXCEPTION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2487,17 +2487,28 @@ # 'test_guard_no_exception_incorrectly_removed_from_bridge' # shows a corner case in which just putting GuARD_NO_EXCEPTION 
# here is a bad idea: the optimizer might remove it too. - # So we put a pair BRIDGE_EXCEPTION / GUARD_(NO)_EXCEPTION. - # The BRIDGE_EXCEPTION is meant to re-raise the exception - # caught before the bridge, but in reality it must end up - # as the first operation and thus is a no-op for the backends - # (it is removed in rewrite.py). Its real purpose is only to - # pass through the optimizer unmodified, so that the following - # GUARD_NO_EXCEPTION is not killed. - self.history.record(rop.BRIDGE_EXCEPTION, [], None) - if exception: - self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, - exception)) + # So we put a SAVE_EXCEPTION at the start, and a + # RESTORE_EXCEPTION just before the guard. (rewrite.py will + # remove the two if they end up consecutive.) + + # XXX too much jumps between older and newer models; clean up + # by killing SAVE_EXC_CLASS, RESTORE_EXCEPTION and GUARD_EXCEPTION + + exception_obj = lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception) + if exception_obj: + exc_class = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(exception_obj.typeptr)) + else: + exc_class = 0 + i = len(self.history.operations) + op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) + op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) + assert op1 is self.history.operations[i] + assert op2 is self.history.operations[i + 1] + self.history.operations = [op1, op2] + self.history.operations[:i] + self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) + if exception_obj: + self.execute_ll_raised(exception_obj) else: self.clear_exception() try: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -696,7 +696,7 @@ 'GUARD_SUBCLASS/2d/n', # only if supports_guard_gc_type '_GUARD_FOLDABLE_LAST', 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set - 'GUARD_EXCEPTION/1d/r', # may be called with 
an exception currently set + 'GUARD_EXCEPTION/1d/r', # XXX kill me, use only SAVE_EXCEPTION 'GUARD_NO_OVERFLOW/0d/n', 'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set @@ -827,7 +827,9 @@ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', - 'BRIDGE_EXCEPTION/0/n', # pyjitpl: prepare_resume_from_failure() + 'SAVE_EXCEPTION/0/r', + 'SAVE_EXC_CLASS/0/i', # XXX kill me + 'RESTORE_EXCEPTION/2/n', # XXX kill me '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -124,15 +125,17 @@ # that don't start with \t are silently ignored (: WAT!?) 
target.write("""\ \t.text +\t.section\t__TEXT,__text,regular,pure_instructions \t.globl\t%(tramp_name)s +\t.align\t4, 0x90 %(type_decl)s %(tramp_name)s: \t.cfi_startproc \tpushq\t%(reg)s \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s +%(extra_align)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; 
#define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end 
of file From noreply at buildbot.pypy.org Mon Oct 12 18:36:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 18:36:28 +0200 (CEST) Subject: [pypy-commit] pypy default: hg backout 4444053b6b16 Message-ID: <20151012163628.AEC581C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80142:c9976e50bbb8 Date: 2015-10-12 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/c9976e50bbb8/ Log: hg backout 4444053b6b16 Breaks translation on Linux diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,13 +92,12 @@ PLT = "" size_decl = "" type_decl = "" - extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - extra_align = "\t.cfi_def_cfa_offset 8" + assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -125,17 +124,15 @@ # that don't start with \t are silently ignored (: WAT!?) 
target.write("""\ \t.text -\t.section\t__TEXT,__text,regular,pure_instructions \t.globl\t%(tramp_name)s -\t.align\t4, 0x90 %(type_decl)s %(tramp_name)s: \t.cfi_startproc \tpushq\t%(reg)s \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s -%(extra_align)s \taddq\t$8, %%rsp +\t.cfi_def_cfa_offset 8 \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,11 +31,7 @@ #include #include #include "vmprof_getpc.h" -#ifdef __APPLE__ -#include "libunwind.h" -#else #include "vmprof_unwind.h" -#endif #include "vmprof_mt.h" @@ -43,12 +39,10 @@ // functions copied from libunwind using dlopen -#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; -#endif static int profile_file = -1; static long prepare_interval_usec; @@ -73,7 +67,6 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); -#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -88,7 +81,6 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } -#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -214,12 +206,7 @@ void *ip; int n = 0; unw_cursor_t cursor; -#ifdef __APPLE__ - unw_context_t uc; - unw_getcontext(&uc); -#else unw_context_t uc = *ucontext; -#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,7 +64,8 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } unw_cursor_t; + } +unw_cursor_t; 
#define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -83,7 +84,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } unw_proc_info_t; + } +unw_proc_info_t; // end of copy - diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,7 +2,6 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile -from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -97,12 +96,7 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - s = 0 - for i in range(num): - s += (i << 1) - if s % 32423423423 == 0: - print s - return s + return 42 tmpfilename = str(udir.join('test_rvmprof')) @@ -110,37 +104,16 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - if we_are_translated(): - num = 100000000 - period = 0.0001 - else: - num = 10000 - period = 0.9 - rvmprof.enable(fd, period) - res = main(code, num) - #assert res == 499999500000 + rvmprof.enable(fd, 0.5) + res = main(code, 5) + assert res == 42 rvmprof.disable() os.close(fd) return 0 - def check_profile(filename): - from vmprof import read_profile - - prof = read_profile(filename) - assert prof.get_tree().name.startswith("py:") - assert prof.get_tree().count - assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") + os.unlink(tmpfilename) assert fn() == 0 - try: - import vmprof - except ImportError: - py.test.skip("vmprof unimportable") - else: - check_profile(tmpfilename) - finally: - assert os.path.exists(tmpfilename) - os.unlink(tmpfilename) - \ No newline at end of file + assert 
os.path.exists(tmpfilename) From noreply at buildbot.pypy.org Mon Oct 12 18:37:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 18:37:17 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: hg merge default Message-ID: <20151012163717.D74B01C12E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80143:87fadac2e7a3 Date: 2015-10-12 18:37 +0200 http://bitbucket.org/pypy/pypy/changeset/87fadac2e7a3/ Log: hg merge default diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,13 +92,12 @@ PLT = "" size_decl = "" type_decl = "" - extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - extra_align = "\t.cfi_def_cfa_offset 8" + assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -125,17 +124,15 @@ # that don't start with \t are silently ignored (: WAT!?) 
target.write("""\ \t.text -\t.section\t__TEXT,__text,regular,pure_instructions \t.globl\t%(tramp_name)s -\t.align\t4, 0x90 %(type_decl)s %(tramp_name)s: \t.cfi_startproc \tpushq\t%(reg)s \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s -%(extra_align)s \taddq\t$8, %%rsp +\t.cfi_def_cfa_offset 8 \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,11 +31,7 @@ #include #include #include "vmprof_getpc.h" -#ifdef __APPLE__ -#include "libunwind.h" -#else #include "vmprof_unwind.h" -#endif #include "vmprof_mt.h" @@ -43,12 +39,10 @@ // functions copied from libunwind using dlopen -#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; -#endif static int profile_file = -1; static long prepare_interval_usec; @@ -73,7 +67,6 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); -#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -88,7 +81,6 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } -#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -214,12 +206,7 @@ void *ip; int n = 0; unw_cursor_t cursor; -#ifdef __APPLE__ - unw_context_t uc; - unw_getcontext(&uc); -#else unw_context_t uc = *ucontext; -#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,7 +64,8 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } unw_cursor_t; + } +unw_cursor_t; 
#define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -83,7 +84,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } unw_proc_info_t; + } +unw_proc_info_t; // end of copy - diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,7 +2,6 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile -from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -97,12 +96,7 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - s = 0 - for i in range(num): - s += (i << 1) - if s % 32423423423 == 0: - print s - return s + return 42 tmpfilename = str(udir.join('test_rvmprof')) @@ -110,37 +104,16 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - if we_are_translated(): - num = 100000000 - period = 0.0001 - else: - num = 10000 - period = 0.9 - rvmprof.enable(fd, period) - res = main(code, num) - #assert res == 499999500000 + rvmprof.enable(fd, 0.5) + res = main(code, 5) + assert res == 42 rvmprof.disable() os.close(fd) return 0 - def check_profile(filename): - from vmprof import read_profile - - prof = read_profile(filename) - assert prof.get_tree().name.startswith("py:") - assert prof.get_tree().count - assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") + os.unlink(tmpfilename) assert fn() == 0 - try: - import vmprof - except ImportError: - py.test.skip("vmprof unimportable") - else: - check_profile(tmpfilename) - finally: - assert os.path.exists(tmpfilename) - os.unlink(tmpfilename) - \ No newline at end of file + assert 
os.path.exists(tmpfilename) From noreply at buildbot.pypy.org Mon Oct 12 19:38:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 19:38:48 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: jit translation fix Message-ID: <20151012173848.9E0DF1C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80144:10debcf2b5c5 Date: 2015-10-12 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/10debcf2b5c5/ Log: jit translation fix diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -19,6 +19,7 @@ # ____________________________________________________________ + at jit.dont_look_inside def make_callback(space, ctype, w_callable, w_error, w_onerror): # Allocate a callback as a nonmovable W_CDataCallback instance, which # we can cast to a plain VOIDP. As long as the object is not freed, diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -38,9 +38,12 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - addr = rffi.cast(llmemory.Address, ptr) - gcref = rgc.reveal_gcref(addr) - # + return _reveal(space, ptr) + + at jit.dont_look_inside +def _reveal(space, ptr): + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) if not gcref: raise oefmt(space.w_RuntimeError, "cannot use from_handle() on NULL pointer") From noreply at buildbot.pypy.org Mon Oct 12 19:38:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 19:38:51 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: merge heads Message-ID: <20151012173851.07CE61C14E7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80145:6bb0e8bdf9c4 Date: 2015-10-12 
18:42 +0100 http://bitbucket.org/pypy/pypy/changeset/6bb0e8bdf9c4/ Log: merge heads diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,13 +92,12 @@ PLT = "" size_decl = "" type_decl = "" - extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - extra_align = "\t.cfi_def_cfa_offset 8" + assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -125,17 +124,15 @@ # that don't start with \t are silently ignored (: WAT!?) target.write("""\ \t.text -\t.section\t__TEXT,__text,regular,pure_instructions \t.globl\t%(tramp_name)s -\t.align\t4, 0x90 %(type_decl)s %(tramp_name)s: \t.cfi_startproc \tpushq\t%(reg)s \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s -%(extra_align)s \taddq\t$8, %%rsp +\t.cfi_def_cfa_offset 8 \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,11 +31,7 @@ #include #include #include "vmprof_getpc.h" -#ifdef __APPLE__ -#include "libunwind.h" -#else #include "vmprof_unwind.h" -#endif #include "vmprof_mt.h" @@ -43,12 +39,10 @@ // functions copied from libunwind using dlopen -#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; -#endif static int profile_file = -1; static long prepare_interval_usec; @@ -73,7 +67,6 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); -#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -88,7 +81,6 @@ 
if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } -#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -214,12 +206,7 @@ void *ip; int n = 0; unw_cursor_t cursor; -#ifdef __APPLE__ - unw_context_t uc; - unw_getcontext(&uc); -#else unw_context_t uc = *ucontext; -#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,7 +64,8 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } unw_cursor_t; + } +unw_cursor_t; #define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -83,7 +84,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } unw_proc_info_t; + } +unw_proc_info_t; // end of copy - diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,7 +2,6 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile -from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -97,12 +96,7 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - s = 0 - for i in range(num): - s += (i << 1) - if s % 32423423423 == 0: - print s - return s + return 42 tmpfilename = str(udir.join('test_rvmprof')) @@ -110,37 +104,16 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - if we_are_translated(): - num = 100000000 - period = 0.0001 - else: - num = 10000 - period = 0.9 - rvmprof.enable(fd, period) - res = main(code, num) - #assert res == 
499999500000 + rvmprof.enable(fd, 0.5) + res = main(code, 5) + assert res == 42 rvmprof.disable() os.close(fd) return 0 - def check_profile(filename): - from vmprof import read_profile - - prof = read_profile(filename) - assert prof.get_tree().name.startswith("py:") - assert prof.get_tree().count - assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") + os.unlink(tmpfilename) assert fn() == 0 - try: - import vmprof - except ImportError: - py.test.skip("vmprof unimportable") - else: - check_profile(tmpfilename) - finally: - assert os.path.exists(tmpfilename) - os.unlink(tmpfilename) - \ No newline at end of file + assert os.path.exists(tmpfilename) From noreply at buildbot.pypy.org Mon Oct 12 20:33:12 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 20:33:12 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: guard test fixed Message-ID: <20151012183312.7974E1C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80146:10c0ffe213cd Date: 2015-10-12 18:11 +0200 http://bitbucket.org/pypy/pypy/changeset/10c0ffe213cd/ Log: guard test fixed diff --git a/rpython/jit/metainterp/optimizeopt/test/test_guard.py b/rpython/jit/metainterp/optimizeopt/test/test_guard.py --- a/rpython/jit/metainterp/optimizeopt/test/test_guard.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_guard.py @@ -107,7 +107,7 @@ def __repr__(self): return '*' from rpython.jit.tool.oparser import OpParser, default_fail_descr - parser = OpParser(instr, self.cpu, self.namespace, 'lltype', None, default_fail_descr, False, None) + parser = OpParser(instr, self.cpu, self.namespace, None, default_fail_descr, True, None) parser.vars = { arg.repr_short(arg._repr_memo) : arg for arg in loop.inputargs} operations = [] last_glob = None @@ -164,16 +164,16 @@ loop1 = self.parse_trace(""" i10 = int_lt(i1, 42) guard_true(i10) [] - i11 = int_add(i1, 1) - i12 = int_lt(i11, 42) - guard_true(i12) [] + i101 = 
int_add(i1, 1) + i102 = int_lt(i101, 42) + guard_true(i102) [] """) opt = self.optguards(loop1) self.assert_guard_count(loop1, 1) self.assert_contains_sequence(loop1, """ ... - i11 = int_add(i1, 1) - i12 = int_lt(i11, 42) + i101 = int_add(i1, 1) + i12 = int_lt(i101, 42) guard_true(i12) [] ... """) @@ -182,16 +182,16 @@ loop1 = self.parse_trace(""" i10 = int_gt(i1, 42) guard_true(i10) [] - i11 = int_sub(i1, 1) - i12 = int_gt(i11, 42) + i101 = int_sub(i1, 1) + i12 = int_gt(i101, 42) guard_true(i12) [] """) opt = self.optguards(loop1) self.assert_guard_count(loop1, 1) self.assert_contains_sequence(loop1, """ ... - i11 = int_sub(i1, 1) - i12 = int_gt(i11, 42) + i101 = int_sub(i1, 1) + i12 = int_gt(i101, 42) guard_true(i12) [] ... """) @@ -209,8 +209,8 @@ self.assert_guard_count(loop1, 1) self.assert_contains_sequence(loop1, """ ... - i11 = int_mul(i1, 4) - i12 = int_add(i11, 1) + i101 = int_mul(i1, 4) + i12 = int_add(i101, 1) i13 = int_lt(i12, 42) guard_true(i13) [] ... @@ -323,8 +323,8 @@ self.assert_guard_count(loop1, 2) self.assert_contains_sequence(loop1, """ ... - i10 = int_ge(42, i2) - guard_true(i10) [] + i100 = int_ge(42, i2) + guard_true(i100) [] ... 
i40 = int_gt(i1, 42) guard_true(i40) [] From noreply at buildbot.pypy.org Mon Oct 12 20:33:14 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 12 Oct 2015 20:33:14 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: added missing code from merge Message-ID: <20151012183314.A6E6E1C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80147:66b7452af316 Date: 2015-10-12 20:33 +0200 http://bitbucket.org/pypy/pypy/changeset/66b7452af316/ Log: added missing code from merge diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -47,6 +47,13 @@ self.inputargs = map(mapping, inputargs) self.operations = [] for op in operations: + opnum = op.getopnum() + if opnum == rop.GUARD_VALUE: + # we don't care about the value 13 here, because we gonna + # fish it from the extra slot on frame anyway + op.getdescr().make_a_counter_per_value(op, 13) + elif opnum == rop.BRIDGE_EXCEPTION: + assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() From noreply at buildbot.pypy.org Mon Oct 12 22:04:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 22:04:13 +0200 (CEST) Subject: [pypy-commit] pypy cffi-handle-lifetime: ready for merge Message-ID: <20151012200413.32AAF1C05B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cffi-handle-lifetime Changeset: r80148:809d3d00d977 Date: 2015-10-12 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/809d3d00d977/ Log: ready for merge From noreply at buildbot.pypy.org Mon Oct 12 22:04:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 22:04:16 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge cffi-handle-lifetime Message-ID: <20151012200416.3BAB01C05B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: 
Changeset: r80149:3ef464913dfc Date: 2015-10-12 22:02 +0200 http://bitbucket.org/pypy/pypy/changeset/3ef464913dfc/ Log: hg merge cffi-handle-lifetime Change the way the cffi handles are done: this version is better because handles are valid as long as the object exists, as opposed to the previous version where handles become invalid before the object's __del__ is called. This is done using a new way to ask the GC to make nonmovable objects. diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,14 +1,14 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc, handle +from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -19,6 +19,23 @@ # ____________________________________________________________ + at jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). 
+ cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -37,7 +54,8 @@ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -72,8 +90,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) # - handle_index = handle.get_handles(space).reserve_next_handle_index() - # cif_descr = self.getfunctype().cif_descr if not cif_descr: raise oefmt(space.w_NotImplementedError, @@ -81,16 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, handle_index) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - _current_space.space = space - handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -221,12 +234,6 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) -class CurrentSpace: - def _cleanup_(self): - if hasattr(self, 'space'): - del self.space -_current_space = 
CurrentSpace() - def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care @@ -236,10 +243,8 @@ (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - space = _current_space.space - callback = handle.get_handles(space).fetch_handle(unique_id) - if callback is None or not isinstance(callback, W_CDataCallback): + callback = reveal_callback(ll_userdata) + if callback is None: # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -251,6 +256,7 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False try: must_leave = space.threadlocals.try_enter_thread(space) diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -294,9 +294,9 @@ CONSIDER_FN_AS_FNPTR) space = self.space if not space.is_none(w_python_callable): - return ccallback.W_CDataCallback(space, w_ctype, - w_python_callable, w_error, - w_onerror) + return ccallback.make_callback(space, w_ctype, + w_python_callable, w_error, + w_onerror) else: # decorator mode: returns a single-argument function return space.appexec([w_ctype, w_error, w_onerror], diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -24,8 +24,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType) def callback(space, w_ctype, w_callable, w_error=None, w_onerror=None): - from pypy.module._cffi_backend.ccallback import W_CDataCallback - return W_CDataCallback(space, w_ctype, w_callable, w_error, w_onerror) + from pypy.module._cffi_backend.ccallback import make_callback + return make_callback(space, w_ctype, w_callable, w_error, w_onerror) # ____________________________________________________________ 
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,24 +1,24 @@ +import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rweaklist - - -class CffiHandles(rweaklist.RWeakListMixin): - def __init__(self, space): - self.initialize() - -def get_handles(space): - return space.fromcache(CffiHandles) +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib import rgc, objectmodel, jit # ____________________________________________________________ + at jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - index = get_handles(space).reserve_next_handle_index() - _cdata = rffi.cast(rffi.CCHARP, index + 1) - new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get_handles(space).store_handle(index, new_cdataobj) + # Allocate a handle as a nonmovable W_CDataHandle instance, which + # we can cast to a plain CCHARP. As long as the object is not freed, + # we can cast the CCHARP back to a W_CDataHandle with reveal_gcref(). 
+ new_cdataobj = objectmodel.instantiate(cdataobj.W_CDataHandle, + nonmovable=True) + gcref = rgc.cast_instance_to_gcref(new_cdataobj) + _cdata = rgc.hide_nonmovable_gcref(gcref) + _cdata = rffi.cast(rffi.CCHARP, _cdata) + cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -38,14 +38,17 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get_handles(space).fetch_handle(index - 1) - # - if isinstance(original_cdataobj, cdataobj.W_CDataHandle): - return original_cdataobj.w_keepalive - else: - if index == 0: - msg = "cannot use from_handle() on NULL pointer" - else: - msg = "'void *' value does not correspond to any object" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + return _reveal(space, ptr) + + at jit.dont_look_inside +def _reveal(space, ptr): + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) + if not gcref: + raise oefmt(space.w_RuntimeError, + "cannot use from_handle() on NULL pointer") + cd = rgc.try_cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + if cd is None: + raise oefmt(space.w_SystemError, + "ffi.from_handle(): dead or bogus object handle") + return cd.w_keepalive diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py deleted file mode 100644 --- a/pypy/module/_cffi_backend/test/test_handle.py +++ /dev/null @@ -1,44 +0,0 @@ -import random -from pypy.module._cffi_backend.handle import CffiHandles - - -class PseudoWeakRef(object): - _content = 42 - - def __call__(self): - return self._content - - -def test_cffi_handles_1(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - 
expected_content[index] = pwr - ch.handles[index] = pwr - assert len(ch.handles) <= 16384 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr - -def test_cffi_handles_2(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - # - if len(expected_content) > 20: - r = random.choice(list(expected_content)) - pwr = expected_content.pop(r) - pwr._content = None - # - assert len(ch.handles) < 100 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -290,7 +290,7 @@ return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) @analyzer_for(rpython.rlib.objectmodel.instantiate) -def robjmodel_instantiate(s_clspbc): +def robjmodel_instantiate(s_clspbc, s_nonmovable=None): assert isinstance(s_clspbc, SomePBC) clsdef = None more_than_one = len(s_clspbc.descriptions) > 1 diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -915,10 +915,13 @@ return [op0, op1] def rewrite_op_malloc(self, op): - if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value + if d.get('nonmovable', False): + raise UnsupportedMallocFlags(d) + if d['flavor'] == 'raw': return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) # - if op.args[1].value.get('zero', False): + if d.get('zero', False): zero = True else: zero = False diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -172,6 +172,9 @@ def can_move(self, addr): return False + def malloc_fixedsize_nonmovable(self, 
typeid): + raise MemoryError + def pin(self, addr): return False diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -597,7 +597,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -606,7 +606,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -659,7 +659,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. - obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -692,6 +692,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" @@ -808,7 +813,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 
'length' should be @@ -862,7 +867,9 @@ # we should get a MemoryError from major_collection_step(). # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -874,10 +881,6 @@ # Allocate from the ArenaCollection. Don't clear it. result = self.ac.malloc(totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -897,11 +900,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -939,7 +942,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. 
self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -519,7 +519,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -528,7 +528,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -581,7 +581,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -614,6 +614,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" self.minor_collection() @@ -671,7 +676,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -711,7 +716,9 @@ self.major_collection(raw_malloc_usage(totalsize)) # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -724,10 +731,6 @@ result = self.ac.malloc(totalsize) llmemory.raw_memclear(result, totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. 
extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -747,11 +750,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -787,7 +790,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -565,8 +565,8 @@ tid = self.get_type_id(VAR) largeobj_size = self.gc.nonlarge_max + 1 self.gc.next_major_collection_threshold = 99999.0 - addr_src = self.gc.external_malloc(tid, largeobj_size) - addr_dst = self.gc.external_malloc(tid, largeobj_size) + addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) + addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) hdr_src = self.gc.header(addr_src) hdr_dst = self.gc.header(addr_dst) # diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -531,6 +531,9 @@ getfn(func, [SomeAddress()], annmodel.s_None) + self.malloc_nonmovable_ptr = getfn(GCClass.malloc_fixedsize_nonmovable, + [s_gc, s_typeid16], + s_gcref) def 
create_custom_trace_funcs(self, gc, rtyper): custom_trace_funcs = tuple(rtyper.custom_trace_funcs) @@ -757,7 +760,12 @@ c_has_light_finalizer = rmodel.inputconst(lltype.Bool, has_light_finalizer) - if not op.opname.endswith('_varsize') and not flags.get('varsize'): + if flags.get('nonmovable'): + assert op.opname == 'malloc' + assert not flags.get('varsize') + malloc_ptr = self.malloc_nonmovable_ptr + args = [self.c_const_gc, c_type_id] + elif not op.opname.endswith('_varsize') and not flags.get('varsize'): zero = flags.get('zero', False) if (self.malloc_fast_ptr is not None and not c_has_finalizer.value and diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1247,6 +1247,26 @@ res = self.runner('nursery_hash_base') assert res([]) >= 195 + def define_instantiate_nonmovable(cls): + from rpython.rlib import objectmodel + from rpython.rtyper import annlowlevel + class A: + pass + def fn(): + a1 = A() + a = objectmodel.instantiate(A, nonmovable=True) + a.next = a1 # 'a' is known young here, so no write barrier emitted + res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + rgc.collect() + objectmodel.keepalive_until_here(a) + return res + return fn + + def test_instantiate_nonmovable(self): + res = self.runner('instantiate_nonmovable') + assert res([]) == 0 + + class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -276,7 +276,7 @@ # ____________________________________________________________ -def instantiate(cls): +def instantiate(cls, nonmovable=False): "Create an empty instance of 'cls'." 
if isinstance(cls, type): return cls.__new__(cls) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -480,7 +480,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation - __slots__ = ['_x'] + __slots__ = ['_x', '_handle'] def __init__(self, x): self._x = x def __hash__(self): @@ -529,6 +529,48 @@ return None try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' +_ffi_cache = None +def _fetch_ffi(): + global _ffi_cache + if _ffi_cache is None: + try: + import _cffi_backend + _ffi_cache = _cffi_backend.FFI() + except (ImportError, AttributeError): + import py + py.test.skip("need CFFI >= 1.0") + return _ffi_cache + + at jit.dont_look_inside +def hide_nonmovable_gcref(gcref): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + if we_are_translated(): + assert lltype.typeOf(gcref) == llmemory.GCREF + assert not can_move(gcref) + return rffi.cast(llmemory.Address, gcref) + else: + assert isinstance(gcref, _GcRef) + x = gcref._x + ffi = _fetch_ffi() + if not hasattr(x, '__handle'): + x.__handle = ffi.new_handle(x) + addr = int(ffi.cast("intptr_t", x.__handle)) + return rffi.cast(llmemory.Address, addr) + + at jit.dont_look_inside +def reveal_gcref(addr): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + assert lltype.typeOf(addr) == llmemory.Address + if we_are_translated(): + return rffi.cast(llmemory.GCREF, addr) + else: + addr = rffi.cast(lltype.Signed, addr) + if addr == 0: + return lltype.nullptr(llmemory.GCREF.TO) + ffi = _fetch_ffi() + x = ffi.from_handle(ffi.cast("void *", addr)) + return _GcRef(x) + # ------------------- implementation ------------------- _cache_s_list_of_gcrefs = None diff --git a/rpython/rtyper/lltypesystem/rtagged.py b/rpython/rtyper/lltypesystem/rtagged.py --- a/rpython/rtyper/lltypesystem/rtagged.py +++ b/rpython/rtyper/lltypesystem/rtagged.py @@ -27,7 +27,8 @@ self.classdef, flds)) 
self.specialfieldname = flds[0] - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): + assert not nonmovable if self.is_parent: raise TyperError("don't instantiate %r, it is a parent of an " "UnboxedValue class" % (self.classdef,)) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -693,18 +693,24 @@ return hop.args_r[0].rtype_isinstance(hop) @typer_for(objectmodel.instantiate) -def rtype_instantiate(hop): +def rtype_instantiate(hop, i_nonmovable=None): hop.exception_cannot_occur() s_class = hop.args_s[0] assert isinstance(s_class, annmodel.SomePBC) + v_nonmovable, = parse_kwds(hop, (i_nonmovable, None)) + nonmovable = (i_nonmovable is not None and v_nonmovable.value) if len(s_class.descriptions) != 1: # instantiate() on a variable class + if nonmovable: + raise TyperError("instantiate(x, nonmovable=True) cannot be used " + "if x is not a constant class") vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) r_class = hop.args_r[0] return r_class._instantiate_runtime_class(hop, vtypeptr, hop.r_result.lowleveltype) classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops, + nonmovable=nonmovable) @typer_for(hasattr) diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -684,10 +684,12 @@ rbase = rbase.rbase return False - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): """Build a new instance, without calling __init__.""" flavor = self.gcflavor flags = {'flavor': flavor} + if nonmovable: + flags['nonmovable'] = True ctype = inputconst(Void, self.object_type) cflags = inputconst(Void, flags) vlist = [ctype, cflags] @@ -1031,9 
+1033,10 @@ # ____________________________________________________________ -def rtype_new_instance(rtyper, classdef, llops, classcallhop=None): +def rtype_new_instance(rtyper, classdef, llops, classcallhop=None, + nonmovable=False): rinstance = getinstancerepr(rtyper, classdef) - return rinstance.new_instance(llops, classcallhop) + return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable) def ll_inst_hash(ins): if not ins: diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -432,6 +432,14 @@ res = self.interpret(f, [2]) assert self.class_name(res) == 'B' + def test_instantiate_nonmovable(self): + class A: + pass + def f(): + return instantiate(A, nonmovable=True) # no effect before GC + res = self.interpret(f, []) + assert self.class_name(res) == 'A' + def test_os_path_join(self): def fn(a, b): return os.path.join(a, b) From noreply at buildbot.pypy.org Mon Oct 12 22:04:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 12 Oct 2015 22:04:18 +0200 (CEST) Subject: [pypy-commit] pypy default: document branch Message-ID: <20151012200418.4F6381C05B6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80150:73ea83bf410b Date: 2015-10-12 22:04 +0200 http://bitbucket.org/pypy/pypy/changeset/73ea83bf410b/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,3 +68,9 @@ .. branch: type_system-cleanup Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). 
From noreply at buildbot.pypy.org Tue Oct 13 09:10:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 09:10:06 +0200 (CEST) Subject: [pypy-commit] benchmarks default: Change this old version of pypy to translate on PPC (without stacklets) Message-ID: <20151013071006.73D7E1C103D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r337:93b19a1dfaf5 Date: 2015-10-13 09:10 +0200 http://bitbucket.org/pypy/benchmarks/changeset/93b19a1dfaf5/ Log: Change this old version of pypy to translate on PPC (without stacklets) diff --git a/lib/pypy/rpython/memory/gctransform/shadowstack.py b/lib/pypy/rpython/memory/gctransform/shadowstack.py --- a/lib/pypy/rpython/memory/gctransform/shadowstack.py +++ b/lib/pypy/rpython/memory/gctransform/shadowstack.py @@ -400,6 +400,9 @@ llmemory.raw_free(shadowstackref.base) if h: _c.destroy(h) + if not gctransformer.translator.config.translation.continuation: + def shadowstack_destructor(shadowstackref): + llmemory.raw_free(shadowstackref.base) destrptr = gctransformer.annotate_helper(shadowstack_destructor, [SHADOWSTACKREFPTR], lltype.Void) From noreply at buildbot.pypy.org Tue Oct 13 09:52:34 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 09:52:34 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: copied descr does not inhert from resume guard descr -> asserting abstractfaildescr instead Message-ID: <20151013075234.0B8EF1C103D@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80151:e9362f0d3eaf Date: 2015-10-13 08:48 +0200 http://bitbucket.org/pypy/pypy/changeset/e9362f0d3eaf/ Log: copied descr does not inhert from resume guard descr -> asserting abstractfaildescr instead diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -4,10 +4,9 @@ from 
rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import (rop, GuardResOp, ResOperation) from rpython.jit.metainterp.resume import Snapshot -from rpython.jit.metainterp.compile import ResumeGuardDescr from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (ConstPtr, ConstInt,Const, - AbstractValue) + AbstractValue, AbstractFailDescr) from rpython.rtyper.lltypesystem import llmemory from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import we_are_translated @@ -83,7 +82,7 @@ descr = op.getdescr() if not descr: return False - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractFailDescr) if not descr.exits_early(): return False elif not op.is_always_pure(): diff --git a/rpython/jit/metainterp/optimizeopt/guard.py b/rpython/jit/metainterp/optimizeopt/guard.py --- a/rpython/jit/metainterp/optimizeopt/guard.py +++ b/rpython/jit/metainterp/optimizeopt/guard.py @@ -8,7 +8,8 @@ from rpython.jit.metainterp.optimizeopt.dependency import (DependencyGraph, MemoryRef, Node, IndexVar) from rpython.jit.metainterp.resoperation import (rop, ResOperation, GuardResOp) -from rpython.jit.metainterp.history import (ConstInt, ConstFloat, Const) +from rpython.jit.metainterp.history import (ConstInt, ConstFloat, Const, + AbstractFailDescr) from rpython.jit.metainterp.compile import ResumeGuardDescr, CompileLoopVersionDescr from rpython.rlib.objectmodel import we_are_translated @@ -88,7 +89,7 @@ descr = CompileLoopVersionDescr() descr.copy_all_attributes_from(self.op.getdescr()) descr.rd_vector_info = None # do not copy the accum list - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractFailDescr) guard = ResOperation(self.op.getopnum(), [compare], descr=descr) guard.setfailargs(loop.label.getarglist_copy()) opt.emit_operation(guard) @@ -261,7 +262,7 @@ continue descr = op.getdescr() if descr and 
descr.loop_version(): - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractFailDescr) info.track(op, descr, version) if user_code: @@ -293,7 +294,7 @@ info.remove(other.op.getdescr()) other.set_to_none(info, loop) descr = transitive_guard.getdescr() - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractFailDescr) info.track(transitive_guard, descr, version) info.clear() diff --git a/rpython/jit/metainterp/optimizeopt/version.py b/rpython/jit/metainterp/optimizeopt/version.py --- a/rpython/jit/metainterp/optimizeopt/version.py +++ b/rpython/jit/metainterp/optimizeopt/version.py @@ -3,6 +3,7 @@ from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo from rpython.jit.metainterp.compile import (send_bridge_to_backend, record_loop_or_bridge, ResumeGuardDescr, create_empty_loop) +from rpython.jit.metainterp.history import AbstractFailDescr class LoopVersionInfo(BasicLoopInfo): @@ -69,7 +70,7 @@ # the guard might have been removed from the trace continue if version not in compiled: - assert isinstance(descr, ResumeGuardDescr) + assert isinstance(descr, AbstractFailDescr) vl = version.create_backend_loop(metainterp, jitcell_token) asminfo = send_bridge_to_backend(jitdriver_sd, metainterp_sd, descr, vl.inputargs, From noreply at buildbot.pypy.org Tue Oct 13 09:52:36 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 09:52:36 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: guard dependency construction moved to the first pass Message-ID: <20151013075236.373F91C103D@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80152:32dc0789de74 Date: 2015-10-13 09:52 +0200 http://bitbucket.org/pypy/pypy/changeset/32dc0789de74/ Log: guard dependency construction moved to the first pass diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ 
b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -133,9 +133,6 @@ self.emitted = False self.schedule_position = -1 self.priority = 0 - # save the operation that produces the result for the first argument - # only for guard_true/guard_false - self.guard_bool_bool_node = None self._stack = False def is_imaginary(self): @@ -625,11 +622,9 @@ nonpure.edge_to(node, failarg=True, label="nonpure") tracker.non_pure = [] self.guards.append(node) + self.build_guard_dependencies(node, tracker) else: self.build_non_pure_dependencies(node, tracker) - # pass 2 correct guard dependencies - for guard_node in self.guards: - self.build_guard_dependencies(guard_node, tracker) def guard_argument_protection(self, guard_node, tracker): """ the parameters the guard protects are an indicator for @@ -642,27 +637,15 @@ """ guard_op = guard_node.getoperation() guard_opnum = guard_op.getopnum() - if guard_opnum in (rop.GUARD_TRUE, rop.GUARD_FALSE): - for dep in guard_node.depends(): - op = dep.to.getoperation() - if op.returns_bool_result() and op is guard_op.getarg(0): - guard_node.guard_bool_bool_node = dep.to - for arg in op.getarglist(): - if not arg.is_constant(): - self.guard_exit_dependence(guard_node, arg, tracker) - break - else: - # in this case the guard protects an integer - # example: - # i = int_and(j, 255) - # guard_true(i) [...] - pass - elif guard_op.is_foldable_guard(): - # these guards carry their protected variables directly as a parameter - for arg in guard_node.getoperation().getarglist(): - if not arg.is_constant(): - self.guard_exit_dependence(guard_node, arg, tracker) - elif guard_opnum == rop.GUARD_NOT_FORCED_2: + for arg in guard_op.getarglist(): + if not arg.is_constant() and arg.type not in ('i','f'): + # redefine pointers, consider the following example + # guard_nonnull(r1) + # i1 = getfield(r1, ...) 
+ # guard must be emitted before the getfield, thus + # redefine r1 at guard_nonnull + tracker.define(arg, guard_node) + if guard_opnum == rop.GUARD_NOT_FORCED_2: # must be emitted before finish, thus delayed the longest guard_node.setpriority(-10) elif guard_opnum in (rop.GUARD_OVERFLOW, rop.GUARD_NO_OVERFLOW): @@ -695,7 +678,7 @@ else: raise AssertionError("(no)exception/not_forced: not op raises for them") else: - pass # not invalidated, early exit, future condition! + pass # not invalidated, future condition! def guard_exit_dependence(self, guard_node, var, tracker): def_node = tracker.definition(var) @@ -721,7 +704,7 @@ return # handle fail args if guard_op.getfailargs(): - for arg in guard_op.getfailargs(): + for i,arg in enumerate(guard_op.getfailargs()): if arg is None: continue if not tracker.is_defined(arg): @@ -748,6 +731,7 @@ tracker.depends_on_arg(index_var, node) else: tracker.depends_on_arg(cobj, node) + break else: for arg, argcell, destroyed in node.side_effect_arguments(): if argcell is not None: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -170,7 +170,7 @@ assert dependency is not None or node_b.getopnum() == rop.JUMP, \ " it is expected that instruction at index" + \ " %s depends on instr on index %s but it does not.\n%s" \ - % (node_a, node_b, graph) + % (node_a.getindex(), node_b.getindex(), graph) elif dependency is not None: dependencies.remove(dependency) assert dependencies == [], \ @@ -489,31 +489,33 @@ def test_dependency_complex_trace(self): graph = self.build_dependency(""" - [i0, i1, i2, i3, i4, i5, i6, i7] # 0: 1,2,3,4,6,7,8,9,10,12,14,17,19,20,21 + [i0, i1, i2, i3, i4, i5, i6, i7] # 0: i9 = int_mul(i0, 8) # 1: 2 i10 = raw_load_i(i3, i9, descr=arraydescr) # 2: 5, 10 i11 = int_mul(i0, 8) # 3: 4 i12 = raw_load_i(i4, 
i11, descr=arraydescr) # 4: 5,10 i13 = int_add(i10, i12) # 5: 7,10 i14 = int_mul(i0, 8) # 6: 7 - raw_store(i5, i14, i13, descr=arraydescr) # 7: 21 + raw_store(i3, i14, i13, descr=arraydescr) # 7: 10,12,20 i16 = int_add(i0, 1) # 8: 9,10,11,13,16,18 i17 = int_lt(i16, i7) # 9: 10 - guard_true(i17) [i7, i13, i5, i4, i3, i12, i10, i16] # 10: 11,13,16,18,19,21 - i18 = int_mul(i16, 8) # 11: - i19 = raw_load_i(i3, i18, descr=arraydescr) # 12: - i20 = int_mul(i16, 8) # 13: - i21 = raw_load_i(i4, i20, descr=arraydescr) # 14: - i22 = int_add(i19, i21) # 15: - i23 = int_mul(i16, 8) # 16: - raw_store(i5, i23, i22, descr=arraydescr) # 17: - i24 = int_add(i16, 1) # 18: - i25 = int_lt(i24, i7) # 19: + guard_true(i17) [i7, i13, i5, i4, i3, i12, i10, i16] # 10: 17, 20 + i18 = int_mul(i16, 9) # 11: 12 + i19 = raw_load_i(i3, i18, descr=arraydescr) # 12: 15, 20 + i20 = int_mul(i16, 8) # 13: 14 + i21 = raw_load_i(i4, i20, descr=arraydescr) # 14: 15, 20 + i22 = int_add(i19, i21) # 15: 17, 20 + i23 = int_mul(i16, 8) # 16: 17 + raw_store(i5, i23, i22, descr=arraydescr) # 17: 20 + i24 = int_add(i16, 1) # 18: 19, 20 + i25 = int_lt(i24, i7) # 19: 20 guard_true(i25) [i7, i22, i5, i4, i3, i21, i19, i24] # 20: jump(i24, i19, i21, i3, i4, i5, i22, i7) # 21: """) - self.assert_dependencies(graph, full_check=False) + self.assert_dependencies(graph, full_check=True) self.assert_dependent(graph, 2,12) + self.assert_dependent(graph, 7,12) + self.assert_dependent(graph, 4,12) def test_getfield(self): graph = self.build_dependency(""" @@ -529,7 +531,7 @@ def test_cyclic(self): graph = self.build_dependency(""" [p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6 - p13 = getfield_gc_r(p9) # 1: 2,4,5 + p13 = getfield_gc_r(p9) # 1: 2,5 guard_nonnull(p13) [] # 2: 4,5 i14 = getfield_gc_i(p9) # 3: 5 p15 = getfield_gc_r(p13) # 4: 5 From noreply at buildbot.pypy.org Tue Oct 13 10:03:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 10:03:37 +0200 (CEST) Subject: [pypy-commit] pypy default: 
Complain if we don't give "--thread" when translating an RPython program Message-ID: <20151013080337.AB0A01C103D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80153:137f20845896 Date: 2015-10-13 09:20 +0200 http://bitbucket.org/pypy/pypy/changeset/137f20845896/ Log: Complain if we don't give "--thread" when translating an RPython program that uses start_new_thread(). diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -79,6 +79,7 @@ @specialize.arg(0) def ll_start_new_thread(func): + _check_thread_enabled() ident = c_thread_start(func) if ident == -1: raise error("can't start new thread") @@ -170,6 +171,18 @@ def _cleanup_(self): raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") +def _check_thread_enabled(): + pass +class Entry(ExtRegistryEntry): + _about_ = _check_thread_enabled + def compute_result_annotation(self): + translator = self.bookkeeper.annotator.translator + if not translator.config.translation.thread: + raise Exception( + "this RPython program uses threads: translate with '--thread'") + def specialize_call(self, hop): + hop.exception_cannot_occur() + # ____________________________________________________________ # # Stack size From noreply at buildbot.pypy.org Tue Oct 13 10:45:36 2015 From: noreply at buildbot.pypy.org (jerith) Date: Tue, 13 Oct 2015 10:45:36 +0200 (CEST) Subject: [pypy-commit] pypy unrecursive-opt: Merge default. Message-ID: <20151013084536.48CB41C1214@cobra.cs.uni-duesseldorf.de> Author: Jeremy Thurgood Branch: unrecursive-opt Changeset: r80154:c3e4bdc5e479 Date: 2015-10-13 10:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c3e4bdc5e479/ Log: Merge default. 
diff too long, truncating to 2000 out of 8776 lines diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. 
+ + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -449,7 +460,14 @@ if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + # the 'quals' on the result type are ignored. 
HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,4 @@ -import types +import types, sys import weakref from .lock import allocate_lock @@ -193,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -222,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -233,11 +236,18 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), 
result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -607,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -710,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -1135,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % 
(tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -159,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,34 @@ .. branch: numpy-ctypes Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. 
branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1058,6 +1058,14 @@ args = Arguments.frompacked(self, w_args, w_kwds) return self.call_args(w_callable, args) + def _try_fetch_pycode(self, w_func): + from pypy.interpreter.function import Function, Method + if isinstance(w_func, Method): + w_func = w_func.w_function + if isinstance(w_func, Function): + return w_func.code + return None + def call_function(self, w_func, *args_w): nargs = len(args_w) # used for pruning funccall versions if not self.config.objspace.disable_call_speedhacks and nargs < 5: diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -40,12 +47,13 @@ 'string': 'func.string', 'buffer': 'cbuffer.buffer', + 'memmove': 'func.memmove', 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +61,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 
'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,11 +1,11 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, rweakref, jit, jit_libffi -from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel +from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt from pypy.module._cffi_backend import cerrno, misc @@ -19,6 +19,23 @@ # ____________________________________________________________ + at jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). + cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -34,10 +51,11 @@ class W_CDataCallback(W_CData): - #_immutable_fields_ = ... 
+ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -46,6 +64,7 @@ raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable + self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -64,8 +83,12 @@ convert_from_object_fficallback(fresult, self._closure.ll_error, w_error) # - self.unique_id = compute_unique_id(self) - global_callback_mapping.set(self.unique_id, self) + # We must setup the GIL here, in case the callback is invoked in + # some other non-Pythonic thread. This is the same as cffi on + # CPython. + if space.config.translation.thread: + from pypy.module.thread.os_thread import setup_threads + setup_threads(space) # cif_descr = self.getfunctype().cif_descr if not cif_descr: @@ -74,20 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, self.unique_id) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - # We must setup the GIL here, in case the callback is invoked in - # some other non-Pythonic thread. This is the same as cffi on - # CPython. 
- if space.config.translation.thread: - from pypy.module.thread.os_thread import setup_threads - setup_threads(space) def _repr_extra(self): space = self.space @@ -105,6 +121,7 @@ def invoke(self, ll_args): space = self.space ctype = self.getfunctype() + ctype = jit.promote(ctype) args_w = [] for i, farg in enumerate(ctype.fargs): ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) @@ -127,9 +144,6 @@ keepalive_until_here(self) # to keep self._closure.ll_error alive -global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) - - def convert_from_object_fficallback(fresult, ll_res, w_res): space = fresult.space small_result = fresult.size < SIZE_OF_FFI_ARG @@ -178,7 +192,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,19 +214,36 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") +def get_printable_location(key_pycode): + if key_pycode is None: + return 'cffi_callback ' + return 'cffi_callback ' + key_pycode.get_repr() - at jit.jit_callback("CFFI") +jitdriver = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location) + +def py_invoke_callback(callback, ll_res, ll_args): + jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) + def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - callback = global_callback_mapping.get(unique_id) + callback = reveal_callback(ll_userdata) if callback is None: # oups! try: @@ -224,17 +256,11 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False - space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. 
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -18,6 +18,7 @@ _attrs_ = ['ctptr'] _immutable_fields_ = ['ctptr'] kind = "array" + is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,22 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +47,7 @@ # at all. 
The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +79,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +87,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +103,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +114,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +189,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +263,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, 
fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = llmemory.raw_malloc_usage(size) @@ -421,7 +425,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -21,6 +21,7 @@ cast_anything = False is_primitive_integer = False + is_nonfunc_pointer_or_array = False kind = "?" def __init__(self, space, size, name, name_position): @@ -143,7 +144,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -172,6 +172,7 @@ _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None + is_nonfunc_pointer_or_array = True def __init__(self, space, ctitem): from pypy.module._cffi_backend import ctypearray diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -294,9 +294,9 @@ CONSIDER_FN_AS_FNPTR) space = self.space if not space.is_none(w_python_callable): - return ccallback.W_CDataCallback(space, w_ctype, - w_python_callable, w_error, - w_onerror) + return ccallback.make_callback(space, w_ctype, + w_python_callable, w_error, + 
w_onerror) else: # decorator mode: returns a single-argument function return space.appexec([w_ctype, w_error, w_onerror], @@ -391,6 +391,25 @@ return cerrno.getwinerror(self.space, code) + @unwrap_spec(n=int) + def descr_memmove(self, w_dest, w_src, n): + """\ +ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + +Like the C function memmove(), the memory areas may overlap; +apart from that it behaves like the C function memcpy(). + +'src' can be any cdata ptr or array, or any Python buffer object. +'dest' can be any cdata ptr or array, or a writable Python buffer +object. The size to copy, 'n', is always measured in bytes. + +Unlike other methods, this one supports all Python buffer including +byte strings and bytearrays---but it still does not support +non-contiguous buffers.""" + # + return func.memmove(self.space, w_dest, w_src, n) + + @unwrap_spec(w_init=WrappedDefault(None)) def descr_new(self, w_arg, w_init): """\ @@ -623,6 +642,7 @@ gc = interp2app(W_FFIObject.descr_gc), getctype = interp2app(W_FFIObject.descr_getctype), integer_const = interp2app(W_FFIObject.descr_integer_const), + memmove = interp2app(W_FFIObject.descr_memmove), new = interp2app(W_FFIObject.descr_new), new_allocator = interp2app(W_FFIObject.descr_new_allocator), new_handle = interp2app(W_FFIObject.descr_new_handle), diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -1,3 +1,8 @@ +from rpython.rtyper.annlowlevel import llstr +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw +from rpython.rlib.objectmodel import keepalive_until_here + from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.module._cffi_backend import ctypeobj, cdataobj, allocator @@ -19,8 +24,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType) def 
callback(space, w_ctype, w_callable, w_error=None, w_onerror=None): - from pypy.module._cffi_backend.ccallback import W_CDataCallback - return W_CDataCallback(space, w_ctype, w_callable, w_error, w_onerror) + from pypy.module._cffi_backend.ccallback import make_callback + return make_callback(space, w_ctype, w_callable, w_error, w_onerror) # ____________________________________________________________ @@ -79,6 +84,26 @@ # ____________________________________________________________ +def _fetch_as_read_buffer(space, w_x): + # xxx do we really need to implement the same mess as in CPython 2.7 + # w.r.t. buffers and memoryviews?? + try: + buf = space.readbuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_SIMPLE) + return buf + +def _fetch_as_write_buffer(space, w_x): + try: + buf = space.writebuf_w(w_x) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + buf = space.buffer_w(w_x, space.BUF_WRITABLE) + return buf + @unwrap_spec(w_ctype=ctypeobj.W_CType) def from_buffer(space, w_ctype, w_x): from pypy.module._cffi_backend import ctypearray, ctypeprim @@ -88,14 +113,7 @@ raise oefmt(space.w_TypeError, "needs 'char[]', got '%s'", w_ctype.name) # - # xxx do we really need to implement the same mess as in CPython 2.7 - # w.r.t. buffers and memoryviews?? 
- try: - buf = space.readbuf_w(w_x) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - buf = space.buffer_w(w_x, space.BUF_SIMPLE) + buf = _fetch_as_read_buffer(space, w_x) try: _cdata = buf.get_raw_address() except ValueError: @@ -106,6 +124,76 @@ # return cdataobj.W_CDataFromBuffer(space, _cdata, w_ctype, buf, w_x) + +def unsafe_escaping_ptr_for_ptr_or_array(w_cdata): + if not w_cdata.ctype.is_nonfunc_pointer_or_array: + raise oefmt(w_cdata.space.w_TypeError, + "expected a pointer or array ctype, got '%s'", + w_cdata.ctype.name) + return w_cdata.unsafe_escaping_ptr() + +c_memmove = rffi.llexternal('memmove', [rffi.CCHARP, rffi.CCHARP, + rffi.SIZE_T], lltype.Void, + _nowrapper=True) + + at unwrap_spec(n=int) +def memmove(space, w_dest, w_src, n): + if n < 0: + raise oefmt(space.w_ValueError, "negative size") + + # cases... + src_buf = None + src_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_src, cdataobj.W_CData): + src_data = unsafe_escaping_ptr_for_ptr_or_array(w_src) + src_is_ptr = True + else: + src_buf = _fetch_as_read_buffer(space, w_src) + try: + src_data = src_buf.get_raw_address() + src_is_ptr = True + except ValueError: + src_is_ptr = False + + if src_is_ptr: + src_string = None + else: + if n == src_buf.getlength(): + src_string = src_buf.as_str() + else: + src_string = src_buf.getslice(0, n, 1, n) + + dest_buf = None + dest_data = lltype.nullptr(rffi.CCHARP.TO) + if isinstance(w_dest, cdataobj.W_CData): + dest_data = unsafe_escaping_ptr_for_ptr_or_array(w_dest) + dest_is_ptr = True + else: + dest_buf = _fetch_as_write_buffer(space, w_dest) + try: + dest_data = dest_buf.get_raw_address() + dest_is_ptr = True + except ValueError: + dest_is_ptr = False + + if dest_is_ptr: + if src_is_ptr: + c_memmove(dest_data, src_data, rffi.cast(rffi.SIZE_T, n)) + else: + copy_string_to_raw(llstr(src_string), dest_data, 0, n) + else: + if src_is_ptr: + for i in range(n): + dest_buf.setitem(i, src_data[i]) + else: + for i in 
range(n): + dest_buf.setitem(i, src_string[i]) + + keepalive_until_here(src_buf) + keepalive_until_here(dest_buf) + keepalive_until_here(w_src) + keepalive_until_here(w_dest) + # ____________________________________________________________ @unwrap_spec(w_cdata=cdataobj.W_CData) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,24 +1,24 @@ +import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rweaklist - - -class CffiHandles(rweaklist.RWeakListMixin): - def __init__(self, space): - self.initialize() - -def get(space): - return space.fromcache(CffiHandles) +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib import rgc, objectmodel, jit # ____________________________________________________________ + at jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - index = get(space).reserve_next_handle_index() - _cdata = rffi.cast(rffi.CCHARP, index + 1) - new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get(space).store_handle(index, new_cdataobj) + # Allocate a handle as a nonmovable W_CDataHandle instance, which + # we can cast to a plain CCHARP. As long as the object is not freed, + # we can cast the CCHARP back to a W_CDataHandle with reveal_gcref(). 
+ new_cdataobj = objectmodel.instantiate(cdataobj.W_CDataHandle, + nonmovable=True) + gcref = rgc.cast_instance_to_gcref(new_cdataobj) + _cdata = rgc.hide_nonmovable_gcref(gcref) + _cdata = rffi.cast(rffi.CCHARP, _cdata) + cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -38,14 +38,17 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get(space).fetch_handle(index - 1) - # - if isinstance(original_cdataobj, cdataobj.W_CDataHandle): - return original_cdataobj.w_keepalive - else: - if index == 0: - msg = "cannot use from_handle() on NULL pointer" - else: - msg = "'void *' value does not correspond to any object" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + return _reveal(space, ptr) + + at jit.dont_look_inside +def _reveal(space, ptr): + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) + if not gcref: + raise oefmt(space.w_RuntimeError, + "cannot use from_handle() on NULL pointer") + cd = rgc.try_cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + if cd is None: + raise oefmt(space.w_SystemError, + "ffi.from_handle(): dead or bogus object handle") + return cd.w_keepalive diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,10 +4,11 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit, rweakref +from rpython.rlib import jit, rweakref, clibffi from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform +from pypy.module import _cffi_backend from pypy.module._cffi_backend import (ctypeobj, ctypeprim, ctypeptr, ctypearray, ctypestruct, 
ctypevoid, ctypeenum) @@ -592,8 +593,9 @@ # ____________________________________________________________ - at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int) -def new_function_type(space, w_fargs, w_fresult, ellipsis=0): + at unwrap_spec(w_fresult=ctypeobj.W_CType, ellipsis=int, abi=int) +def new_function_type(space, w_fargs, w_fresult, ellipsis=0, + abi=_cffi_backend.FFI_DEFAULT_ABI): fargs = [] for w_farg in space.fixedview(w_fargs): if not isinstance(w_farg, ctypeobj.W_CType): @@ -602,28 +604,28 @@ if isinstance(w_farg, ctypearray.W_CTypeArray): w_farg = w_farg.ctptr fargs.append(w_farg) - return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) + return _new_function_type(space, fargs, w_fresult, bool(ellipsis), abi) -def _func_key_hash(unique_cache, fargs, fresult, ellipsis): +def _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi): x = compute_identity_hash(fresult) for w_arg in fargs: y = compute_identity_hash(w_arg) x = intmask((1000003 * x) ^ y) - x ^= ellipsis + x ^= (ellipsis - abi) if unique_cache.for_testing: # constant-folded to False in translation; x &= 3 # but for test, keep only 2 bits of hash return x # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis, abi): try: - return _get_function_type(space, fargs, fresult, ellipsis) + return _get_function_type(space, fargs, fresult, ellipsis, abi) except KeyError: - return _build_function_type(space, fargs, fresult, ellipsis) + return _build_function_type(space, fargs, fresult, ellipsis, abi) @jit.elidable -def _get_function_type(space, fargs, fresult, ellipsis): +def _get_function_type(space, fargs, fresult, ellipsis, abi): # This function is elidable because if called again with exactly the # same arguments (and if it didn't raise KeyError), it would give # the same result, at least as long as this 
result is still live. @@ -633,18 +635,19 @@ # one such dict, but in case of hash collision, there might be # more. unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: ctype = weakdict.get(func_hash) if (ctype is not None and ctype.ctitem is fresult and ctype.fargs == fargs and - ctype.ellipsis == ellipsis): + ctype.ellipsis == ellipsis and + ctype.abi == abi): return ctype raise KeyError @jit.dont_look_inside -def _build_function_type(space, fargs, fresult, ellipsis): +def _build_function_type(space, fargs, fresult, ellipsis, abi): from pypy.module._cffi_backend import ctypefunc # if ((fresult.size < 0 and @@ -658,9 +661,9 @@ raise oefmt(space.w_TypeError, "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis, abi) unique_cache = space.fromcache(UniqueCache) - func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis, abi) for weakdict in unique_cache.functions: if weakdict.get(func_hash) is None: weakdict.set(func_hash, fct) diff --git a/pypy/module/_cffi_backend/realize_c_type.py b/pypy/module/_cffi_backend/realize_c_type.py --- a/pypy/module/_cffi_backend/realize_c_type.py +++ b/pypy/module/_cffi_backend/realize_c_type.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import oefmt from pypy.interpreter.baseobjspace import W_Root +from pypy.module import _cffi_backend from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend import cffi_opcode, newtype, ctypestruct from pypy.module._cffi_backend import parse_c_type @@ -164,16 +165,28 @@ OP_FUNCTION_END = cffi_opcode.OP_FUNCTION_END while getop(opcodes[base_index + num_args]) 
!= OP_FUNCTION_END: num_args += 1 - ellipsis = (getarg(opcodes[base_index + num_args]) & 1) != 0 + # + ellipsis = (getarg(opcodes[base_index + num_args]) & 0x01) != 0 + abi = (getarg(opcodes[base_index + num_args]) & 0xFE) + if abi == 0: + abi = _cffi_backend.FFI_DEFAULT_ABI + elif abi == 2: + if _cffi_backend.has_stdcall: + abi = _cffi_backend.FFI_STDCALL + else: + abi = _cffi_backend.FFI_DEFAULT_ABI + else: + raise oefmt(ffi.w_FFIError, "abi number %d not supported", abi) + # fargs = [realize_c_type(ffi, opcodes, base_index + i) for i in range(num_args)] - return fargs, fret, ellipsis + return fargs, fret, ellipsis, abi def unwrap_as_fnptr(self, ffi): if self._ctfuncptr is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) self._ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) return self._ctfuncptr def unwrap_as_fnptr_in_elidable(self): @@ -190,7 +203,7 @@ # type ptr-to-struct. This is how recompiler.py produces # trampoline functions for PyPy. 
if self.nostruct_ctype is None: - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) # 'locs' will be a string of the same length as the final fargs, # containing 'A' where a struct argument was detected, and 'R' # in first position if a struct return value was detected @@ -207,7 +220,7 @@ locs = ['R'] + locs fret = newtype.new_void_type(ffi.space) ctfuncptr = newtype._new_function_type( - ffi.space, fargs, fret, ellipsis) + ffi.space, fargs, fret, ellipsis, abi) if locs == ['\x00'] * len(locs): locs = None else: @@ -218,7 +231,7 @@ locs[0] == 'R') def unexpected_fn_type(self, ffi): - fargs, fret, ellipsis = self._unpack(ffi) + fargs, fret, ellipsis, abi = self._unpack(ffi) argnames = [farg.name for farg in fargs] if ellipsis: argnames.append('...') diff --git a/pypy/module/_cffi_backend/src/parse_c_type.c b/pypy/module/_cffi_backend/src/parse_c_type.c --- a/pypy/module/_cffi_backend/src/parse_c_type.c +++ b/pypy/module/_cffi_backend/src/parse_c_type.c @@ -51,6 +51,9 @@ TOK_UNSIGNED, TOK_VOID, TOK_VOLATILE, + + TOK_CDECL, + TOK_STDCALL, }; typedef struct { @@ -165,6 +168,8 @@ switch (*p) { case '_': if (tok->size == 5 && !memcmp(p, "_Bool", 5)) tok->kind = TOK__BOOL; + if (tok->size == 7 && !memcmp(p,"__cdecl",7)) tok->kind = TOK_CDECL; + if (tok->size == 9 && !memcmp(p,"__stdcall",9))tok->kind = TOK_STDCALL; break; case 'c': if (tok->size == 4 && !memcmp(p, "char", 4)) tok->kind = TOK_CHAR; @@ -236,7 +241,7 @@ type). The 'outer' argument is the index of the opcode outside this "sequel". 
*/ - int check_for_grouping; + int check_for_grouping, abi=0; _cffi_opcode_t result, *p_current; header: @@ -253,6 +258,12 @@ /* ignored for now */ next_token(tok); goto header; + case TOK_CDECL: + case TOK_STDCALL: + /* must be in a function; checked below */ + abi = tok->kind; + next_token(tok); + goto header; default: break; } @@ -269,6 +280,11 @@ while (tok->kind == TOK_OPEN_PAREN) { next_token(tok); + if (tok->kind == TOK_CDECL || tok->kind == TOK_STDCALL) { + abi = tok->kind; + next_token(tok); + } + if ((check_for_grouping--) == 1 && (tok->kind == TOK_STAR || tok->kind == TOK_CONST || tok->kind == TOK_VOLATILE || @@ -286,7 +302,14 @@ } else { /* function type */ - int arg_total, base_index, arg_next, has_ellipsis=0; + int arg_total, base_index, arg_next, flags=0; + + if (abi == TOK_STDCALL) { + flags = 2; + /* note that an ellipsis below will overwrite this flags, + which is the goal: variadic functions are always cdecl */ + } + abi = 0; if (tok->kind == TOK_VOID && get_following_char(tok) == ')') { next_token(tok); @@ -315,7 +338,7 @@ _cffi_opcode_t oarg; if (tok->kind == TOK_DOTDOTDOT) { - has_ellipsis = 1; + flags = 1; /* ellipsis */ next_token(tok); break; } @@ -339,8 +362,7 @@ next_token(tok); } } - tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, - has_ellipsis); + tok->output[arg_next] = _CFFI_OP(_CFFI_OP_FUNCTION_END, flags); } if (tok->kind != TOK_CLOSE_PAREN) @@ -348,6 +370,9 @@ next_token(tok); } + if (abi != 0) + return parse_error(tok, "expected '('"); + while (tok->kind == TOK_OPEN_BRACKET) { *p_current = _CFFI_OP(_CFFI_GETOP(*p_current), tok->output_index); p_current = tok->output + tok->output_index; diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -2316,9 +2316,6 @@ f(); f() assert get_errno() == 77 -def test_abi(): - assert isinstance(FFI_DEFAULT_ABI, int) 
- def test_cast_to_array(): # not valid in C! extension to get a non-owning BInt = new_primitive_type("int") @@ -3396,6 +3393,78 @@ check(4 | 8, "CHB", "GTB") check(4 | 16, "CHB", "ROB") +def test_memmove(): + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + Char = new_primitive_type("char") + CharA = new_array_type(new_pointer_type(Char), None) + p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678]) + memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + memmove(p + 4, newp(CharA, b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + +def test_memmove_buffer(): + import array + Short = new_primitive_type("short") + ShortA = new_array_type(new_pointer_type(Short), None) + a = array.array('H', [10000, 20000, 30000]) + p = newp(ShortA, 5) + memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + +def test_memmove_readonly_readwrite(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + py.test.raises((TypeError, 
BufferError), memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + memmove(ba, b"EFGH", 4) + assert ba == bytearray(b"EFGHx") + +def test_memmove_sign_check(): + SignedChar = new_primitive_type("signed char") + SignedCharA = new_array_type(new_pointer_type(SignedChar), None) + p = newp(SignedCharA, 5) + py.test.raises(ValueError, memmove, p, p + 1, -1) # not segfault + +def test_memmove_bad_cdata(): + BInt = new_primitive_type("int") + p = cast(BInt, 42) + py.test.raises(TypeError, memmove, p, bytearray(b'a'), 1) + py.test.raises(TypeError, memmove, bytearray(b'a'), p, 1) + def test_dereference_null_ptr(): BInt = new_primitive_type("int") BIntPtr = new_pointer_type(BInt) @@ -3427,3 +3496,16 @@ "be 'foo *', but the types are different (check " "that you are not e.g. mixing up different ffi " "instances)") + +def test_stdcall_function_type(): + assert FFI_CDECL == FFI_DEFAULT_ABI + try: + stdcall = FFI_STDCALL + except NameError: + stdcall = FFI_DEFAULT_ABI + BInt = new_primitive_type("int") + BFunc = new_function_type((BInt, BInt), BInt, False, stdcall) + if stdcall != FFI_DEFAULT_ABI: + assert repr(BFunc) == "" + else: + assert repr(BFunc) == "" diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -247,6 +247,63 @@ ffi.cast("unsigned short *", c)[1] += 500 assert list(a) == [10000, 20500, 30000] + def test_memmove(self): + import sys + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("short[]", [-1234, -2345, -3456, -4567, -5678]) + ffi.memmove(p, p + 1, 4) + assert list(p) == [-2345, -3456, -3456, -4567, -5678] + p[2] = 999 + ffi.memmove(p + 2, p, 6) + assert list(p) == [-2345, -3456, -2345, -3456, 999] + ffi.memmove(p + 4, ffi.new("char[]", b"\x71\x72"), 2) + if sys.byteorder == 'little': + assert 
list(p) == [-2345, -3456, -2345, -3456, 0x7271] + else: + assert list(p) == [-2345, -3456, -2345, -3456, 0x7172] + + def test_memmove_buffer(self): + import _cffi_backend as _cffi1_backend + import array + ffi = _cffi1_backend.FFI() + a = array.array('H', [10000, 20000, 30000]) + p = ffi.new("short[]", 5) + ffi.memmove(p, a, 6) + assert list(p) == [10000, 20000, 30000, 0, 0] + ffi.memmove(p + 1, a, 6) + assert list(p) == [10000, 10000, 20000, 30000, 0] + b = array.array('h', [-1000, -2000, -3000]) + ffi.memmove(b, a, 4) + assert b.tolist() == [10000, 20000, -3000] + assert a.tolist() == [10000, 20000, 30000] + p[0] = 999 + p[1] = 998 + p[2] = 997 + p[3] = 996 + p[4] = 995 + ffi.memmove(b, p, 2) + assert b.tolist() == [999, 20000, -3000] + ffi.memmove(b, p + 2, 4) + assert b.tolist() == [997, 996, -3000] + p[2] = -p[2] + p[3] = -p[3] + ffi.memmove(b, p + 2, 6) + assert b.tolist() == [-997, -996, 995] + + def test_memmove_readonly_readwrite(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + p = ffi.new("signed char[]", 5) + ffi.memmove(p, b"abcde", 3) + assert list(p) == [ord("a"), ord("b"), ord("c"), 0, 0] + ffi.memmove(p, bytearray(b"ABCDE"), 2) + assert list(p) == [ord("A"), ord("B"), ord("c"), 0, 0] + raises((TypeError, BufferError), ffi.memmove, b"abcde", p, 3) + ba = bytearray(b"xxxxx") + ffi.memmove(dest=ba, src=p, n=3) + assert ba == bytearray(b"ABcxx") + def test_ffi_types(self): import _cffi_backend as _cffi1_backend CData = _cffi1_backend.FFI.CData diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py deleted file mode 100644 --- a/pypy/module/_cffi_backend/test/test_handle.py +++ /dev/null @@ -1,44 +0,0 @@ -import random -from pypy.module._cffi_backend.handle import CffiHandles - - -class PseudoWeakRef(object): - _content = 42 - - def __call__(self): - return self._content - - -def test_cffi_handles_1(): - ch = CffiHandles(None) - expected_content = {} - for i in 
range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - assert len(ch.handles) <= 16384 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr - -def test_cffi_handles_2(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - # - if len(expected_content) > 20: - r = random.choice(list(expected_content)) - pwr = expected_content.pop(r) - pwr._content = None - # - assert len(ch.handles) < 100 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr diff --git a/pypy/module/_cffi_backend/test/test_parse_c_type.py b/pypy/module/_cffi_backend/test/test_parse_c_type.py --- a/pypy/module/_cffi_backend/test/test_parse_c_type.py +++ b/pypy/module/_cffi_backend/test/test_parse_c_type.py @@ -338,3 +338,17 @@ # not supported (really obscure): # "char[+5]" # "char['A']" + +def test_stdcall_cdecl(): + assert parse("int __stdcall(int)") == [Prim(cffi_opcode.PRIM_INT), + '->', Func(0), NoOp(4), FuncEnd(2), + Prim(cffi_opcode.PRIM_INT)] + assert parse("int __stdcall func(int)") == parse("int __stdcall(int)") + assert parse("int (__stdcall *)()") == [Prim(cffi_opcode.PRIM_INT), + NoOp(3), '->', Pointer(1), + Func(0), FuncEnd(2), 0] + assert parse("int (__stdcall *p)()") == parse("int (__stdcall*)()") + parse_error("__stdcall int", "identifier expected", 0) + parse_error("__cdecl int", "identifier expected", 0) + parse_error("int __stdcall", "expected '('", 13) + parse_error("int __cdecl", "expected '('", 11) diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -118,7 
+118,7 @@ if buffering < 0: buffering = DEFAULT_BUFFER_SIZE - if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES: + if 'st_blksize' in STAT_FIELD_TYPES: fileno = space.c_int_w(space.call_method(w_raw, "fileno")) try: st = os.fstat(fileno) diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -12,6 +12,7 @@ from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy import ufuncs +import pypy.module.micronumpy.constants as NPY from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.typedef import TypeDef from pypy.interpreter.baseobjspace import W_Root @@ -203,12 +204,12 @@ return shape, dtype def simple_new(space, nd, dims, typenum, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) return W_NDimArray.from_shape(space, shape, dtype) def simple_new_from_data(space, nd, dims, typenum, data, - order='C', owning=False, w_subtype=None): + order=NPY.CORDER, owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, @@ -238,7 +239,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("strides must be NULL")) - order = 'C' if flags & NPY_C_CONTIGUOUS else 'F' + order = NPY.CORDER if flags & NPY_C_CONTIGUOUS else NPY.FORTRANORDER owning = True if flags & NPY_OWNDATA else False w_subtype = None diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -4,16 +4,17 @@ from 
rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.descriptor import get_dtype_cache +import pypy.module.micronumpy.constants as NPY def scalar(space): dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.new_scalar(space, dtype, space.wrap(10.)) -def array(space, shape, order='C'): +def array(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_float64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) -def iarray(space, shape, order='C'): +def iarray(space, shape, order=NPY.CORDER): dtype = get_dtype_cache(space).w_int64dtype return W_NDimArray.from_shape(space, shape, dtype, order=order) @@ -32,8 +33,8 @@ def test_FLAGS(self, space, api): s = array(space, [10]) - c = array(space, [10, 5, 3], order='C') - f = array(space, [10, 5, 3], order='F') + c = array(space, [10, 5, 3], order=NPY.CORDER) + f = array(space, [10, 5, 3], order=NPY.FORTRANORDER) assert api._PyArray_FLAGS(s) & 0x0001 assert api._PyArray_FLAGS(s) & 0x0002 assert api._PyArray_FLAGS(c) & 0x0001 diff --git a/pypy/module/itertools/__init__.py b/pypy/module/itertools/__init__.py --- a/pypy/module/itertools/__init__.py +++ b/pypy/module/itertools/__init__.py @@ -10,7 +10,6 @@ repeat(elem [,n]) --> elem, elem, elem, ... endlessly or up to n times Iterators terminating on the shortest input sequence: - izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... ifilter(pred, seq) --> elements of seq where pred(elem) is True ifilterfalse(pred, seq) --> elements of seq where pred(elem) is False islice(seq, [start,] stop [, step]) --> elements from @@ -22,6 +21,14 @@ takewhile(pred, seq) --> seq[0], seq[1], until pred fails dropwhile(pred, seq) --> seq[n], seq[n+1], starting when pred fails groupby(iterable[, keyfunc]) --> sub-iterators grouped by value of keyfunc(v) + izip(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... + izip_longest(p, q, ...) --> (p[0], q[0]), (p[1], q[1]), ... 
+ + Combinatoric generators: + product(p, q, ... [repeat=1]) --> cartesian product + permutations(p[, r]) + combinations(p, r) + combinations_with_replacement(p, r) """ interpleveldefs = { diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py --- a/pypy/module/itertools/interp_itertools.py +++ b/pypy/module/itertools/interp_itertools.py @@ -649,33 +649,38 @@ class W_IZipLongest(W_IMap): _error_name = "izip_longest" + _immutable_fields_ = ["w_fillvalue"] + + def _fetch(self, index): + w_iter = self.iterators_w[index] + if w_iter is not None: + space = self.space + try: + return space.next(w_iter) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + self.active -= 1 + if self.active <= 0: + # It was the last active iterator + raise + self.iterators_w[index] = None + return self.w_fillvalue def next_w(self): - space = self.space + # common case: 2 arguments + if len(self.iterators_w) == 2: + objects = [self._fetch(0), self._fetch(1)] + else: + objects = self._get_objects() + return self.space.newtuple(objects) + + def _get_objects(self): + # the loop is out of the way of the JIT nb = len(self.iterators_w) - if nb == 0: - raise OperationError(space.w_StopIteration, space.w_None) - - objects_w = [None] * nb - for index in range(nb): - w_value = self.w_fillvalue - w_it = self.iterators_w[index] - if w_it is not None: - try: - w_value = space.next(w_it) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - - self.active -= 1 - if self.active == 0: - # It was the last active iterator - raise - self.iterators_w[index] = None - - objects_w[index] = w_value - return space.newtuple(objects_w) + raise OperationError(self.space.w_StopIteration, self.space.w_None) + return [self._fetch(index) for index in range(nb)] def W_IZipLongest___new__(space, w_subtype, __args__): arguments_w, kwds_w = __args__.unpack() diff --git a/pypy/module/micronumpy/arrayops.py 
b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -108,7 +108,8 @@ w_axis = space.wrap(0) if space.is_none(w_axis): args_w = [w_arg.reshape(space, - space.newlist([w_arg.descr_get_size(space)])) + space.newlist([w_arg.descr_get_size(space)]), + w_arg.get_order()) for w_arg in args_w] w_axis = space.wrap(0) dtype = args_w[0].get_dtype() @@ -140,7 +141,7 @@ dtype = find_result_type(space, args_w, []) # concatenate does not handle ndarray subtypes, it always returns a ndarray - res = W_NDimArray.from_shape(space, shape, dtype, 'C') + res = W_NDimArray.from_shape(space, shape, dtype, NPY.CORDER) chunks = [Chunk(0, i, 1, i) for i in shape] axis_start = 0 for arr in args_w: diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -38,7 +38,8 @@ self.implementation = implementation @staticmethod - def from_shape(space, shape, dtype, order='C', w_instance=None, zero=True): + def from_shape(space, shape, dtype, order=NPY.CORDER, + w_instance=None, zero=True): from pypy.module.micronumpy import concrete, descriptor, boxes from pypy.module.micronumpy.strides import calc_strides if len(shape) > NPY.MAXDIMS: @@ -59,8 +60,9 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, storage_bytes=-1, - order='C', owning=False, w_subtype=None, - w_base=None, writable=True, strides=None, start=0): + order=NPY.CORDER, owning=False, w_subtype=None, + w_base=None, writable=True, strides=None, + start=0): from pypy.module.micronumpy import concrete from pypy.module.micronumpy.strides import (calc_strides, calc_backstrides) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -56,6 +56,9 @@ jit.hint(len(backstrides), promote=True) return backstrides + def get_flags(self): + return self.flags 
+ def getitem(self, index): return self.dtype.read(self, index, 0) @@ -89,17 +92,18 @@ def get_storage_size(self): return self.size - def reshape(self, orig_array, new_shape): + def reshape(self, orig_array, new_shape, order=NPY.ANYORDER): # Since we got to here, prod(new_shape) == self.size + order = support.get_order_as_CF(self.order, order) new_strides = None if self.size == 0: - new_strides, _ = calc_strides(new_shape, self.dtype, self.order) + new_strides, _ = calc_strides(new_shape, self.dtype, order) else: if len(self.get_shape()) == 0: new_strides = [self.dtype.elsize] * len(new_shape) else: new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) + self.get_strides(), order) if new_strides is None or len(new_strides) != len(new_shape): return None if new_strides is not None: @@ -303,10 +307,11 @@ return SliceArray(self.start, strides, backstrides, shape, self, orig_array) - def copy(self, space): + def copy(self, space, order=NPY.ANYORDER): + order = support.get_order_as_CF(self.order, order) strides, backstrides = calc_strides(self.get_shape(), self.dtype, - self.order) - impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, + order) + impl = ConcreteArray(self.get_shape(), self.dtype, order, strides, backstrides) return loop.setslice(space, self.get_shape(), impl, self) @@ -360,12 +365,12 @@ # but make the array storage contiguous in memory shape = self.get_shape() strides = self.get_strides() - if order not in ('C', 'F'): - raise oefmt(space.w_ValueError, "Unknown order %s in astype", order) + if order not in (NPY.KEEPORDER, NPY.FORTRANORDER, NPY.CORDER): + raise oefmt(space.w_ValueError, "Unknown order %d in astype", order) if len(strides) == 0: t_strides = [] backstrides = [] - elif order != self.order: + elif order in (NPY.FORTRANORDER, NPY.CORDER): t_strides, backstrides = calc_strides(shape, dtype, order) else: indx_array = range(len(strides)) @@ -378,6 +383,7 @@ t_strides[i] = base base *= 
shape[i] backstrides = calc_backstrides(t_strides, shape) + order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) loop.setslice(space, impl.get_shape(), impl, self) return impl @@ -429,6 +435,8 @@ self.shape = shape # already tested for overflow in from_shape_and_storage self.size = support.product(shape) * dtype.elsize + if order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "ConcreteArrayNotOwning but order is not 0,1 rather %d", order) self.order = order self.dtype = dtype self.strides = strides @@ -562,6 +570,8 @@ self.parent = parent self.storage = parent.storage self.gcstruct = parent.gcstruct + if parent.order not in (NPY.CORDER, NPY.FORTRANORDER): + raise oefmt(dtype.itemtype.space.w_ValueError, "SliceArray but parent order is not 0,1 rather %d", parent.order) self.order = parent.order self.dtype = dtype try: @@ -602,13 +612,13 @@ s = self.get_strides()[0] // dtype.elsize except IndexError: s = 1 - if self.order == 'C': + if self.order != NPY.FORTRANORDER: new_shape.reverse() for sh in new_shape: strides.append(s * dtype.elsize) backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) - if self.order == 'C': + if self.order != NPY.FORTRANORDER: strides.reverse() backstrides.reverse() new_shape.reverse() diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -77,9 +77,8 @@ elif order.startswith('K') or order.startswith('k'): return NPY.KEEPORDER else: - raise OperationError(space.w_TypeError, space.wrap( - "order not understood")) - + raise oefmt(space.w_TypeError, "Unknown order: '%s'", order) + return -1 def multi_axis_converter(space, w_axis, ndim): if space.is_none(w_axis): diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ 
b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support -from pypy.module.micronumpy.base import ( +from pypy.module.micronumpy.base import (wrap_impl, W_NDimArray, convert_to_array, W_NumpyObject) -from pypy.module.micronumpy.converters import shape_converter +from pypy.module.micronumpy.converters import shape_converter, order_converter +import pypy.module.micronumpy.constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -82,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -99,13 +101,8 @@ dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order == 'K': - order = 'C' - if order != 'C': # or order != 'F': - raise oefmt(space.w_ValueError, "Unknown order: %s", order) + w_order = space.wrap('C') + npy_order = order_converter(space, w_order, NPY.CORDER) if isinstance(w_object, W_NDimArray): if (dtype is None or w_object.get_dtype() is dtype): @@ -124,7 +121,7 @@ copy = True if copy: shape = w_object.get_shape() - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, w_object.implementation.getitem(0))) @@ -143,18 +140,13 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): 
dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: # safe from overflow since from_shape checks w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) else: @@ -165,7 +157,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -173,20 +164,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + 
return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return _find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + 
shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): From noreply at buildbot.pypy.org Tue Oct 13 11:03:32 2015 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 13 Oct 2015 11:03:32 +0200 (CEST) Subject: [pypy-commit] pypy default: try to redo mac fixes Message-ID: <20151013090332.6CD8A1C1464@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80155:a2c908e95b29 Date: 2015-10-13 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/a2c908e95b29/ Log: try to redo mac fixes diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -132,7 +133,7 @@ \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 +%(extra_align)s \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, 
unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; #define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print 
num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end of file From noreply at buildbot.pypy.org Tue Oct 13 12:08:36 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 12:08:36 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: e.g. [i for i, x in enumerate(...) if cond] will consume an index i even if the condition is not met Message-ID: <20151013100836.13C071C1464@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80156:f8d1a700ef62 Date: 2015-10-13 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/f8d1a700ef62/ Log: e.g. [i for i,x in enumerate(...) 
if cond] will consume an index i even if the condition is not met diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -140,6 +140,7 @@ def getoperation(self): return self.op + def getindex(self): return self.opidx @@ -543,7 +544,9 @@ def __init__(self, loop): self.loop = loop self.label = Node(loop.label, 0) - self.nodes = [ Node(op,i+1) for i,op in enumerate(loop.operations) if not op.is_jit_debug() ] + self.nodes = [ Node(op,0) for op in loop.operations if not op.is_jit_debug() ] + for i,node in enumerate(self.nodes): + node.opidx = i+1 self.inodes = [] # imaginary nodes self.jump = Node(loop.jump, len(self.nodes)+1) self.invariant_vars = {} From noreply at buildbot.pypy.org Tue Oct 13 12:14:06 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 12:14:06 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: merged default Message-ID: <20151013101406.C26D01C1464@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80157:1e544babac6b Date: 2015-10-13 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/1e544babac6b/ Log: merged default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -69,6 +69,12 @@ Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). + .. branch: vecopt .. 
branch: vecopt-merge diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,14 +1,14 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc, handle +from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -19,6 +19,23 @@ # ____________________________________________________________ + at jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). 
+ cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -37,7 +54,8 @@ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -72,8 +90,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) # - handle_index = handle.get_handles(space).reserve_next_handle_index() - # cif_descr = self.getfunctype().cif_descr if not cif_descr: raise oefmt(space.w_NotImplementedError, @@ -81,16 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, handle_index) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - _current_space.space = space - handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -221,12 +234,6 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) -class CurrentSpace: - def _cleanup_(self): - if hasattr(self, 'space'): - del self.space -_current_space = 
CurrentSpace() - def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care @@ -236,10 +243,8 @@ (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - space = _current_space.space - callback = handle.get_handles(space).fetch_handle(unique_id) - if callback is None or not isinstance(callback, W_CDataCallback): + callback = reveal_callback(ll_userdata) + if callback is None: # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -251,6 +256,7 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False try: must_leave = space.threadlocals.try_enter_thread(space) diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -294,9 +294,9 @@ CONSIDER_FN_AS_FNPTR) space = self.space if not space.is_none(w_python_callable): - return ccallback.W_CDataCallback(space, w_ctype, - w_python_callable, w_error, - w_onerror) + return ccallback.make_callback(space, w_ctype, + w_python_callable, w_error, + w_onerror) else: # decorator mode: returns a single-argument function return space.appexec([w_ctype, w_error, w_onerror], diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -24,8 +24,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType) def callback(space, w_ctype, w_callable, w_error=None, w_onerror=None): - from pypy.module._cffi_backend.ccallback import W_CDataCallback - return W_CDataCallback(space, w_ctype, w_callable, w_error, w_onerror) + from pypy.module._cffi_backend.ccallback import make_callback + return make_callback(space, w_ctype, w_callable, w_error, w_onerror) # ____________________________________________________________ 
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,24 +1,24 @@ +import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rweaklist - - -class CffiHandles(rweaklist.RWeakListMixin): - def __init__(self, space): - self.initialize() - -def get_handles(space): - return space.fromcache(CffiHandles) +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib import rgc, objectmodel, jit # ____________________________________________________________ + at jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - index = get_handles(space).reserve_next_handle_index() - _cdata = rffi.cast(rffi.CCHARP, index + 1) - new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get_handles(space).store_handle(index, new_cdataobj) + # Allocate a handle as a nonmovable W_CDataHandle instance, which + # we can cast to a plain CCHARP. As long as the object is not freed, + # we can cast the CCHARP back to a W_CDataHandle with reveal_gcref(). 
+ new_cdataobj = objectmodel.instantiate(cdataobj.W_CDataHandle, + nonmovable=True) + gcref = rgc.cast_instance_to_gcref(new_cdataobj) + _cdata = rgc.hide_nonmovable_gcref(gcref) + _cdata = rffi.cast(rffi.CCHARP, _cdata) + cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -38,14 +38,17 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get_handles(space).fetch_handle(index - 1) - # - if isinstance(original_cdataobj, cdataobj.W_CDataHandle): - return original_cdataobj.w_keepalive - else: - if index == 0: - msg = "cannot use from_handle() on NULL pointer" - else: - msg = "'void *' value does not correspond to any object" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + return _reveal(space, ptr) + + at jit.dont_look_inside +def _reveal(space, ptr): + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) + if not gcref: + raise oefmt(space.w_RuntimeError, + "cannot use from_handle() on NULL pointer") + cd = rgc.try_cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + if cd is None: + raise oefmt(space.w_SystemError, + "ffi.from_handle(): dead or bogus object handle") + return cd.w_keepalive diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py deleted file mode 100644 --- a/pypy/module/_cffi_backend/test/test_handle.py +++ /dev/null @@ -1,44 +0,0 @@ -import random -from pypy.module._cffi_backend.handle import CffiHandles - - -class PseudoWeakRef(object): - _content = 42 - - def __call__(self): - return self._content - - -def test_cffi_handles_1(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - 
expected_content[index] = pwr - ch.handles[index] = pwr - assert len(ch.handles) <= 16384 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr - -def test_cffi_handles_2(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - # - if len(expected_content) > 20: - r = random.choice(list(expected_content)) - pwr = expected_content.pop(r) - pwr._content = None - # - assert len(ch.handles) < 100 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -290,7 +290,7 @@ return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) @analyzer_for(rpython.rlib.objectmodel.instantiate) -def robjmodel_instantiate(s_clspbc): +def robjmodel_instantiate(s_clspbc, s_nonmovable=None): assert isinstance(s_clspbc, SomePBC) clsdef = None more_than_one = len(s_clspbc.descriptions) > 1 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -508,6 +508,21 @@ self._store_and_reset_exception(self.mc, resloc) return fcond + def emit_op_save_exc_class(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self.mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) + self.load_reg(self.mc, resloc, r.ip) + return fcond + + def emit_op_save_exception(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self._store_and_reset_exception(self.mc, resloc) + return fcond + + def emit_op_restore_exception(self, op, arglocs, regalloc, fcond): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + return fcond + def 
emit_op_debug_merge_point(self, op, arglocs, regalloc, fcond): return fcond emit_op_jit_debug = emit_op_debug_merge_point diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -707,6 +707,17 @@ [loc, loc1, resloc, pos_exc_value, pos_exception]) return arglocs + def prepare_op_save_exception(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + prepare_op_save_exc_class = prepare_op_save_exception + + def prepare_op_restore_exception(self, op, fcond): + boxes = op.getarglist() + loc0 = self.make_sure_var_in_reg(op.getarg(0), boxes) # exc class + loc1 = self.make_sure_var_in_reg(op.getarg(1), boxes) # exc instance + return [loc0, loc1] + def prepare_op_guard_no_exception(self, op, fcond): loc = self.make_sure_var_in_reg(ConstInt(self.cpu.pos_exception())) arglocs = self._prepare_guard(op, [loc]) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -52,8 +52,6 @@ # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) - elif opnum == rop.BRIDGE_EXCEPTION: - assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -1097,8 +1095,9 @@ self._accumulate(descr, self.current_op.getfailargs(), values) if hasattr(descr, '_llgraph_bridge'): if propagate_exception: - assert (descr._llgraph_bridge.operations[0].opnum == - rop.BRIDGE_EXCEPTION) + assert (descr._llgraph_bridge.operations[0].opnum in + (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION, + rop.GUARD_NO_EXCEPTION)) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) @@ -1430,8 +1429,32 @@ def 
execute_keepalive(self, descr, x): pass - def execute_bridge_exception(self, descr): - pass + def execute_save_exc_class(self, descr): + lle = self.last_exception + if lle is None: + return 0 + else: + return support.cast_to_int(lle.args[0]) + + def execute_save_exception(self, descr): + lle = self.last_exception + if lle is None: + res = lltype.nullptr(llmemory.GCREF.TO) + else: + res = lltype.cast_opaque_ptr(llmemory.GCREF, lle.args[1]) + self.last_exception = None + return res + + def execute_restore_exception(self, descr, kls, e): + kls = heaptracker.int2adr(kls) + if e: + value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, e) + assert llmemory.cast_ptr_to_adr(value.typeptr) == kls + lle = LLException(value.typeptr, e) + else: + assert kls == llmemory.NULL + lle = None + self.last_exception = lle def _getdescr(op): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -119,6 +119,7 @@ # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. # + operations = self.remove_bridge_exception(operations) for i in range(len(operations)): op = operations[i] assert op.get_forwarded() is None @@ -168,9 +169,6 @@ continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: self.emit_pending_zeros() - if op.getopnum() == rop.BRIDGE_EXCEPTION: - self.remove_bridge_exception(operations, i) - continue # self.emit_op(op) return self._newops @@ -686,13 +684,17 @@ size = max(size, 2 * WORD) return (size + WORD-1) & ~(WORD-1) # round up - def remove_bridge_exception(self, operations, i): - """Check that the 'bridge_exception' operation occurs at the - start of the bridge.""" - if i == 0: - return # first operation, ok - if i == 1 and operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: - return # 2nd operation after INCREMENT_DEBUG_COUNTER, ok - # not ok! 
- assert we_are_translated() - raise BridgeExceptionNotFirst + def remove_bridge_exception(self, operations): + """Check a common case: 'save_exception' immediately followed by + 'restore_exception' at the start of the bridge.""" + # XXX should check if the boxes are used later; but we just assume + # they aren't for now + start = 0 + if operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: + start = 1 + if len(operations) >= start + 3: + if (operations[start+0].getopnum() == rop.SAVE_EXC_CLASS and + operations[start+1].getopnum() == rop.SAVE_EXCEPTION and + operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): + return operations[:start] + operations[start+3:] + return operations diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2099,6 +2099,60 @@ excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue + def test_save_restore_exceptions(self): + exc_tp = None + exc_ptr = None + def func(i): + if hasattr(self.cpu, '_exception_emulator'): + assert not self.cpu._exception_emulator[0] + assert not self.cpu._exception_emulator[1] + called.append(i) + if i: + raise LLException(exc_tp, exc_ptr) + + ops = ''' + [i0] + i1 = same_as_i(1) + call_n(ConstClass(fptr), i0, descr=calldescr) + i2 = save_exc_class() + p2 = save_exception() + call_n(ConstClass(fptr), 0, descr=calldescr) + restore_exception(i2, p2) + p0 = guard_exception(ConstClass(xtp)) [i1] + finish(p0) + ''' + FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) + fptr = llhelper(FPTR, func) + calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + + xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + xtp.subclassrange_min = 1 + xtp.subclassrange_max = 3 + X = lltype.GcStruct('X', ('parent', rclass.OBJECT), + hints={'vtable': xtp._obj}) + xx = lltype.malloc(X) + xx.parent.typeptr = xtp + xptr = 
lltype.cast_opaque_ptr(llmemory.GCREF, xx) + + exc_tp = xtp + exc_ptr = xptr + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + called = [] + deadframe = self.cpu.execute_token(looptoken, 5) + assert called == [5, 0] + assert self.cpu.get_ref_value(deadframe, 0) == xptr + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + called = [] + deadframe = self.cpu.execute_token(looptoken, 0) + assert called == [0, 0] + assert self.cpu.get_int_value(deadframe, 0) == 1 + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + def test_cond_call_gc_wb(self): def func_void(a): record.append(rffi.cast(lltype.Signed, a)) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1698,6 +1698,15 @@ self.implement_guard(guard_token) self._store_and_reset_exception(self.mc, resloc) + def genop_save_exc_class(self, op, arglocs, resloc): + self.mc.MOV(resloc, heap(self.cpu.pos_exception())) + + def genop_save_exception(self, op, arglocs, resloc): + self._store_and_reset_exception(self.mc, resloc) + + def genop_discard_restore_exception(self, op, arglocs): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + def _store_and_reset_exception(self, mc, excvalloc=None, exctploc=None, tmploc=None): """ Resest the exception. 
If excvalloc is None, then store it on the diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -465,6 +465,17 @@ self.perform_guard(op, [loc, loc1], resloc) self.rm.possibly_free_var(box) + def consider_save_exception(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) + consider_save_exc_class = consider_save_exception + + def consider_restore_exception(self, op): + args = op.getarglist() + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class + loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance + self.perform_discard(op, [loc0, loc1]) + consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception consider_guard_not_forced = consider_guard_no_exception diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -915,10 +915,13 @@ return [op0, op1] def rewrite_op_malloc(self, op): - if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value + if d.get('nonmovable', False): + raise UnsupportedMallocFlags(d) + if d['flavor'] == 'raw': return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) # - if op.args[1].value.get('zero', False): + if d.get('zero', False): zero = True else: zero = False diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -386,7 +386,9 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, - rop.BRIDGE_EXCEPTION, + rop.SAVE_EXC_CLASS, + rop.SAVE_EXCEPTION, + rop.RESTORE_EXCEPTION, rop.VEC_RAW_LOAD_I, rop.VEC_RAW_LOAD_F, rop.VEC_RAW_STORE, diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2487,17 +2487,28 @@ # 'test_guard_no_exception_incorrectly_removed_from_bridge' # shows a corner case in which just putting GuARD_NO_EXCEPTION # here is a bad idea: the optimizer might remove it too. - # So we put a pair BRIDGE_EXCEPTION / GUARD_(NO)_EXCEPTION. - # The BRIDGE_EXCEPTION is meant to re-raise the exception - # caught before the bridge, but in reality it must end up - # as the first operation and thus is a no-op for the backends - # (it is removed in rewrite.py). Its real purpose is only to - # pass through the optimizer unmodified, so that the following - # GUARD_NO_EXCEPTION is not killed. - self.history.record(rop.BRIDGE_EXCEPTION, [], None) - if exception: - self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, - exception)) + # So we put a SAVE_EXCEPTION at the start, and a + # RESTORE_EXCEPTION just before the guard. (rewrite.py will + # remove the two if they end up consecutive.) 
+ + # XXX too much jumps between older and newer models; clean up + # by killing SAVE_EXC_CLASS, RESTORE_EXCEPTION and GUARD_EXCEPTION + + exception_obj = lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception) + if exception_obj: + exc_class = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(exception_obj.typeptr)) + else: + exc_class = 0 + i = len(self.history.operations) + op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) + op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) + assert op1 is self.history.operations[i] + assert op2 is self.history.operations[i + 1] + self.history.operations = [op1, op2] + self.history.operations[:i] + self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) + if exception_obj: + self.execute_ll_raised(exception_obj) else: self.clear_exception() try: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -978,7 +978,7 @@ 'GUARD_SUBCLASS/2d/n', # only if supports_guard_gc_type '_GUARD_FOLDABLE_LAST', 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set - 'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d/r', # XXX kill me, use only SAVE_EXCEPTION 'GUARD_NO_OVERFLOW/0d/n', 'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set @@ -1158,7 +1158,9 @@ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', - 'BRIDGE_EXCEPTION/0/n', # pyjitpl: prepare_resume_from_failure() + 'SAVE_EXCEPTION/0/r', + 'SAVE_EXC_CLASS/0/i', # XXX kill me + 'RESTORE_EXCEPTION/2/n', # XXX kill me '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -172,6 +172,9 @@ def can_move(self, 
addr): return False + def malloc_fixedsize_nonmovable(self, typeid): + raise MemoryError + def pin(self, addr): return False diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -597,7 +597,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -606,7 +606,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -659,7 +659,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -692,6 +692,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" @@ -808,7 +813,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -862,7 +867,9 @@ # we should get a MemoryError from major_collection_step(). # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -874,10 +881,6 @@ # Allocate from the ArenaCollection. Don't clear it. result = self.ac.malloc(totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. 
extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -897,11 +900,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -939,7 +942,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -519,7 +519,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -528,7 +528,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -581,7 +581,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -614,6 +614,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" self.minor_collection() @@ -671,7 +676,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -711,7 +716,9 @@ self.major_collection(raw_malloc_usage(totalsize)) # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -724,10 +731,6 @@ result = self.ac.malloc(totalsize) llmemory.raw_memclear(result, totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. 
extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -747,11 +750,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -787,7 +790,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -565,8 +565,8 @@ tid = self.get_type_id(VAR) largeobj_size = self.gc.nonlarge_max + 1 self.gc.next_major_collection_threshold = 99999.0 - addr_src = self.gc.external_malloc(tid, largeobj_size) - addr_dst = self.gc.external_malloc(tid, largeobj_size) + addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) + addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) hdr_src = self.gc.header(addr_src) hdr_dst = self.gc.header(addr_dst) # diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -531,6 +531,9 @@ getfn(func, [SomeAddress()], annmodel.s_None) + self.malloc_nonmovable_ptr = getfn(GCClass.malloc_fixedsize_nonmovable, + [s_gc, s_typeid16], + s_gcref) def 
create_custom_trace_funcs(self, gc, rtyper): custom_trace_funcs = tuple(rtyper.custom_trace_funcs) @@ -757,7 +760,12 @@ c_has_light_finalizer = rmodel.inputconst(lltype.Bool, has_light_finalizer) - if not op.opname.endswith('_varsize') and not flags.get('varsize'): + if flags.get('nonmovable'): + assert op.opname == 'malloc' + assert not flags.get('varsize') + malloc_ptr = self.malloc_nonmovable_ptr + args = [self.c_const_gc, c_type_id] + elif not op.opname.endswith('_varsize') and not flags.get('varsize'): zero = flags.get('zero', False) if (self.malloc_fast_ptr is not None and not c_has_finalizer.value and diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1247,6 +1247,26 @@ res = self.runner('nursery_hash_base') assert res([]) >= 195 + def define_instantiate_nonmovable(cls): + from rpython.rlib import objectmodel + from rpython.rtyper import annlowlevel + class A: + pass + def fn(): + a1 = A() + a = objectmodel.instantiate(A, nonmovable=True) + a.next = a1 # 'a' is known young here, so no write barrier emitted + res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + rgc.collect() + objectmodel.keepalive_until_here(a) + return res + return fn + + def test_instantiate_nonmovable(self): + res = self.runner('instantiate_nonmovable') + assert res([]) == 0 + + class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -276,7 +276,7 @@ # ____________________________________________________________ -def instantiate(cls): +def instantiate(cls, nonmovable=False): "Create an empty instance of 'cls'." 
if isinstance(cls, type): return cls.__new__(cls) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -480,7 +480,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation - __slots__ = ['_x'] + __slots__ = ['_x', '_handle'] def __init__(self, x): self._x = x def __hash__(self): @@ -529,6 +529,48 @@ return None try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' +_ffi_cache = None +def _fetch_ffi(): + global _ffi_cache + if _ffi_cache is None: + try: + import _cffi_backend + _ffi_cache = _cffi_backend.FFI() + except (ImportError, AttributeError): + import py + py.test.skip("need CFFI >= 1.0") + return _ffi_cache + + at jit.dont_look_inside +def hide_nonmovable_gcref(gcref): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + if we_are_translated(): + assert lltype.typeOf(gcref) == llmemory.GCREF + assert not can_move(gcref) + return rffi.cast(llmemory.Address, gcref) + else: + assert isinstance(gcref, _GcRef) + x = gcref._x + ffi = _fetch_ffi() + if not hasattr(x, '__handle'): + x.__handle = ffi.new_handle(x) + addr = int(ffi.cast("intptr_t", x.__handle)) + return rffi.cast(llmemory.Address, addr) + + at jit.dont_look_inside +def reveal_gcref(addr): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + assert lltype.typeOf(addr) == llmemory.Address + if we_are_translated(): + return rffi.cast(llmemory.GCREF, addr) + else: + addr = rffi.cast(lltype.Signed, addr) + if addr == 0: + return lltype.nullptr(llmemory.GCREF.TO) + ffi = _fetch_ffi() + x = ffi.from_handle(ffi.cast("void *", addr)) + return _GcRef(x) + # ------------------- implementation ------------------- _cache_s_list_of_gcrefs = None diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -79,6 +79,7 @@ @specialize.arg(0) def ll_start_new_thread(func): + _check_thread_enabled() ident = 
c_thread_start(func) if ident == -1: raise error("can't start new thread") @@ -170,6 +171,18 @@ def _cleanup_(self): raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") +def _check_thread_enabled(): + pass +class Entry(ExtRegistryEntry): + _about_ = _check_thread_enabled + def compute_result_annotation(self): + translator = self.bookkeeper.annotator.translator + if not translator.config.translation.thread: + raise Exception( + "this RPython program uses threads: translate with '--thread'") + def specialize_call(self, hop): + hop.exception_cannot_occur() + # ____________________________________________________________ # # Stack size diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -132,7 +133,7 @@ \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 +%(extra_align)s \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; 
static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; #define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = 
str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end of file diff --git a/rpython/rtyper/lltypesystem/rtagged.py b/rpython/rtyper/lltypesystem/rtagged.py --- a/rpython/rtyper/lltypesystem/rtagged.py +++ b/rpython/rtyper/lltypesystem/rtagged.py @@ -27,7 +27,8 @@ self.classdef, flds)) self.specialfieldname = flds[0] - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): + assert not nonmovable if self.is_parent: raise TyperError("don't instantiate %r, it is a parent of an " "UnboxedValue class" % (self.classdef,)) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -693,18 +693,24 @@ return hop.args_r[0].rtype_isinstance(hop) @typer_for(objectmodel.instantiate) -def rtype_instantiate(hop): +def rtype_instantiate(hop, i_nonmovable=None): hop.exception_cannot_occur() s_class = hop.args_s[0] assert isinstance(s_class, annmodel.SomePBC) + v_nonmovable, = parse_kwds(hop, 
(i_nonmovable, None)) + nonmovable = (i_nonmovable is not None and v_nonmovable.value) if len(s_class.descriptions) != 1: # instantiate() on a variable class + if nonmovable: + raise TyperError("instantiate(x, nonmovable=True) cannot be used " + "if x is not a constant class") vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) r_class = hop.args_r[0] return r_class._instantiate_runtime_class(hop, vtypeptr, hop.r_result.lowleveltype) classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops, + nonmovable=nonmovable) @typer_for(hasattr) diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -684,10 +684,12 @@ rbase = rbase.rbase return False - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): """Build a new instance, without calling __init__.""" flavor = self.gcflavor flags = {'flavor': flavor} + if nonmovable: + flags['nonmovable'] = True ctype = inputconst(Void, self.object_type) cflags = inputconst(Void, flags) vlist = [ctype, cflags] @@ -1031,9 +1033,10 @@ # ____________________________________________________________ -def rtype_new_instance(rtyper, classdef, llops, classcallhop=None): +def rtype_new_instance(rtyper, classdef, llops, classcallhop=None, + nonmovable=False): rinstance = getinstancerepr(rtyper, classdef) - return rinstance.new_instance(llops, classcallhop) + return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable) def ll_inst_hash(ins): if not ins: diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -432,6 +432,14 @@ res = self.interpret(f, [2]) assert self.class_name(res) == 'B' + def test_instantiate_nonmovable(self): + 
class A: + pass + def f(): + return instantiate(A, nonmovable=True) # no effect before GC + res = self.interpret(f, []) + assert self.class_name(res) == 'A' + def test_os_path_join(self): def fn(a, b): return os.path.join(a, b) From noreply at buildbot.pypy.org Tue Oct 13 16:49:54 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 16:49:54 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: new branch for the s390x backend Message-ID: <20151013144954.370081C1214@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80158:6850eb25a44a Date: 2015-10-13 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/6850eb25a44a/ Log: new branch for the s390x backend From noreply at buildbot.pypy.org Tue Oct 13 16:49:56 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 16:49:56 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: added minimal cpu setup and created directory structure Message-ID: <20151013144956.491091C1214@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80159:42ca55277fde Date: 2015-10-13 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/42ca55277fde/ Log: added minimal cpu setup and created directory structure diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -15,6 +15,7 @@ MODEL_X86_64 = 'x86-64' MODEL_ARM = 'arm' MODEL_PPC_64 = 'ppc-64' +MODEL_S390_64 = 's390x' # don't use '_' in the model strings; they are replaced by '-' @@ -26,6 +27,7 @@ MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], + MODEL_S390_64:['__s390x__'], } for k, v in mapping.iteritems(): for macro in v: @@ -64,6 +66,7 @@ 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, 'arm': MODEL_ARM, # freebsd + 's390x': MODEL_S390_64 }.get(mach) if result is None: @@ 
-111,6 +114,8 @@ return "rpython.jit.backend.x86.runner", "CPU_X86_64" elif backend_name == MODEL_ARM: return "rpython.jit.backend.arm.runner", "CPU_ARM" + elif backend_name == MODEL_S390_64: + return "rpython.jit.backend.zarch.runner", "CPU_ZARCH" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) @@ -130,6 +135,7 @@ MODEL_X86_64: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_S390_64: [], }[backend_name] if __name__ == '__main__': diff --git a/rpython/jit/backend/zarch/__init__.py b/rpython/jit/backend/zarch/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/runner.py @@ -0,0 +1,7 @@ +from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU + +class AbstractZARCHCPU(AbstractLLCPU): + pass + +class CPU_S390X(AbstractZARCHCPU): + pass diff --git a/rpython/jit/backend/zarch/test/__init__.py b/rpython/jit/backend/zarch/test/__init__.py new file mode 100644 From noreply at buildbot.pypy.org Tue Oct 13 16:53:04 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 16:53:04 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Don't use specialize:ctr_location in rpython.rlib.parsing Message-ID: <20151013145304.056561C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80160:8761f11122e3 Date: 2015-10-13 07:15 +0100 http://bitbucket.org/pypy/pypy/changeset/8761f11122e3/ Log: Don't use specialize:ctr_location in rpython.rlib.parsing Surprisingly, all the tests still seem to pass. 
diff --git a/rpython/rlib/parsing/makepackrat.py b/rpython/rlib/parsing/makepackrat.py --- a/rpython/rlib/parsing/makepackrat.py +++ b/rpython/rlib/parsing/makepackrat.py @@ -54,7 +54,7 @@ newline: COMMENT | `( *\n *)*`; - + REGEX: r = `\`[^\\\`]*(\\.[^\\\`]*)*\`` @@ -109,8 +109,8 @@ IGNORE* return {Nonterminal('productionargs', args + [arg])} | return {Nonterminal('productionargs', [])}; - - + + or_: l = (commands ['|' IGNORE*])+ last = commands @@ -222,7 +222,7 @@ call | REGEX [IGNORE*] | QUOTE [IGNORE*]; call: - x = NAME + x = NAME args = arguments IGNORE* return {Nonterminal("call", [x, args])}; @@ -288,8 +288,7 @@ INPROGRESS = 2 LEFTRECURSION = 3 SOMESOLUTIONS = 4 - - _annspecialcase_ = 'specialize:ctr_location' # polymorphic + def __repr__(self): return "Status(%s, %s, %s, %s)" % (self.pos, self.result, self.error, self.status) @@ -300,6 +299,7 @@ self.status = self.INPROGRESS self.result = None + class ParserBuilder(RPythonVisitor, Codebuilder): def __init__(self): Codebuilder.__init__(self) @@ -594,7 +594,7 @@ r = t.additional_info[1:-1].replace('\\`', '`') matcher = self.get_regex(r) self.emit("_result = self._regex%s()" % (abs(hash(r)), )) - + def visit_QUOTE(self, t): self.emit("_result = self.__chars__(%r)" % ( str(t.additional_info[1:-1]), )) From noreply at buildbot.pypy.org Tue Oct 13 17:12:04 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 17:12:04 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: implement casting in ufuncs Message-ID: <20151013151204.ACD501C103D@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80161:d84d28ec19ea Date: 2015-10-11 22:36 +0300 http://bitbucket.org/pypy/pypy/changeset/d84d28ec19ea/ Log: implement casting in ufuncs diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,7 +159,6 @@ af2 = ufunc(af) assert all(af2 
== af * 2) ac = arange(10, dtype=complex) - skip('casting not implemented yet') ac1 = ufunc(ac) def test_frompyfunc_2d_sig(self): @@ -1393,7 +1392,7 @@ def test_add_doc(self): import sys if '__pypy__' not in sys.builtin_module_names: - skip('') + skip('cpython sets docstrings differently') try: from numpy import set_docstring except ImportError: From noreply at buildbot.pypy.org Tue Oct 13 17:12:06 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 17:12:06 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: percolate casting into ufuncs, call_many_to_one, call_many_to_many Message-ID: <20151013151206.D81A81C103D@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80162:43673ac286b4 Date: 2015-10-13 18:08 +0300 http://bitbucket.org/pypy/pypy/changeset/43673ac286b4/ Log: percolate casting into ufuncs, call_many_to_one, call_many_to_many diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -74,10 +74,10 @@ call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -95,9 +95,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -108,10 +108,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -134,24 +134,29 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) + print 'vals', vals w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,7 +159,7 @@ af2 = ufunc(af) assert all(af2 == af * 2) ac = arange(10, dtype=complex) - ac1 = ufunc(ac) + raises(TypeError, ufunc, ac) def 
test_frompyfunc_2d_sig(self): import sys @@ -267,6 +267,54 @@ assert out0.shape == in0.shape assert (out0 == in0 * 2).all() + def test_frompyfunc_casting(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def times2_int(in0, out0): + assert in0.dtype == int + assert out0.dtype == int + # hack to assing to a 0-dim array + out0.real = in0 * 2 + + def times2_complex(in0, out0): + assert in0.dtype == complex + assert out0.dtype == complex + out0.real = in0.real * 2 + out0.imag = in0.imag + + def times2_complex0(in0): + assert in0.dtype == complex + return in0 * 2 + + def times2_int0(in0): + assert in0.dtype == int + return in0 * 2 + + times2stacked = np.frompyfunc([times2_int, times2_complex], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=True, signature='()->()', + ) + times2 = np.frompyfunc([times2_int0, times2_complex0], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d) + out0 = times2stacked(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + out0 = times2(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -709,6 +709,32 @@ raise oefmt(space.w_TypeError, "ufunc '%s' not supported for the input types", self.name) +def _match_dtypes(space, indtypes, targetdtypes, i_target, casting): + allok = True + for i in range(len(indtypes)): + origin = indtypes[i] + target = targetdtypes[i + i_target] + if 
origin is None: + continue + if target is None: + continue + if not can_cast_type(space, origin, target, casting): + allok = False + break + return allok + +def _raise_err_msg(self, space, dtypes0, dtypes1): + dtypesstr = '' + for d in dtypes0: + if d is None: + dtypesstr += 'None,' + else: + dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) + _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ + for d in dtypes1]) + raise oefmt(space.w_TypeError, + "input dtype [%s] did not match any known dtypes [%s] ", + dtypesstr,_dtypesstr) class W_UfuncGeneric(W_Ufunc): @@ -794,6 +820,7 @@ iter_shape, arg_shapes, matched_dims = self.verify_args(space, inargs, outargs) inargs, outargs, need_to_cast = self.alloc_args(space, inargs, outargs, dtypes, arg_shapes) + print 'call', self.external_loop, need_to_cast, dtypes, [a.get_dtype() for a in inargs], [a.get_dtype() for a in outargs] if not self.external_loop: inargs0 = inargs[0] outargs0 = outargs[0] @@ -802,26 +829,31 @@ res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap outargs using __array_wrap__ + if self.stack_inputs: + loop.call_many_to_many(space, new_shape, func, + dtypes, [], inargs + outargs, []) + if len(outargs) < 2: + return outargs[0] + return outargs if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - res_dtype, inargs, outargs[0]) + dtypes[:self.nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - res_dtype, inargs, outargs) + dtypes[:self.nin], dtypes[self.nin:], inargs, outargs) + w_casting = space.w_None + w_op_dtypes = space.w_None for tf in need_to_cast: if tf: - raise oefmt(space.w_NotImplementedError, "casting not supported yet") + w_casting = space.wrap('safe') + w_op_dtypes = space.newtuple([space.wrap(d) for d in dtypes]) + w_flags = space.w_None # NOT 'external_loop', we do coalescing by core_num_dims - w_op_flags = space.newtuple([space.wrap(r) for r in 
['readonly'] * len(inargs)] + \ - [space.wrap(r) for r in ['readwrite'] * len(outargs)]) - w_op_dtypes = space.w_None - w_casting = space.w_None + w_ro = space.newtuple([space.wrap('readonly'), space.wrap('copy')]) + w_rw = space.newtuple([space.wrap('readwrite'), space.wrap('updateifcopy')]) + + w_op_flags = space.newtuple([w_ro] * len(inargs) + [w_rw] * len(outargs)) w_op_axes = space.w_None - #print '\nsignature', sig - #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 'broad' in d] - #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' in d] - #print 'shapes',[d.get_shape() for d in inargs + outargs] - #print 'steps',[d.implementation.strides for d in inargs + outargs] if isinstance(func, W_GenericUFuncCaller): # Use GeneralizeUfunc interface with signature # Unlike numpy, we will not broadcast dims before @@ -934,19 +966,32 @@ # linear_search_type_resolver in numpy ufunc_type_resolutions.c # type_tup can be '', a tuple of dtypes, or a string # of the form d,t -> D where the letters are dtype specs - nop = len(inargs) + len(outargs) + + # XXX why does the next line not pass translation? 
+ # dtypes = [i.get_dtype() for i in inargs] dtypes = [] + for i in inargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) + for i in outargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) if isinstance(type_tup, str) and len(type_tup) > 0: try: if len(type_tup) == 1: - dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs + s_dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs elif len(type_tup) == self.nargs + 2: + s_dtypes = [] for i in range(self.nin): - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) #skip the '->' in the signature for i in range(self.nout): j = i + self.nin + 2 - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ @@ -955,42 +1000,26 @@ except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ " call to %s with type-string '%s'", self.name, type_tup) - else: - # XXX why does the next line not pass translation? 
- # dtypes = [i.get_dtype() for i in inargs] - for i in inargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) - for i in outargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) + # Make sure args can be cast to dtypes + if not _match_dtypes(space, dtypes, s_dtypes, 0, "safe"): + _raise_err_msg(self, space, dtypes, s_dtypes) + dtypes = s_dtypes #Find the first matchup of dtypes with _dtypes for i in range(0, len(_dtypes), self.nargs): - allok = True - for j in range(self.nargs): - if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: - allok = False + allok = _match_dtypes(space, dtypes, _dtypes, i, "no") if allok: break else: - if len(self.funcs) > 1: - - dtypesstr = '' - for d in dtypes: - if d is None: - dtypesstr += 'None,' - else: - dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) - _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ - for d in _dtypes]) - raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", - dtypesstr,_dtypesstr) - i = 0 + # No exact matches, can we cast? 
+ for i in range(0, len(_dtypes), self.nargs): + allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") + if allok: + dtypes = _dtypes[i:i+self.nargs] + break + else: + if len(self.funcs) > 1: + _raise_err_msg(self, space, dtypes, _dtypes) + i = 0 # Fill in empty dtypes for j in range(self.nargs): if dtypes[j] is None: From noreply at buildbot.pypy.org Tue Oct 13 17:12:09 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 17:12:09 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: typo Message-ID: <20151013151209.0BF991C103D@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: vecopt-merge Changeset: r80163:6c294a519052 Date: 2015-10-13 18:12 +0300 http://bitbucket.org/pypy/pypy/changeset/6c294a519052/ Log: typo diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -83,7 +83,7 @@ result = MODEL_X86 else: result = MODEL_X86_NO_SSE2 - if detect_feature.detect_x32_mode(): + if feature.detect_x32_mode(): raise ProcessorAutodetectError( 'JITting in x32 mode is not implemented') From noreply at buildbot.pypy.org Tue Oct 13 17:52:38 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 17:52:38 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: extending the structure, added first test to check the assembly of int_add Message-ID: <20151013155238.157481C14AF@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80164:f086429ff834 Date: 2015-10-13 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/f086429ff834/ Log: extending the structure, added first test to check the assembly of int_add diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -115,7 +115,7 @@ elif backend_name == MODEL_ARM: return "rpython.jit.backend.arm.runner", "CPU_ARM" elif backend_name == 
MODEL_S390_64: - return "rpython.jit.backend.zarch.runner", "CPU_ZARCH" + return "rpython.jit.backend.zarch.runner", "CPU_S390_64" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/assembler.py @@ -0,0 +1,6 @@ +from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler + +class AssemblerZARCH(BaseAssembler): + def emit_op_int_add(self, op): + pass + diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/locations.py @@ -0,0 +1,3 @@ + + +imm = None diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -3,5 +3,5 @@ class AbstractZARCHCPU(AbstractLLCPU): pass -class CPU_S390X(AbstractZARCHCPU): +class CPU_S390_64(AbstractZARCHCPU): pass diff --git a/rpython/jit/backend/zarch/test/conftest.py b/rpython/jit/backend/zarch/test/conftest.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/conftest.py @@ -0,0 +1,13 @@ +""" +This disables the backend tests on non zarch platforms. +Note that you need "--slow" to run translation tests. 
+""" +import py, os +from rpython.jit.backend import detect_cpu + +cpu = detect_cpu.autodetect() + +def pytest_collect_directory(path, parent): + if not cpu.startswith('s390x'): + py.test.skip("zarch tests skipped: cpu is %r" % (cpu,)) +pytest_collect_file = pytest_collect_directory diff --git a/rpython/jit/backend/zarch/test/support.py b/rpython/jit/backend/zarch/test/support.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/support.py @@ -0,0 +1,4 @@ + + +def run_asm(): + pass diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -0,0 +1,37 @@ +from rpython.jit.backend.zarch import conditions as c +from rpython.jit.backend.zarch import registers as r +from rpython.jit.backend.zarch.assembler import AssemblerZARCH +from rpython.jit.backend.zarch.locations import imm +from rpython.jit.backend.zarch.test.support import run_asm +from rpython.jit.backend.detect_cpu import getcpuclass +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.codewriter import longlong + +from rpython.rtyper.annlowlevel import llhelper +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.jit.metainterp.history import JitCellToken +from rpython.jit.backend.model import CompiledLoopToken +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.annlowlevel import llhelper +from rpython.rlib.objectmodel import specialize +from rpython.rlib.debug import ll_assert + +CPU = getcpuclass() + + +class TestRunningAssembler(object): + def setup_method(self, method): + cpu = CPU(None, None) + self.a = AssemblerZARCH(cpu) + self.a.setup_once() + token = JitCellToken() + clt = CompiledLoopToken(cpu, 0) + clt.allgcrefs = [] + token.compiled_loop_token = clt + self.a.setup(token) + + def test_make_operation_list(self): + i = rop.INT_ADD + from rpython.jit.backend.zarch 
import assembler + assert assembler.asm_operations[i] \ + is AssemblerZARCH.emit_op_int_add.im_func From noreply at buildbot.pypy.org Tue Oct 13 18:02:06 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 13 Oct 2015 18:02:06 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: skipping test that require a cpu that has an implemented vector ISA Message-ID: <20151013160206.0F6801C1517@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80165:36a9712f2c6d Date: 2015-10-13 18:02 +0200 http://bitbucket.org/pypy/pypy/changeset/36a9712f2c6d/ Log: skipping test that require a cpu that has an implemented vector ISA diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py --- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py @@ -13,6 +13,11 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation, AbstractValue from rpython.jit.tool.oparser import parse as opparse from rpython.jit.tool.oparser_model import get_model +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) class FakeMemoryRef(object): def __init__(self, array, iv): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py --- a/rpython/jit/metainterp/optimizeopt/test/test_schedule.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_schedule.py @@ -15,6 +15,11 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation from rpython.jit.tool.oparser import parse as opparse from rpython.jit.tool.oparser_model import get_model +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) 
class FakeVecScheduleState(VecScheduleState): def __init__(self): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -24,6 +24,11 @@ from rpython.jit.backend.llsupport.descr import ArrayDescr from rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph from rpython.jit.tool.oparser import OpParser +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) class FakeJitDriverStaticData(object): vec=True diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py --- a/rpython/jit/metainterp/test/test_vector.py +++ b/rpython/jit/metainterp/test/test_vector.py @@ -13,6 +13,11 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, free_raw_storage, raw_storage_getitem) from rpython.rlib.objectmodel import specialize, is_annotation_constant +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) @specialize.argtype(0,1) def malloc(T,n): From noreply at buildbot.pypy.org Tue Oct 13 18:34:11 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 18:34:11 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Remove class specialisation Message-ID: <20151013163411.600B51C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80166:14ab1bb7584d Date: 2015-10-13 16:54 +0100 http://bitbucket.org/pypy/pypy/changeset/14ab1bb7584d/ Log: Remove class specialisation diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ 
b/rpython/annotator/description.py @@ -417,8 +417,7 @@ _detect_invalid_attrs = None def __init__(self, bookkeeper, cls, - name=None, basedesc=None, classdict=None, - specialize=None): + name=None, basedesc=None, classdict=None): super(ClassDesc, self).__init__(bookkeeper, cls) if '__NOT_RPYTHON__' in cls.__dict__: raise AnnotatorError('Bad class') @@ -430,9 +429,10 @@ if classdict is None: classdict = {} # populated below self.classdict = classdict # {attr: Constant-or-Desc} - if specialize is None: - specialize = cls.__dict__.get('_annspecialcase_', '') - self.specialize = specialize + if cls.__dict__.get('_annspecialcase_', ''): + raise AnnotatorError( + "Class specialization has been removed. The " + "'_annspecialcase_' class tag is now unsupported.") self._classdefs = {} if is_mixin(cls): @@ -516,16 +516,6 @@ # for debugging if not hasattr(value, 'class_'): value.class_ = self.pyobj - if self.specialize: - # make a custom funcdesc that specializes on its first - # argument (i.e. 'self'). - from rpython.annotator.specialize import specialize_argtype - def argtype0(funcdesc, args_s): - return specialize_argtype(funcdesc, args_s, 0) - funcdesc = FunctionDesc(self.bookkeeper, value, - specializer=argtype0) - self.classdict[name] = funcdesc - return if mixin: # make a new copy of the FunctionDesc for this class, # but don't specialize further for all subclasses @@ -618,26 +608,11 @@ return classdef def getuniqueclassdef(self): - if self.specialize: - raise Exception("not supported on class %r because it needs " - "specialization" % (self.name,)) return self.getclassdef(None) def pycall(self, whence, args, s_previous_result, op=None): from rpython.annotator.model import SomeInstance, SomeImpossibleValue - if self.specialize: - if self.specialize == 'specialize:ctr_location': - # We use the SomeInstance annotation returned the last time - # to make sure we use the same ClassDef this time. 
- if isinstance(s_previous_result, SomeInstance): - classdef = s_previous_result.classdef - else: - classdef = self.getclassdef(object()) - else: - raise Exception("unsupported specialization tag: %r" % ( - self.specialize,)) - else: - classdef = self.getuniqueclassdef() + classdef = self.getuniqueclassdef() s_instance = SomeInstance(classdef) # look up __init__ directly on the class, bypassing the normal # lookup mechanisms ClassDef (to avoid influencing Attribute placement) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -758,19 +758,6 @@ s = a.build_types(snippet.call_star_args_multiple, [int]) assert s.knowntype == int - def test_class_spec(self): - a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - s = a.build_types(snippet.class_spec, []) - assert s.items[0].knowntype == int - assert s.items[1].knowntype == str - - def test_class_spec_confused(self): - x = snippet.PolyStk() - def f(): - return x - a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - py.test.raises(Exception, a.build_types, f, []) - def test_exception_deduction_with_raise1(self): a = self.RPythonAnnotator() s = a.build_types(snippet.exception_deduction_with_raise1, [bool]) @@ -3298,8 +3285,8 @@ b.x = str(n) return len(b.x) + a.x a = self.RPythonAnnotator() - s = a.build_types(f, [int]) - assert isinstance(s, annmodel.SomeInteger) + with py.test.raises(annmodel.AnnotatorError): + s = a.build_types(f, [int]) def test_weakref(self): import weakref diff --git a/rpython/translator/test/snippet.py b/rpython/translator/test/snippet.py --- a/rpython/translator/test/snippet.py +++ b/rpython/translator/test/snippet.py @@ -1035,33 +1035,6 @@ raise Exception -# class specialization - -class PolyStk: - _annspecialcase_ = "specialize:ctr_location" - - def __init__(self): - self.itms = [] - - def push(self, v): - self.itms.append(v) - - def top(self): - 
return self.itms[-1] - - -def class_spec(): - istk = PolyStk() - istk.push(1) - sstk = PolyStk() - sstk.push("a") - istk.push(2) - sstk.push("b") - #if not isinstance(istk, PolyStk): - # return "confused" - return istk.top(), sstk.top() - - from rpython.rlib.rarithmetic import ovfcheck def add_func(i=numtype): From noreply at buildbot.pypy.org Tue Oct 13 18:34:13 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 18:34:13 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Store the now-unique classdef in a plain attribute rather than a length-1 dict Message-ID: <20151013163413.89B2A1C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80167:519e2de59249 Date: 2015-10-13 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/519e2de59249/ Log: Store the now-unique classdef in a plain attribute rather than a length-1 dict diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -162,9 +162,7 @@ s_callable.consider_call_site(args, s_result, call_op) def getuniqueclassdef(self, cls): - """Get the ClassDef associated with the given user cls. - Avoid using this! It breaks for classes that must be specialized. - """ + """Get the ClassDef associated with the given user cls.""" assert cls is not object desc = self.getdesc(cls) return desc.getuniqueclassdef() diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -433,7 +433,7 @@ raise AnnotatorError( "Class specialization has been removed. 
The " "'_annspecialcase_' class tag is now unsupported.") - self._classdefs = {} + self.classdef = None if is_mixin(cls): raise AnnotatorError("cannot use directly the class %r because " @@ -573,17 +573,14 @@ for name, value in cls.__dict__.items(): self.add_source_attribute(name, value) - def getallclassdefs(self): - return self._classdefs.values() - def getclassdef(self, key): - try: - return self._classdefs[key] - except KeyError: + if self.classdef is not None: + return self.classdef + else: from rpython.annotator.classdef import ClassDef classdef = ClassDef(self.bookkeeper, self) self.bookkeeper.classdefs.append(classdef) - self._classdefs[key] = classdef + self.classdef = classdef # forced attributes cls = self.pyobj From noreply at buildbot.pypy.org Tue Oct 13 18:34:15 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 18:34:15 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Remove unnecessary else: Message-ID: <20151013163415.A8BB61C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80168:17ca1e3d42c6 Date: 2015-10-13 17:33 +0100 http://bitbucket.org/pypy/pypy/changeset/17ca1e3d42c6/ Log: Remove unnecessary else: diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -576,33 +576,32 @@ def getclassdef(self, key): if self.classdef is not None: return self.classdef - else: - from rpython.annotator.classdef import ClassDef - classdef = ClassDef(self.bookkeeper, self) - self.bookkeeper.classdefs.append(classdef) - self.classdef = classdef + from rpython.annotator.classdef import ClassDef + classdef = ClassDef(self.bookkeeper, self) + self.bookkeeper.classdefs.append(classdef) + self.classdef = classdef - # forced attributes - cls = self.pyobj - if cls in FORCE_ATTRIBUTES_INTO_CLASSES: - for name, s_value in FORCE_ATTRIBUTES_INTO_CLASSES[cls].items(): - 
classdef.generalize_attr(name, s_value) - classdef.find_attribute(name).modified(classdef) + # forced attributes + cls = self.pyobj + if cls in FORCE_ATTRIBUTES_INTO_CLASSES: + for name, s_value in FORCE_ATTRIBUTES_INTO_CLASSES[cls].items(): + classdef.generalize_attr(name, s_value) + classdef.find_attribute(name).modified(classdef) - # register all class attributes as coming from this ClassDesc - # (as opposed to prebuilt instances) - classsources = {} - for attr in self.classdict: - classsources[attr] = self # comes from this ClassDesc - classdef.setup(classsources) - # look for a __del__ method and annotate it if it's there - if '__del__' in self.classdict: - from rpython.annotator.model import s_None, SomeInstance - s_func = self.s_read_attribute('__del__') - args_s = [SomeInstance(classdef)] - s = self.bookkeeper.emulate_pbc_call(classdef, s_func, args_s) - assert s_None.contains(s) - return classdef + # register all class attributes as coming from this ClassDesc + # (as opposed to prebuilt instances) + classsources = {} + for attr in self.classdict: + classsources[attr] = self # comes from this ClassDesc + classdef.setup(classsources) + # look for a __del__ method and annotate it if it's there + if '__del__' in self.classdict: + from rpython.annotator.model import s_None, SomeInstance + s_func = self.s_read_attribute('__del__') + args_s = [SomeInstance(classdef)] + s = self.bookkeeper.emulate_pbc_call(classdef, s_func, args_s) + assert s_None.contains(s) + return classdef def getuniqueclassdef(self): return self.getclassdef(None) From noreply at buildbot.pypy.org Tue Oct 13 19:05:24 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 19:05:24 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Extract classdef init into a separate method Message-ID: <20151013170524.E53081C13BE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80169:fd655258c3c8 Date: 2015-10-13 18:05 +0100 
http://bitbucket.org/pypy/pypy/changeset/fd655258c3c8/ Log: Extract classdef init into a separate method diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -574,8 +574,9 @@ self.add_source_attribute(name, value) def getclassdef(self, key): - if self.classdef is not None: - return self.classdef + return self.getuniqueclassdef() + + def _init_classdef(self): from rpython.annotator.classdef import ClassDef classdef = ClassDef(self.bookkeeper, self) self.bookkeeper.classdefs.append(classdef) @@ -604,7 +605,9 @@ return classdef def getuniqueclassdef(self): - return self.getclassdef(None) + if self.classdef is None: + self._init_classdef() + return self.classdef def pycall(self, whence, args, s_previous_result, op=None): from rpython.annotator.model import SomeInstance, SomeImpossibleValue From noreply at buildbot.pypy.org Tue Oct 13 19:37:00 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 19:37:00 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Trying to add specific GC support to speed up and fix cpyext's mappings Message-ID: <20151013173700.333101C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80170:f144e3ced550 Date: 2015-10-13 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/f144e3ced550/ Log: Trying to add specific GC support to speed up and fix cpyext's mappings between W_Root and PyObject. 
From noreply at buildbot.pypy.org Tue Oct 13 19:37:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 19:37:02 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Add discussion Message-ID: <20151013173702.5C9CD1C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80171:b0e71761ec30 Date: 2015-10-13 19:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b0e71761ec30/ Log: Add discussion diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/rawrefcount.rst @@ -0,0 +1,81 @@ +====================== +Rawrefcount and the GC +====================== + + +GC Interface +------------ + +"PyObject" is a raw structure with at least two fields, ob_refcnt and +ob_pypy_link. The ob_refcnt is the reference counter as used on +CPython. If the PyObject structure is linked to a live PyPy object, +its current address is stored in ob_pypy_link and ob_refcnt is bumped +by the constant REFCNT_FROM_PYPY_OBJECT. + +rawrefcount_create_link_from_pypy(p, ob) + + Makes a link between an exising object gcref 'p' and a newly + allocated PyObject structure 'ob'. Both must not be linked so far. + This adds REFCNT_FROM_PYPY_OBJECT to ob->ob_refcnt. + +rawrefcount_create_link_to_pypy(p, ob) + + Makes a link from an existing PyObject structure 'ob' to a newly + allocated W_CPyExtPlaceHolderObject 'p'. The 'p' should have a + back reference field pointing to 'ob'. This also adds + REFCNT_FROM_PYPY_OBJECT to ob->ob_refcnt. + +rawrefcount_from_obj(p) + + If there is a link from object 'p', and 'p' is not a + W_CPyExtPlaceHolderObject, returns the corresponding 'ob'. + Otherwise, returns NULL. + +rawrefcount_to_obj(ob) + + Returns ob->ob_pypy_link, cast to a GCREF. + + +Collection logic +---------------- + +Objects exising purely on the C side have ob->ob_from_pypy == NULL; +these are purely reference counted. 
On the other hand, if +ob->ob_from_pypy != NULL, then ob->ob_refcnt is at least +REFCNT_FROM_PYPY_OBJECT and the object is part of a "link". + +The idea is that links whose 'p' is not reachable from other PyPy +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY_OBJECT are the +ones who die. But it is more messy because links created with +rawrefcount_create_link_to_pypy() need to have a deallocator called, +and this cannot occur immediately (and can do random things like +accessing other references this object points to, or resurrecting the +object). + +Let P = list of links created with rawrefcount_create_link_from_pypy() +and O = list of links created with rawrefcount_create_link_to_pypy(). +The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all +the data is in the PyObjects, and all references are regular +CPython-like reference counts. It is the opposite with the P links: +all references are regular PyPy references from the 'p' object, and +the 'ob' is trivial. + +So, after the collection we do this about P links: + + for (p, ob) in P: + if ob->ob_refcnt != REFCNT_FROM_PYPY_OBJECT: + mark 'p' as surviving, as well as all its dependencies + + for (p, ob) in P: + if p is not surviving: + unlink p and ob, free ob + +Afterwards, the O links are handled like this: + + for (p, ob) in O: + # p is trivial: it cannot point to other PyPy objects + if p is not surviving: + unlink p and ob + ob->ob_refcnt -= REFCNT_FROM_PYPY_OBJECT + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC From noreply at buildbot.pypy.org Tue Oct 13 20:00:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 20:00:13 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: GC implementation Message-ID: <20151013180013.6E70E1C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80172:26ca7b912f8c Date: 2015-10-13 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/26ca7b912f8c/ Log: GC 
implementation diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -39,7 +39,7 @@ Collection logic ---------------- -Objects exising purely on the C side have ob->ob_from_pypy == NULL; +Objects existing purely on the C side have ob->ob_from_pypy == NULL; these are purely reference counted. On the other hand, if ob->ob_from_pypy != NULL, then ob->ob_refcnt is at least REFCNT_FROM_PYPY_OBJECT and the object is part of a "link". @@ -79,3 +79,20 @@ ob->ob_refcnt -= REFCNT_FROM_PYPY_OBJECT if ob->ob_refcnt == 0: invoke _Py_Dealloc(ob) later, outside the GC + + +GC Implementation +----------------- + +We need two P lists and two O lists, for young or old objects. All +four lists can actually be linked lists of 'ob', using yet another +field 'ob_pypy_next'; or they can be regular AddressLists (unsure +about the overhead of this extra field for all PyObjects -- even ones +not linked to PyPy objects). + +We also need an AddressDict mapping 'p' to 'ob' for all links in the P +list. This dict contains both young and old 'p'; we simply write a +new entry when the object moves. As a result it can contain some +extra garbage entries after some minor collections. It is cleaned up +by being rebuilt at the next major collection. We never walk all +items of that dict; we only walk the two explicit P lists. 
From noreply at buildbot.pypy.org Tue Oct 13 20:52:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 13 Oct 2015 20:52:15 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Further notes Message-ID: <20151013185215.6E0431C103D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80173:6bf5e57af9d4 Date: 2015-10-13 20:52 +0200 http://bitbucket.org/pypy/pypy/changeset/6bf5e57af9d4/ Log: Further notes diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -22,7 +22,7 @@ Makes a link from an existing PyObject structure 'ob' to a newly allocated W_CPyExtPlaceHolderObject 'p'. The 'p' should have a - back reference field pointing to 'ob'. This also adds + back-reference field pointing to 'ob'. This also adds REFCNT_FROM_PYPY_OBJECT to ob->ob_refcnt. rawrefcount_from_obj(p) @@ -96,3 +96,18 @@ extra garbage entries after some minor collections. It is cleaned up by being rebuilt at the next major collection. We never walk all items of that dict; we only walk the two explicit P lists. + + +Further notes +------------- + +For small immutable types like <int> and <float>, we can actually +create a PyIntObject as a complete copy of the W_IntObject whenever +asked, and not record any link. Is it cheaper? Unclear. + +A few special types need to be reflected both as PyPy objects and +PyObjects. For now we assume that these are large and mostly +immutable, like <type> objects. They should be linked in the O list, +and we'll ignore the issues of deallocation ordering for them. (Also, +W_TypeObject can have a back-reference field like +W_CPyExtPlaceHolderObject.) 
From noreply at buildbot.pypy.org Tue Oct 13 21:54:38 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 21:54:38 +0200 (CEST) Subject: [pypy-commit] pypy raises(Exception)-must-die: modernise syntax of some py.test.raises calls Message-ID: <20151013195438.7774A1C103D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: raises(Exception)-must-die Changeset: r80174:33d415293c92 Date: 2015-10-13 20:00 +0100 http://bitbucket.org/pypy/pypy/changeset/33d415293c92/ Log: modernise syntax of some py.test.raises calls diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -769,7 +769,8 @@ def f(): return x a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) def test_exception_deduction_with_raise1(self): a = self.RPythonAnnotator() @@ -959,14 +960,16 @@ def f(): return large_constant a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it def test_add_different_ints(self): def f(a, b): return a + b a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(Exception): + a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): def f(a, b): @@ -976,7 +979,8 @@ c = b return c a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(Exception): + a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): def f(a): @@ -2694,7 +2698,8 @@ return a.x # should explode here a = self.RPythonAnnotator() - e = py.test.raises(Exception, a.build_types, f, [int]) + with py.test.raises(Exception) as excinfo: + a.build_types(f, [int]) # this should explode on 
reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. # But it looks hard to fix in general: we don't know yet during 'a.x' @@ -2928,7 +2933,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(Exception): + a.build_types(fun, [int, int]) def test_sig_simpler(self): def fun(x, y): @@ -2940,7 +2946,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(Exception): + a.build_types(fun, [int, int]) def test_sig_lambda(self): def fun(x, y): @@ -2954,7 +2961,8 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) + with py.test.raises(Exception): + a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): def g(x, y=5): @@ -3138,7 +3146,8 @@ return a.n() a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fun, [bool]) + with py.test.raises(Exception): + a.build_types(fun, [bool]) def test_float_cmp(self): def fun(x, y): @@ -3243,7 +3252,8 @@ i.x = x a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) class M: @@ -3342,7 +3352,8 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int, int]) + with py.test.raises(Exception): + a.build_types(f, [int, int]) def test_compare_with_zero(self): def g(): @@ -4102,7 +4113,8 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fn, []) + with py.test.raises(Exception): + a.build_types(fn, []) def test_lower_char(self): def fn(c): diff --git a/rpython/rlib/test/test_rweakkeydict.py 
b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -126,7 +126,9 @@ else: d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(Exception): + interpret(g, [1]) def g(x): if x: @@ -134,7 +136,9 @@ else: d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(Exception): + interpret(g, [1]) def test_rpython_free_values(): diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -143,7 +143,9 @@ else: d = RWeakValueDictionary(str, Y) d.set("x", X()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(Exception): + interpret(g, [1]) def test_rpython_RWeakValueDictionary_or_None(): diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -152,7 +152,8 @@ a.translator.config.translation.check_str_without_nul=True def g(s): return os_open(s) - py.test.raises(Exception, a.build_types, g, [str]) + with py.test.raises(Exception): + a.build_types(g, [str]) a.build_types(g, [str0]) # Does not raise def test_list_of_str0(self): @@ -170,7 +171,8 @@ a.translator.config.translation.check_str_without_nul=True def g(l): return os_execve(l) - py.test.raises(Exception, a.build_types, g, [[str]]) + with py.test.raises(Exception): + a.build_types(g, [[str]]) a.build_types(g, [[str0]]) # Does not raise From noreply at buildbot.pypy.org Tue Oct 13 21:54:40 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 13 Oct 2015 21:54:40 +0200 (CEST) Subject: [pypy-commit] pypy raises(Exception)-must-die: make some tests and exceptions more precise Message-ID: <20151013195440.B6D601C103D@cobra.cs.uni-duesseldorf.de> 
Author: Ronan Lamy Branch: raises(Exception)-must-die Changeset: r80175:0b8a0b568238 Date: 2015-10-13 20:54 +0100 http://bitbucket.org/pypy/pypy/changeset/0b8a0b568238/ Log: make some tests and exceptions more precise diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from contextlib import contextmanager from rpython.flowspace.model import Constant -from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, +from rpython.annotator.model import ( + SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, + SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) + SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -225,7 +225,8 @@ x = int(x) result = SomeInteger(nonneg = x>=0) else: - raise Exception("seeing a prebuilt long (value %s)" % hex(x)) + # XXX: better error reporting? 
+ raise ValueError("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses no_nul = not '\x00' in x if len(x) == 1: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -112,14 +112,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -801,8 +801,9 @@ s_init = basedesc.s_read_attribute('__init__') parent_has_init = isinstance(s_init, SomePBC) if has_init and not parent_has_init: - raise Exception("some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) # make a PBC of MethodDescs, one for the __init__ of each class initdescs = [] for desc, classdef in zip(descs, classdefs): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4,10 +4,12 @@ from rpython.conftest import option from rpython.annotator import model as annmodel +from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator +from rpython.annotator.classdef import NoSuchAttrError from 
rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy -from rpython.annotator.signature import Sig +from rpython.annotator.signature import Sig, SignatureError from rpython.annotator.listdef import ListDef, ListChangeUnallowed from rpython.annotator.dictdef import DictDef from rpython.flowspace.model import * @@ -213,7 +215,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -360,7 +362,7 @@ def f(l): return g(*l) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [[int]]) def test_star_unpack_and_keywords(self): @@ -960,7 +962,7 @@ def f(): return large_constant a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(ValueError): a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it @@ -968,7 +970,7 @@ def f(a, b): return a + b a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(UnionError): a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): @@ -979,7 +981,7 @@ c = b return c a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(UnionError): a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): @@ -2616,14 +2618,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + py.test.raises(AnnotatorError, a.build_types, g, []) def test_import_from_mixin(self): class M(object): @@ -2698,7 +2700,7 @@ return a.x # should explode here a = self.RPythonAnnotator() - with 
py.test.raises(Exception) as excinfo: + with py.test.raises(NoSuchAttrError) as excinfo: a.build_types(f, [int]) # this should explode on reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. @@ -2933,7 +2935,7 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - with py.test.raises(Exception): + with py.test.raises(SignatureError): a.build_types(fun, [int, int]) def test_sig_simpler(self): @@ -2946,7 +2948,7 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - with py.test.raises(Exception): + with py.test.raises(SignatureError): a.build_types(fun, [int, int]) def test_sig_lambda(self): @@ -2961,7 +2963,7 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - with py.test.raises(Exception): + with py.test.raises(SignatureError): a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): @@ -3012,8 +3014,8 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError - py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) + with py.test.raises(NoSuchAttrError): + a.build_types(fun, [int]) def test_slots_enforce_attrs(self): class Superbase(object): @@ -3146,7 +3148,7 @@ return a.n() a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): a.build_types(fun, [bool]) def test_float_cmp(self): @@ -3236,6 +3238,7 @@ assert isinstance(s.items[2], annmodel.SomeInstance) assert s.items[2].flags == {} + @py.test.mark.xfail def test_no_access_directly_on_heap(self): from rpython.rlib.jit import hint @@ -3252,7 +3255,7 @@ i.x = x a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): a.build_types(f, []) @@ -3277,7 +3280,7 @@ c.m.l.append(x) a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, 
f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3285,7 +3288,7 @@ c.m.d[None] = x a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3293,7 +3296,7 @@ c.m.d[x] = None a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_ctr_location(self): class A: @@ -3352,7 +3355,7 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(UnionError): a.build_types(f, [int, int]) def test_compare_with_zero(self): @@ -3475,22 +3478,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3528,7 +3531,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -3541,20 +3544,20 @@ return "xyz".find("x", s, e) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + 
py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".rfind("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".count("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) @@ -3728,7 +3731,8 @@ raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + with py.test.raises(AssertionError): + a.build_types(f, []) def test_enumerate(self): def f(): @@ -4113,7 +4117,7 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(NoSuchAttrError): a.build_types(fn, []) def test_lower_char(self): @@ -4226,7 +4230,7 @@ return "bbb" a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) the_exc = exc.value @@ -4242,7 +4246,7 @@ return (1, 2) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg @@ -4255,7 +4259,7 @@ return -1 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot prove that these integers are of the " @@ -4272,7 +4276,7 @@ return B() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + 
with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify instances with no common base class" @@ -4288,7 +4292,7 @@ return d.itervalues() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify incompatible iterator variants" in @@ -4300,7 +4304,7 @@ a = A() return getattr(a, y) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("variable argument to getattr" in exc.value.msg) @@ -4308,7 +4312,7 @@ def f(x): return x() a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) @@ -4317,7 +4321,7 @@ def f(x): l.append(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as excinfo: + with py.test.raises(UnionError) as excinfo: a.build_types(f, [int]) assert 'Happened at file' in excinfo.value.source assert 'Known variable annotations:' in excinfo.value.source @@ -4326,7 +4330,7 @@ def f(s, x): return s.format(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) @@ -4362,7 +4366,7 @@ def f(x): a, b = x a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, + py.test.raises(AnnotatorError, a.build_types, f, [annmodel.s_None]) def test_class___name__(self): @@ -4476,10 +4480,10 @@ o = O2(n) o.x = 20 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f1, [int]) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as 
exc: a.build_types(f2, [int]) def test_property_union_2(self): @@ -4508,7 +4512,7 @@ a = self.RPythonAnnotator() # Ideally, this should translate to something sensible, # but for now, AnnotatorError is better than silently mistranslating. - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_property_union_3(self): @@ -4528,7 +4532,7 @@ obj = B() return obj.x a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_dict_can_be_none_ordering_issue(self): diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -3,6 +3,7 @@ from rpython.annotator.test.test_annrpython import graphof from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent +from rpython.annotator.model import AnnotatorError class TestAnnotateAndSimplifyTestCase(parent): @@ -132,5 +133,5 @@ cls = C return cls().foo a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) From noreply at buildbot.pypy.org Tue Oct 13 23:44:36 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:44:36 +0200 (CEST) Subject: [pypy-commit] cffi default: remove _hack_at_distutils which imports setuptools on win32, it has too many side effects. Message-ID: <20151013214436.900C71C1464@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r2336:51f1337c9b4c Date: 2015-10-11 23:10 +0300 http://bitbucket.org/cffi/cffi/changeset/51f1337c9b4c/ Log: remove _hack_at_distutils which imports setuptools on win32, it has too many side effects. 
Not needed since 1.0 since developers can distribute binary packages, even on win32 diff --git a/cffi/ffiplatform.py b/cffi/ffiplatform.py --- a/cffi/ffiplatform.py +++ b/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # From noreply at buildbot.pypy.org Tue Oct 13 23:44:38 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:44:38 +0200 (CEST) Subject: [pypy-commit] cffi default: document hack removal, leave hack in place for backward compatability with verify() Message-ID: <20151013214438.8F12F1C1464@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r2337:46f1c764af88 Date: 2015-10-13 23:59 +0300 http://bitbucket.org/cffi/cffi/changeset/46f1c764af88/ Log: document hack removal, leave hack in place for backward compatability with verify() diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -22,6 +22,15 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7.9) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +121,7 @@ return basename def 
get_extension(self): + _hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -137,6 +137,10 @@ The recommended C compiler compatible with Python 2.7 is this one: http://www.microsoft.com/en-us/download/details.aspx?id=44266 +There is a known problem with distutils on Python 2.7.9, as +explained in https://bugs.python.org/issue23246, and the same +problem applies whenever you want to run compile() to build a dll. +``import setuptools`` might help, but YMMV For Python 3.4 and beyond: https://www.visualstudio.com/en-us/downloads/visual-studio-2015-ctp-vs @@ -146,7 +150,8 @@ ++++++++++ Win64 received very basic testing and we applied a few essential -fixes in cffi 0.7. Please report any other issue. +fixes in cffi 0.7. The comment above applies for Python 2.7 on +Windows 64 as well. Please report any other issue. Note as usual that this is only about running the 64-bit version of Python on the 64-bit OS. 
If you're running the 32-bit version (the From noreply at buildbot.pypy.org Tue Oct 13 23:44:40 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:44:40 +0200 (CEST) Subject: [pypy-commit] cffi default: try to be more accurate Message-ID: <20151013214440.877291C1464@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r2338:8628e3d133c1 Date: 2015-10-14 00:06 +0300 http://bitbucket.org/cffi/cffi/changeset/8628e3d133c1/ Log: try to be more accurate diff --git a/cffi/verifier.py b/cffi/verifier.py --- a/cffi/verifier.py +++ b/cffi/verifier.py @@ -24,7 +24,8 @@ def _hack_at_distutils(): # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) if sys.platform == "win32": try: import setuptools # for side-effects, patches distutils diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -137,9 +137,10 @@ The recommended C compiler compatible with Python 2.7 is this one: http://www.microsoft.com/en-us/download/details.aspx?id=44266 -There is a known problem with distutils on Python 2.7.9, as +There is a known problem with distutils on Python 2.7, as explained in https://bugs.python.org/issue23246, and the same -problem applies whenever you want to run compile() to build a dll. +problem applies whenever you want to run compile() to build a dll with +this specific compiler suite download. 
``import setuptools`` might help, but YMMV For Python 3.4 and beyond: From noreply at buildbot.pypy.org Tue Oct 13 23:57:42 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:42 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: cleanup, fix translation Message-ID: <20151013215742.98AC31C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80176:23e41a855538 Date: 2015-10-13 21:35 +0300 http://bitbucket.org/pypy/pypy/changeset/23e41a855538/ Log: cleanup, fix translation diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -139,10 +139,10 @@ test_iter, test_state = out_iters[0], out_states[0] while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - in_dtypes=in_dtypes, out_dtypes=out_dtypes, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) - print 'vals', vals w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well @@ -156,7 +156,7 @@ out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) - test_iter.next(test_state) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -820,12 +820,13 @@ iter_shape, arg_shapes, matched_dims = self.verify_args(space, inargs, outargs) inargs, outargs, need_to_cast = self.alloc_args(space, inargs, outargs, dtypes, arg_shapes) - print 'call', 
self.external_loop, need_to_cast, dtypes, [a.get_dtype() for a in inargs], [a.get_dtype() for a in outargs] if not self.external_loop: inargs0 = inargs[0] outargs0 = outargs[0] assert isinstance(inargs0, W_NDimArray) assert isinstance(outargs0, W_NDimArray) + nin = self.nin + assert nin >= 0 res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap outargs using __array_wrap__ @@ -834,12 +835,12 @@ dtypes, [], inargs + outargs, []) if len(outargs) < 2: return outargs[0] - return outargs + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - dtypes[:self.nin], dtypes[-1], inargs, outargs[0]) + dtypes[:nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - dtypes[:self.nin], dtypes[self.nin:], inargs, outargs) + dtypes[:nin], dtypes[nin:], inargs, outargs) w_casting = space.w_None w_op_dtypes = space.w_None for tf in need_to_cast: @@ -1014,7 +1015,10 @@ for i in range(0, len(_dtypes), self.nargs): allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") if allok: - dtypes = _dtypes[i:i+self.nargs] + end = i + self.nargs + assert i >= 0 + assert end >=0 + dtypes = _dtypes[i:end] break else: if len(self.funcs) > 1: @@ -1152,7 +1156,7 @@ # the current op (signalling it can handle ndarray's). 
# TODO parse and handle subok - # TODO handle flags, op_flags + # TODO handle more flags, op_flags #print 'iter_shape',iter_shape,'arg_shapes',arg_shapes,'matched_dims',matched_dims return iter_shape, arg_shapes, matched_dims From noreply at buildbot.pypy.org Tue Oct 13 23:57:44 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:44 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: test, fix divide-by-zero Message-ID: <20151013215744.A62B51C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80177:498bdeec8923 Date: 2015-10-13 23:48 +0300 http://bitbucket.org/pypy/pypy/changeset/498bdeec8923/ Log: test, fix divide-by-zero diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -198,6 +198,10 @@ ai2 = ufunc(aiV) assert (ai2 == aiV * 2).all() + ai = arange(0).reshape(0, 1, 1) + ao = ufunc(ai) + assert ao.shape == (0, 1, 1) + def test_frompyfunc_needs_nditer(self): import sys from numpy import frompyfunc, dtype, arange diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -1119,7 +1119,7 @@ for j in range(offset, len(iter_shape)): x = iter_shape[j + offset] y = dims_to_broadcast[j] - if (x > y and x % y) or y %x: + if y != 0 and x != 0 and ((x > y and x % y) or y %x): raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its broadcast dimension %d " "(size %d is different from %d)", From noreply at buildbot.pypy.org Tue Oct 13 23:57:48 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:48 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: merge default into branch Message-ID: <20151013215748.4908E1C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80178:19f479a3781d Date: 
2015-10-13 23:48 +0300 http://bitbucket.org/pypy/pypy/changeset/19f479a3781d/ Log: merge default into branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -68,3 +68,9 @@ .. branch: type_system-cleanup Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,14 +1,14 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, jit, jit_libffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel from rpython.rlib.objectmodel import keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt -from pypy.module._cffi_backend import cerrno, misc, handle +from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, W_CTypeFunc from pypy.module._cffi_backend.ctypeprim import W_CTypePrimitiveSigned @@ -19,6 +19,23 @@ # ____________________________________________________________ + at jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). 
+ cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -37,7 +54,8 @@ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -72,8 +90,6 @@ from pypy.module.thread.os_thread import setup_threads setup_threads(space) # - handle_index = handle.get_handles(space).reserve_next_handle_index() - # cif_descr = self.getfunctype().cif_descr if not cif_descr: raise oefmt(space.w_NotImplementedError, @@ -81,16 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, handle_index) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - _current_space.space = space - handle.get_handles(space).store_handle(handle_index, self) def _repr_extra(self): space = self.space @@ -221,12 +234,6 @@ except OperationError, e: _handle_applevel_exception(callback, e, ll_res, extra_line) -class CurrentSpace: - def _cleanup_(self): - if hasattr(self, 'space'): - del self.space -_current_space = 
CurrentSpace() - def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care @@ -236,10 +243,8 @@ (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - space = _current_space.space - callback = handle.get_handles(space).fetch_handle(unique_id) - if callback is None or not isinstance(callback, W_CDataCallback): + callback = reveal_callback(ll_userdata) + if callback is None: # oups! try: os.write(STDERR, "SystemError: invoking a callback " @@ -251,6 +256,7 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False try: must_leave = space.threadlocals.try_enter_thread(space) diff --git a/pypy/module/_cffi_backend/ffi_obj.py b/pypy/module/_cffi_backend/ffi_obj.py --- a/pypy/module/_cffi_backend/ffi_obj.py +++ b/pypy/module/_cffi_backend/ffi_obj.py @@ -294,9 +294,9 @@ CONSIDER_FN_AS_FNPTR) space = self.space if not space.is_none(w_python_callable): - return ccallback.W_CDataCallback(space, w_ctype, - w_python_callable, w_error, - w_onerror) + return ccallback.make_callback(space, w_ctype, + w_python_callable, w_error, + w_onerror) else: # decorator mode: returns a single-argument function return space.appexec([w_ctype, w_error, w_onerror], diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -24,8 +24,8 @@ @unwrap_spec(w_ctype=ctypeobj.W_CType) def callback(space, w_ctype, w_callable, w_error=None, w_onerror=None): - from pypy.module._cffi_backend.ccallback import W_CDataCallback - return W_CDataCallback(space, w_ctype, w_callable, w_error, w_onerror) + from pypy.module._cffi_backend.ccallback import make_callback + return make_callback(space, w_ctype, w_callable, w_error, w_onerror) # ____________________________________________________________ 
diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,24 +1,24 @@ +import py from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.baseobjspace import W_Root from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib import rweaklist - - -class CffiHandles(rweaklist.RWeakListMixin): - def __init__(self, space): - self.initialize() - -def get_handles(space): - return space.fromcache(CffiHandles) +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib import rgc, objectmodel, jit # ____________________________________________________________ + at jit.dont_look_inside def _newp_handle(space, w_ctype, w_x): - index = get_handles(space).reserve_next_handle_index() - _cdata = rffi.cast(rffi.CCHARP, index + 1) - new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) - get_handles(space).store_handle(index, new_cdataobj) + # Allocate a handle as a nonmovable W_CDataHandle instance, which + # we can cast to a plain CCHARP. As long as the object is not freed, + # we can cast the CCHARP back to a W_CDataHandle with reveal_gcref(). 
+ new_cdataobj = objectmodel.instantiate(cdataobj.W_CDataHandle, + nonmovable=True) + gcref = rgc.cast_instance_to_gcref(new_cdataobj) + _cdata = rgc.hide_nonmovable_gcref(gcref) + _cdata = rffi.cast(rffi.CCHARP, _cdata) + cdataobj.W_CDataHandle.__init__(new_cdataobj, space, _cdata, w_ctype, w_x) return new_cdataobj @unwrap_spec(w_ctype=ctypeobj.W_CType) @@ -38,14 +38,17 @@ "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) with w_cdata as ptr: - index = rffi.cast(lltype.Signed, ptr) - original_cdataobj = get_handles(space).fetch_handle(index - 1) - # - if isinstance(original_cdataobj, cdataobj.W_CDataHandle): - return original_cdataobj.w_keepalive - else: - if index == 0: - msg = "cannot use from_handle() on NULL pointer" - else: - msg = "'void *' value does not correspond to any object" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + return _reveal(space, ptr) + + at jit.dont_look_inside +def _reveal(space, ptr): + addr = rffi.cast(llmemory.Address, ptr) + gcref = rgc.reveal_gcref(addr) + if not gcref: + raise oefmt(space.w_RuntimeError, + "cannot use from_handle() on NULL pointer") + cd = rgc.try_cast_gcref_to_instance(cdataobj.W_CDataHandle, gcref) + if cd is None: + raise oefmt(space.w_SystemError, + "ffi.from_handle(): dead or bogus object handle") + return cd.w_keepalive diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py deleted file mode 100644 --- a/pypy/module/_cffi_backend/test/test_handle.py +++ /dev/null @@ -1,44 +0,0 @@ -import random -from pypy.module._cffi_backend.handle import CffiHandles - - -class PseudoWeakRef(object): - _content = 42 - - def __call__(self): - return self._content - - -def test_cffi_handles_1(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - 
expected_content[index] = pwr - ch.handles[index] = pwr - assert len(ch.handles) <= 16384 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr - -def test_cffi_handles_2(): - ch = CffiHandles(None) - expected_content = {} - for i in range(10000): - index = ch.reserve_next_handle_index() - assert 0 <= index < len(ch.handles) - assert ch.handles[index]() is None - pwr = PseudoWeakRef() - expected_content[index] = pwr - ch.handles[index] = pwr - # - if len(expected_content) > 20: - r = random.choice(list(expected_content)) - pwr = expected_content.pop(r) - pwr._content = None - # - assert len(ch.handles) < 100 - for index, pwr in expected_content.items(): - assert ch.handles[index] is pwr diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -290,7 +290,7 @@ return SomeInteger(knowntype=rpython.rlib.rarithmetic.r_longlong) @analyzer_for(rpython.rlib.objectmodel.instantiate) -def robjmodel_instantiate(s_clspbc): +def robjmodel_instantiate(s_clspbc, s_nonmovable=None): assert isinstance(s_clspbc, SomePBC) clsdef = None more_than_one = len(s_clspbc.descriptions) > 1 diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -508,6 +508,21 @@ self._store_and_reset_exception(self.mc, resloc) return fcond + def emit_op_save_exc_class(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self.mc.gen_load_int(r.ip.value, self.cpu.pos_exception()) + self.load_reg(self.mc, resloc, r.ip) + return fcond + + def emit_op_save_exception(self, op, arglocs, regalloc, fcond): + resloc = arglocs[0] + self._store_and_reset_exception(self.mc, resloc) + return fcond + + def emit_op_restore_exception(self, op, arglocs, regalloc, fcond): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + return fcond + def 
emit_op_debug_merge_point(self, op, arglocs, regalloc, fcond): return fcond emit_op_jit_debug = emit_op_debug_merge_point diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -707,6 +707,17 @@ [loc, loc1, resloc, pos_exc_value, pos_exception]) return arglocs + def prepare_op_save_exception(self, op, fcond): + resloc = self.force_allocate_reg(op) + return [resloc] + prepare_op_save_exc_class = prepare_op_save_exception + + def prepare_op_restore_exception(self, op, fcond): + boxes = op.getarglist() + loc0 = self.make_sure_var_in_reg(op.getarg(0), boxes) # exc class + loc1 = self.make_sure_var_in_reg(op.getarg(1), boxes) # exc instance + return [loc0, loc1] + def prepare_op_guard_no_exception(self, op, fcond): loc = self.make_sure_var_in_reg(ConstInt(self.cpu.pos_exception())) arglocs = self._prepare_guard(op, [loc]) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -45,8 +45,6 @@ # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) - elif opnum == rop.BRIDGE_EXCEPTION: - assert len(self.operations) == 0 # must be first if op.getdescr() is not None: if op.is_guard() or op.getopnum() == rop.FINISH: newdescr = op.getdescr() @@ -906,8 +904,9 @@ values.append(value) if hasattr(descr, '_llgraph_bridge'): if propagate_exception: - assert (descr._llgraph_bridge.operations[0].opnum == - rop.BRIDGE_EXCEPTION) + assert (descr._llgraph_bridge.operations[0].opnum in + (rop.SAVE_EXC_CLASS, rop.GUARD_EXCEPTION, + rop.GUARD_NO_EXCEPTION)) target = (descr._llgraph_bridge, -1) values = [value for value in values if value is not None] raise Jump(target, values) @@ -1229,8 +1228,32 @@ def execute_keepalive(self, descr, x): pass - def 
execute_bridge_exception(self, descr): - pass + def execute_save_exc_class(self, descr): + lle = self.last_exception + if lle is None: + return 0 + else: + return support.cast_to_int(lle.args[0]) + + def execute_save_exception(self, descr): + lle = self.last_exception + if lle is None: + res = lltype.nullptr(llmemory.GCREF.TO) + else: + res = lltype.cast_opaque_ptr(llmemory.GCREF, lle.args[1]) + self.last_exception = None + return res + + def execute_restore_exception(self, descr, kls, e): + kls = heaptracker.int2adr(kls) + if e: + value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, e) + assert llmemory.cast_ptr_to_adr(value.typeptr) == kls + lle = LLException(value.typeptr, e) + else: + assert kls == llmemory.NULL + lle = None + self.last_exception = lle def _getdescr(op): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -119,6 +119,7 @@ # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. # + operations = self.remove_bridge_exception(operations) for i in range(len(operations)): op = operations[i] assert op.get_forwarded() is None @@ -168,9 +169,6 @@ continue if op.getopnum() == rop.JUMP or op.getopnum() == rop.FINISH: self.emit_pending_zeros() - if op.getopnum() == rop.BRIDGE_EXCEPTION: - self.remove_bridge_exception(operations, i) - continue # self.emit_op(op) return self._newops @@ -686,13 +684,17 @@ size = max(size, 2 * WORD) return (size + WORD-1) & ~(WORD-1) # round up - def remove_bridge_exception(self, operations, i): - """Check that the 'bridge_exception' operation occurs at the - start of the bridge.""" - if i == 0: - return # first operation, ok - if i == 1 and operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: - return # 2nd operation after INCREMENT_DEBUG_COUNTER, ok - # not ok! 
- assert we_are_translated() - raise BridgeExceptionNotFirst + def remove_bridge_exception(self, operations): + """Check a common case: 'save_exception' immediately followed by + 'restore_exception' at the start of the bridge.""" + # XXX should check if the boxes are used later; but we just assume + # they aren't for now + start = 0 + if operations[0].getopnum() == rop.INCREMENT_DEBUG_COUNTER: + start = 1 + if len(operations) >= start + 3: + if (operations[start+0].getopnum() == rop.SAVE_EXC_CLASS and + operations[start+1].getopnum() == rop.SAVE_EXCEPTION and + operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): + return operations[:start] + operations[start+3:] + return operations diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -2099,6 +2099,60 @@ excvalue = self.cpu.grab_exc_value(deadframe) assert not excvalue + def test_save_restore_exceptions(self): + exc_tp = None + exc_ptr = None + def func(i): + if hasattr(self.cpu, '_exception_emulator'): + assert not self.cpu._exception_emulator[0] + assert not self.cpu._exception_emulator[1] + called.append(i) + if i: + raise LLException(exc_tp, exc_ptr) + + ops = ''' + [i0] + i1 = same_as_i(1) + call_n(ConstClass(fptr), i0, descr=calldescr) + i2 = save_exc_class() + p2 = save_exception() + call_n(ConstClass(fptr), 0, descr=calldescr) + restore_exception(i2, p2) + p0 = guard_exception(ConstClass(xtp)) [i1] + finish(p0) + ''' + FPTR = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Void)) + fptr = llhelper(FPTR, func) + calldescr = self.cpu.calldescrof(FPTR.TO, FPTR.TO.ARGS, FPTR.TO.RESULT, + EffectInfo.MOST_GENERAL) + + xtp = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True) + xtp.subclassrange_min = 1 + xtp.subclassrange_max = 3 + X = lltype.GcStruct('X', ('parent', rclass.OBJECT), + hints={'vtable': xtp._obj}) + xx = lltype.malloc(X) + xx.parent.typeptr = xtp + xptr = 
lltype.cast_opaque_ptr(llmemory.GCREF, xx) + + exc_tp = xtp + exc_ptr = xptr + loop = parse(ops, self.cpu, namespace=locals()) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + called = [] + deadframe = self.cpu.execute_token(looptoken, 5) + assert called == [5, 0] + assert self.cpu.get_ref_value(deadframe, 0) == xptr + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + called = [] + deadframe = self.cpu.execute_token(looptoken, 0) + assert called == [0, 0] + assert self.cpu.get_int_value(deadframe, 0) == 1 + excvalue = self.cpu.grab_exc_value(deadframe) + assert not excvalue + def test_cond_call_gc_wb(self): def func_void(a): record.append(rffi.cast(lltype.Signed, a)) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1609,6 +1609,15 @@ self.implement_guard(guard_token) self._store_and_reset_exception(self.mc, resloc) + def genop_save_exc_class(self, op, arglocs, resloc): + self.mc.MOV(resloc, heap(self.cpu.pos_exception())) + + def genop_save_exception(self, op, arglocs, resloc): + self._store_and_reset_exception(self.mc, resloc) + + def genop_discard_restore_exception(self, op, arglocs): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + def _store_and_reset_exception(self, mc, excvalloc=None, exctploc=None, tmploc=None): """ Resest the exception. 
If excvalloc is None, then store it on the diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -418,6 +418,17 @@ self.perform_guard(op, [loc, loc1], resloc) self.rm.possibly_free_var(box) + def consider_save_exception(self, op): + resloc = self.rm.force_allocate_reg(op) + self.perform(op, [], resloc) + consider_save_exc_class = consider_save_exception + + def consider_restore_exception(self, op): + args = op.getarglist() + loc0 = self.rm.make_sure_var_in_reg(op.getarg(0), args) # exc class + loc1 = self.rm.make_sure_var_in_reg(op.getarg(1), args) # exc instance + self.perform_discard(op, [loc0, loc1]) + consider_guard_no_overflow = consider_guard_no_exception consider_guard_overflow = consider_guard_no_exception consider_guard_not_forced = consider_guard_no_exception diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -915,10 +915,13 @@ return [op0, op1] def rewrite_op_malloc(self, op): - if op.args[1].value['flavor'] == 'raw': + d = op.args[1].value + if d.get('nonmovable', False): + raise UnsupportedMallocFlags(d) + if d['flavor'] == 'raw': return self._rewrite_raw_malloc(op, 'raw_malloc_fixedsize', []) # - if op.args[1].value.get('zero', False): + if d.get('zero', False): zero = True else: zero = False diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -386,7 +386,9 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, - rop.BRIDGE_EXCEPTION, + rop.SAVE_EXC_CLASS, + rop.SAVE_EXCEPTION, + rop.RESTORE_EXCEPTION, ): # list of opcodes never executed by pyjitpl continue raise AssertionError("missing %r" % (key,)) diff --git a/rpython/jit/metainterp/pyjitpl.py 
b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2487,17 +2487,28 @@ # 'test_guard_no_exception_incorrectly_removed_from_bridge' # shows a corner case in which just putting GuARD_NO_EXCEPTION # here is a bad idea: the optimizer might remove it too. - # So we put a pair BRIDGE_EXCEPTION / GUARD_(NO)_EXCEPTION. - # The BRIDGE_EXCEPTION is meant to re-raise the exception - # caught before the bridge, but in reality it must end up - # as the first operation and thus is a no-op for the backends - # (it is removed in rewrite.py). Its real purpose is only to - # pass through the optimizer unmodified, so that the following - # GUARD_NO_EXCEPTION is not killed. - self.history.record(rop.BRIDGE_EXCEPTION, [], None) - if exception: - self.execute_ll_raised(lltype.cast_opaque_ptr(rclass.OBJECTPTR, - exception)) + # So we put a SAVE_EXCEPTION at the start, and a + # RESTORE_EXCEPTION just before the guard. (rewrite.py will + # remove the two if they end up consecutive.) 
+ + # XXX too much jumps between older and newer models; clean up + # by killing SAVE_EXC_CLASS, RESTORE_EXCEPTION and GUARD_EXCEPTION + + exception_obj = lltype.cast_opaque_ptr(rclass.OBJECTPTR, exception) + if exception_obj: + exc_class = heaptracker.adr2int( + llmemory.cast_ptr_to_adr(exception_obj.typeptr)) + else: + exc_class = 0 + i = len(self.history.operations) + op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) + op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) + assert op1 is self.history.operations[i] + assert op2 is self.history.operations[i + 1] + self.history.operations = [op1, op2] + self.history.operations[:i] + self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) + if exception_obj: + self.execute_ll_raised(exception_obj) else: self.clear_exception() try: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -696,7 +696,7 @@ 'GUARD_SUBCLASS/2d/n', # only if supports_guard_gc_type '_GUARD_FOLDABLE_LAST', 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set - 'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set + 'GUARD_EXCEPTION/1d/r', # XXX kill me, use only SAVE_EXCEPTION 'GUARD_NO_OVERFLOW/0d/n', 'GUARD_OVERFLOW/0d/n', 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set @@ -827,7 +827,9 @@ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr 'RECORD_EXACT_CLASS/2/n', # [objptr, clsptr] 'KEEPALIVE/1/n', - 'BRIDGE_EXCEPTION/0/n', # pyjitpl: prepare_resume_from_failure() + 'SAVE_EXCEPTION/0/r', + 'SAVE_EXC_CLASS/0/i', # XXX kill me + 'RESTORE_EXCEPTION/2/n', # XXX kill me '_CANRAISE_FIRST', # ----- start of can_raise operations ----- '_CALL_FIRST', diff --git a/rpython/memory/gc/base.py b/rpython/memory/gc/base.py --- a/rpython/memory/gc/base.py +++ b/rpython/memory/gc/base.py @@ -172,6 +172,9 @@ def can_move(self, addr): 
return False + def malloc_fixedsize_nonmovable(self, typeid): + raise MemoryError + def pin(self, addr): return False diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -597,7 +597,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -606,7 +606,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -659,7 +659,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -692,6 +692,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=2): """Do a minor (gen=0), start a major (gen=1), or do a full major (gen>=2) collection.""" @@ -808,7 +813,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -862,7 +867,9 @@ # we should get a MemoryError from major_collection_step(). # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -874,10 +881,6 @@ # Allocate from the ArenaCollection. Don't clear it. result = self.ac.malloc(totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. 
extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -897,11 +900,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -939,7 +942,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -519,7 +519,7 @@ if needs_finalizer and not is_finalizer_light: ll_assert(not contains_weakptr, "'needs_finalizer' and 'contains_weakptr' both specified") - obj = self.external_malloc(typeid, 0, can_make_young=False) + obj = self.external_malloc(typeid, 0, alloc_young=False) self.objects_with_finalizers.append(obj) # # If totalsize is greater than nonlarge_max (which should never be @@ -528,7 +528,7 @@ elif rawtotalsize > self.nonlarge_max: ll_assert(not contains_weakptr, "'contains_weakptr' specified for a large object") - obj = self.external_malloc(typeid, 0) + obj = self.external_malloc(typeid, 0, alloc_young=True) # else: # If totalsize is smaller than minimal_size_in_nursery, round it @@ -581,7 +581,7 @@ # If the total size of the object would be larger than # 'nonlarge_max', then allocate it externally. We also # go there if 'length' is actually negative. 
- obj = self.external_malloc(typeid, length) + obj = self.external_malloc(typeid, length, alloc_young=True) # else: # With the above checks we know now that totalsize cannot be more @@ -614,6 +614,11 @@ return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + def malloc_fixedsize_nonmovable(self, typeid): + obj = self.external_malloc(typeid, 0, alloc_young=True) + return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + def collect(self, gen=1): """Do a minor (gen=0) or major (gen>0) collection.""" self.minor_collection() @@ -671,7 +676,7 @@ collect_and_reserve._dont_inline_ = True - def external_malloc(self, typeid, length, can_make_young=True): + def external_malloc(self, typeid, length, alloc_young): """Allocate a large object using the ArenaCollection or raw_malloc(), possibly as an object with card marking enabled, if it has gc pointers in its var-sized part. 'length' should be @@ -711,7 +716,9 @@ self.major_collection(raw_malloc_usage(totalsize)) # # Check if the object would fit in the ArenaCollection. - if raw_malloc_usage(totalsize) <= self.small_request_threshold: + # Also, an object allocated from ArenaCollection must be old. + if (raw_malloc_usage(totalsize) <= self.small_request_threshold + and not alloc_young): # # Yes. Round up 'totalsize' (it cannot overflow and it # must remain <= self.small_request_threshold.) @@ -724,10 +731,6 @@ result = self.ac.malloc(totalsize) llmemory.raw_memclear(result, totalsize) # - # An object allocated from ArenaCollection is always old, even - # if 'can_make_young'. The interesting case of 'can_make_young' - # is for large objects, bigger than the 'large_objects' threshold, - # which are raw-malloced but still young. 
extra_flags = GCFLAG_TRACK_YOUNG_PTRS # else: @@ -747,11 +750,11 @@ extra_words = self.card_marking_words_for_length(length) cardheadersize = WORD * extra_words extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS - # if 'can_make_young', then we also immediately set + # if 'alloc_young', then we also immediately set # GCFLAG_CARDS_SET, but without adding the object to # 'old_objects_with_cards_set'. In this way it should # never be added to that list as long as it is young. - if can_make_young: + if alloc_young: extra_flags |= GCFLAG_CARDS_SET # # Detect very rare cases of overflows @@ -787,7 +790,7 @@ # Record the newly allocated object and its full malloced size. # The object is young or old depending on the argument. self.rawmalloced_total_size += r_uint(allocsize) - if can_make_young: + if alloc_young: if not self.young_rawmalloced_objects: self.young_rawmalloced_objects = self.AddressDict() self.young_rawmalloced_objects.add(result + size_gc_header) diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -565,8 +565,8 @@ tid = self.get_type_id(VAR) largeobj_size = self.gc.nonlarge_max + 1 self.gc.next_major_collection_threshold = 99999.0 - addr_src = self.gc.external_malloc(tid, largeobj_size) - addr_dst = self.gc.external_malloc(tid, largeobj_size) + addr_src = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) + addr_dst = self.gc.external_malloc(tid, largeobj_size, alloc_young=True) hdr_src = self.gc.header(addr_src) hdr_dst = self.gc.header(addr_dst) # diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -531,6 +531,9 @@ getfn(func, [SomeAddress()], annmodel.s_None) + self.malloc_nonmovable_ptr = getfn(GCClass.malloc_fixedsize_nonmovable, + [s_gc, s_typeid16], + s_gcref) def 
create_custom_trace_funcs(self, gc, rtyper): custom_trace_funcs = tuple(rtyper.custom_trace_funcs) @@ -757,7 +760,12 @@ c_has_light_finalizer = rmodel.inputconst(lltype.Bool, has_light_finalizer) - if not op.opname.endswith('_varsize') and not flags.get('varsize'): + if flags.get('nonmovable'): + assert op.opname == 'malloc' + assert not flags.get('varsize') + malloc_ptr = self.malloc_nonmovable_ptr + args = [self.c_const_gc, c_type_id] + elif not op.opname.endswith('_varsize') and not flags.get('varsize'): zero = flags.get('zero', False) if (self.malloc_fast_ptr is not None and not c_has_finalizer.value and diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py --- a/rpython/memory/test/test_transformed_gc.py +++ b/rpython/memory/test/test_transformed_gc.py @@ -1247,6 +1247,26 @@ res = self.runner('nursery_hash_base') assert res([]) >= 195 + def define_instantiate_nonmovable(cls): + from rpython.rlib import objectmodel + from rpython.rtyper import annlowlevel + class A: + pass + def fn(): + a1 = A() + a = objectmodel.instantiate(A, nonmovable=True) + a.next = a1 # 'a' is known young here, so no write barrier emitted + res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a)) + rgc.collect() + objectmodel.keepalive_until_here(a) + return res + return fn + + def test_instantiate_nonmovable(self): + res = self.runner('instantiate_nonmovable') + assert res([]) == 0 + + class TestIncrementalMiniMarkGC(TestMiniMarkGC): gcname = "incminimark" diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -276,7 +276,7 @@ # ____________________________________________________________ -def instantiate(cls): +def instantiate(cls, nonmovable=False): "Create an empty instance of 'cls'." 
if isinstance(cls, type): return cls.__new__(cls) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -480,7 +480,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation - __slots__ = ['_x'] + __slots__ = ['_x', '_handle'] def __init__(self, x): self._x = x def __hash__(self): @@ -529,6 +529,48 @@ return None try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)' +_ffi_cache = None +def _fetch_ffi(): + global _ffi_cache + if _ffi_cache is None: + try: + import _cffi_backend + _ffi_cache = _cffi_backend.FFI() + except (ImportError, AttributeError): + import py + py.test.skip("need CFFI >= 1.0") + return _ffi_cache + + at jit.dont_look_inside +def hide_nonmovable_gcref(gcref): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + if we_are_translated(): + assert lltype.typeOf(gcref) == llmemory.GCREF + assert not can_move(gcref) + return rffi.cast(llmemory.Address, gcref) + else: + assert isinstance(gcref, _GcRef) + x = gcref._x + ffi = _fetch_ffi() + if not hasattr(x, '__handle'): + x.__handle = ffi.new_handle(x) + addr = int(ffi.cast("intptr_t", x.__handle)) + return rffi.cast(llmemory.Address, addr) + + at jit.dont_look_inside +def reveal_gcref(addr): + from rpython.rtyper.lltypesystem import lltype, llmemory, rffi + assert lltype.typeOf(addr) == llmemory.Address + if we_are_translated(): + return rffi.cast(llmemory.GCREF, addr) + else: + addr = rffi.cast(lltype.Signed, addr) + if addr == 0: + return lltype.nullptr(llmemory.GCREF.TO) + ffi = _fetch_ffi() + x = ffi.from_handle(ffi.cast("void *", addr)) + return _GcRef(x) + # ------------------- implementation ------------------- _cache_s_list_of_gcrefs = None diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -79,6 +79,7 @@ @specialize.arg(0) def ll_start_new_thread(func): + _check_thread_enabled() ident = 
c_thread_start(func) if ident == -1: raise error("can't start new thread") @@ -170,6 +171,18 @@ def _cleanup_(self): raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") +def _check_thread_enabled(): + pass +class Entry(ExtRegistryEntry): + _about_ = _check_thread_enabled + def compute_result_annotation(self): + translator = self.bookkeeper.annotator.translator + if not translator.config.translation.thread: + raise Exception( + "this RPython program uses threads: translate with '--thread'") + def specialize_call(self, hop): + hop.exception_cannot_occur() + # ____________________________________________________________ # # Stack size diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -132,7 +133,7 @@ \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 +%(extra_align)s \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; 
static int (*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; #define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = 
str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end of file diff --git a/rpython/rtyper/lltypesystem/rtagged.py b/rpython/rtyper/lltypesystem/rtagged.py --- a/rpython/rtyper/lltypesystem/rtagged.py +++ b/rpython/rtyper/lltypesystem/rtagged.py @@ -27,7 +27,8 @@ self.classdef, flds)) self.specialfieldname = flds[0] - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): + assert not nonmovable if self.is_parent: raise TyperError("don't instantiate %r, it is a parent of an " "UnboxedValue class" % (self.classdef,)) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -693,18 +693,24 @@ return hop.args_r[0].rtype_isinstance(hop) @typer_for(objectmodel.instantiate) -def rtype_instantiate(hop): +def rtype_instantiate(hop, i_nonmovable=None): hop.exception_cannot_occur() s_class = hop.args_s[0] assert isinstance(s_class, annmodel.SomePBC) + v_nonmovable, = parse_kwds(hop, 
(i_nonmovable, None)) + nonmovable = (i_nonmovable is not None and v_nonmovable.value) if len(s_class.descriptions) != 1: # instantiate() on a variable class + if nonmovable: + raise TyperError("instantiate(x, nonmovable=True) cannot be used " + "if x is not a constant class") vtypeptr, = hop.inputargs(rclass.get_type_repr(hop.rtyper)) r_class = hop.args_r[0] return r_class._instantiate_runtime_class(hop, vtypeptr, hop.r_result.lowleveltype) classdef = s_class.any_description().getuniqueclassdef() - return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops) + return rclass.rtype_new_instance(hop.rtyper, classdef, hop.llops, + nonmovable=nonmovable) @typer_for(hasattr) diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -684,10 +684,12 @@ rbase = rbase.rbase return False - def new_instance(self, llops, classcallhop=None): + def new_instance(self, llops, classcallhop=None, nonmovable=False): """Build a new instance, without calling __init__.""" flavor = self.gcflavor flags = {'flavor': flavor} + if nonmovable: + flags['nonmovable'] = True ctype = inputconst(Void, self.object_type) cflags = inputconst(Void, flags) vlist = [ctype, cflags] @@ -1031,9 +1033,10 @@ # ____________________________________________________________ -def rtype_new_instance(rtyper, classdef, llops, classcallhop=None): +def rtype_new_instance(rtyper, classdef, llops, classcallhop=None, + nonmovable=False): rinstance = getinstancerepr(rtyper, classdef) - return rinstance.new_instance(llops, classcallhop) + return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable) def ll_inst_hash(ins): if not ins: diff --git a/rpython/rtyper/test/test_rbuiltin.py b/rpython/rtyper/test/test_rbuiltin.py --- a/rpython/rtyper/test/test_rbuiltin.py +++ b/rpython/rtyper/test/test_rbuiltin.py @@ -432,6 +432,14 @@ res = self.interpret(f, [2]) assert self.class_name(res) == 'B' + def test_instantiate_nonmovable(self): + 
class A: + pass + def f(): + return instantiate(A, nonmovable=True) # no effect before GC + res = self.interpret(f, []) + assert self.class_name(res) == 'A' + def test_os_path_join(self): def fn(a, b): return os.path.join(a, b) From noreply at buildbot.pypy.org Tue Oct 13 23:57:50 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:50 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: document branch Message-ID: <20151013215750.6CBA21C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80179:dea28735aada Date: 2015-10-14 00:56 +0300 http://bitbucket.org/pypy/pypy/changeset/dea28735aada/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,9 @@ ffi.new_handle() returns handles that work more like CPython's: they remain valid as long as the target exists (unlike the previous version, where handles become invalid *before* the __del__ is called). + +.. 
branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions From noreply at buildbot.pypy.org Tue Oct 13 23:57:52 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:52 +0200 (CEST) Subject: [pypy-commit] pypy ufunc-casting: close branch Message-ID: <20151013215752.991471C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ufunc-casting Changeset: r80180:f7f3865a2e4d Date: 2015-10-14 00:56 +0300 http://bitbucket.org/pypy/pypy/changeset/f7f3865a2e4d/ Log: close branch From noreply at buildbot.pypy.org Tue Oct 13 23:57:54 2015 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 13 Oct 2015 23:57:54 +0200 (CEST) Subject: [pypy-commit] pypy default: merge ufunc-casting which provides casting of arguments to ufuncs and frompypyfunc Message-ID: <20151013215754.B1E011C147C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80181:517db24acc3f Date: 2015-10-14 00:57 +0300 http://bitbucket.org/pypy/pypy/changeset/517db24acc3f/ Log: merge ufunc-casting which provides casting of arguments to ufuncs and frompypyfunc diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,9 @@ ffi.new_handle() returns handles that work more like CPython's: they remain valid as long as the target exists (unlike the previous version, where handles become invalid *before* the __del__ is called). + +.. 
branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -74,10 +74,10 @@ call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -95,9 +95,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -108,10 +108,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -134,24 +134,29 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,8 +159,7 @@ af2 = ufunc(af) assert all(af2 == af * 2) ac = arange(10, dtype=complex) - skip('casting not implemented yet') - ac1 = ufunc(ac) + 
raises(TypeError, ufunc, ac) def test_frompyfunc_2d_sig(self): import sys @@ -199,6 +198,10 @@ ai2 = ufunc(aiV) assert (ai2 == aiV * 2).all() + ai = arange(0).reshape(0, 1, 1) + ao = ufunc(ai) + assert ao.shape == (0, 1, 1) + def test_frompyfunc_needs_nditer(self): import sys from numpy import frompyfunc, dtype, arange @@ -268,6 +271,54 @@ assert out0.shape == in0.shape assert (out0 == in0 * 2).all() + def test_frompyfunc_casting(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def times2_int(in0, out0): + assert in0.dtype == int + assert out0.dtype == int + # hack to assing to a 0-dim array + out0.real = in0 * 2 + + def times2_complex(in0, out0): + assert in0.dtype == complex + assert out0.dtype == complex + out0.real = in0.real * 2 + out0.imag = in0.imag + + def times2_complex0(in0): + assert in0.dtype == complex + return in0 * 2 + + def times2_int0(in0): + assert in0.dtype == int + return in0 * 2 + + times2stacked = np.frompyfunc([times2_int, times2_complex], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=True, signature='()->()', + ) + times2 = np.frompyfunc([times2_int0, times2_complex0], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d) + out0 = times2stacked(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + out0 = times2(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): @@ -1393,7 +1444,7 @@ def test_add_doc(self): import sys if '__pypy__' not in sys.builtin_module_names: - skip('') + skip('cpython sets docstrings differently') try: from 
numpy import set_docstring except ImportError: diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -709,6 +709,32 @@ raise oefmt(space.w_TypeError, "ufunc '%s' not supported for the input types", self.name) +def _match_dtypes(space, indtypes, targetdtypes, i_target, casting): + allok = True + for i in range(len(indtypes)): + origin = indtypes[i] + target = targetdtypes[i + i_target] + if origin is None: + continue + if target is None: + continue + if not can_cast_type(space, origin, target, casting): + allok = False + break + return allok + +def _raise_err_msg(self, space, dtypes0, dtypes1): + dtypesstr = '' + for d in dtypes0: + if d is None: + dtypesstr += 'None,' + else: + dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) + _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ + for d in dtypes1]) + raise oefmt(space.w_TypeError, + "input dtype [%s] did not match any known dtypes [%s] ", + dtypesstr,_dtypesstr) class W_UfuncGeneric(W_Ufunc): @@ -799,29 +825,36 @@ outargs0 = outargs[0] assert isinstance(inargs0, W_NDimArray) assert isinstance(outargs0, W_NDimArray) + nin = self.nin + assert nin >= 0 res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap outargs using __array_wrap__ + if self.stack_inputs: + loop.call_many_to_many(space, new_shape, func, + dtypes, [], inargs + outargs, []) + if len(outargs) < 2: + return outargs[0] + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - res_dtype, inargs, outargs[0]) + dtypes[:nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - res_dtype, inargs, outargs) + dtypes[:nin], dtypes[nin:], inargs, outargs) + w_casting = space.w_None + w_op_dtypes = space.w_None for tf in need_to_cast: if tf: - raise oefmt(space.w_NotImplementedError, "casting not 
supported yet") + w_casting = space.wrap('safe') + w_op_dtypes = space.newtuple([space.wrap(d) for d in dtypes]) + w_flags = space.w_None # NOT 'external_loop', we do coalescing by core_num_dims - w_op_flags = space.newtuple([space.wrap(r) for r in ['readonly'] * len(inargs)] + \ - [space.wrap(r) for r in ['readwrite'] * len(outargs)]) - w_op_dtypes = space.w_None - w_casting = space.w_None + w_ro = space.newtuple([space.wrap('readonly'), space.wrap('copy')]) + w_rw = space.newtuple([space.wrap('readwrite'), space.wrap('updateifcopy')]) + + w_op_flags = space.newtuple([w_ro] * len(inargs) + [w_rw] * len(outargs)) w_op_axes = space.w_None - #print '\nsignature', sig - #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 'broad' in d] - #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' in d] - #print 'shapes',[d.get_shape() for d in inargs + outargs] - #print 'steps',[d.implementation.strides for d in inargs + outargs] if isinstance(func, W_GenericUFuncCaller): # Use GeneralizeUfunc interface with signature # Unlike numpy, we will not broadcast dims before @@ -934,19 +967,32 @@ # linear_search_type_resolver in numpy ufunc_type_resolutions.c # type_tup can be '', a tuple of dtypes, or a string # of the form d,t -> D where the letters are dtype specs - nop = len(inargs) + len(outargs) + + # XXX why does the next line not pass translation? 
+ # dtypes = [i.get_dtype() for i in inargs] dtypes = [] + for i in inargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) + for i in outargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) if isinstance(type_tup, str) and len(type_tup) > 0: try: if len(type_tup) == 1: - dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs + s_dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs elif len(type_tup) == self.nargs + 2: + s_dtypes = [] for i in range(self.nin): - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) #skip the '->' in the signature for i in range(self.nout): j = i + self.nin + 2 - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ @@ -955,42 +1001,29 @@ except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ " call to %s with type-string '%s'", self.name, type_tup) - else: - # XXX why does the next line not pass translation? 
- # dtypes = [i.get_dtype() for i in inargs] - for i in inargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) - for i in outargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) + # Make sure args can be cast to dtypes + if not _match_dtypes(space, dtypes, s_dtypes, 0, "safe"): + _raise_err_msg(self, space, dtypes, s_dtypes) + dtypes = s_dtypes #Find the first matchup of dtypes with _dtypes for i in range(0, len(_dtypes), self.nargs): - allok = True - for j in range(self.nargs): - if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: - allok = False + allok = _match_dtypes(space, dtypes, _dtypes, i, "no") if allok: break else: - if len(self.funcs) > 1: - - dtypesstr = '' - for d in dtypes: - if d is None: - dtypesstr += 'None,' - else: - dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) - _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ - for d in _dtypes]) - raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", - dtypesstr,_dtypesstr) - i = 0 + # No exact matches, can we cast? + for i in range(0, len(_dtypes), self.nargs): + allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") + if allok: + end = i + self.nargs + assert i >= 0 + assert end >=0 + dtypes = _dtypes[i:end] + break + else: + if len(self.funcs) > 1: + _raise_err_msg(self, space, dtypes, _dtypes) + i = 0 # Fill in empty dtypes for j in range(self.nargs): if dtypes[j] is None: @@ -1086,7 +1119,7 @@ for j in range(offset, len(iter_shape)): x = iter_shape[j + offset] y = dims_to_broadcast[j] - if (x > y and x % y) or y %x: + if y != 0 and x != 0 and ((x > y and x % y) or y %x): raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its broadcast dimension %d " "(size %d is different from %d)", @@ -1123,7 +1156,7 @@ # the current op (signalling it can handle ndarray's). 
# TODO parse and handle subok - # TODO handle flags, op_flags + # TODO handle more flags, op_flags #print 'iter_shape',iter_shape,'arg_shapes',arg_shapes,'matched_dims',matched_dims return iter_shape, arg_shapes, matched_dims From noreply at buildbot.pypy.org Wed Oct 14 00:01:02 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 00:01:02 +0200 (CEST) Subject: [pypy-commit] cffi default: document this windows change Message-ID: <20151013220102.C15141C14AF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2339:71aa461f34fe Date: 2015-10-14 00:00 +0200 http://bitbucket.org/cffi/cffi/changeset/71aa461f34fe/ Log: document this windows change diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -25,7 +25,14 @@ * ffi.memmove XXX +* Windows: CPython 2.7 distutils doesn't work with Microsoft's official + Visual Studio for Python, and I'm told this is `not a bug`__. For + ffi.compile(), we `removed a workaround`__ that was inside cffi but + which had unwanted side-effects. Try saying ``import setuptools`` + first, which patches distutils... +.. __: https://bugs.python.org/issue23246 +.. __: https://bitbucket.org/cffi/cffi/pull-requests/65/remove-_hack_at_distutils-which-imports/diff .. 
_`calling convention`: using.html#windows-calling-conventions From noreply at buildbot.pypy.org Wed Oct 14 00:34:13 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 00:34:13 +0200 (CEST) Subject: [pypy-commit] pypy raises(Exception)-must-die: fix tests and missing import for rweakref Message-ID: <20151013223413.35FAA1C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: raises(Exception)-must-die Changeset: r80182:a5ba716fb184 Date: 2015-10-13 23:26 +0100 http://bitbucket.org/pypy/pypy/changeset/a5ba716fb184/ Log: fix tests and missing import for rweakref diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -2,10 +2,10 @@ Weakref support in RPython. Basic regular weakrefs without callbacks are supported. This file contains the following additions: a form of WeakKeyDictionary, and a limited version of WeakValueDictionary. -LLType only for now! """ import weakref +from rpython.annotator.model import UnionError ref = weakref.ref # basic regular weakrefs are supported in RPython @@ -191,9 +191,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same key class!") + raise UnionError(s_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same value class!") + raise UnionError(s_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import 
RWeakKeyDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -120,6 +121,9 @@ f(1) interpret(f, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) @@ -127,9 +131,12 @@ d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - with py.test.raises(Exception): + with py.test.raises(UnionError): interpret(g, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) @@ -137,12 +144,11 @@ d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - with py.test.raises(Exception): + with py.test.raises(UnionError): interpret(g, [1]) - + at py.test.mark.xfail(reason="not implemented, messy") def test_rpython_free_values(): - import py; py.test.skip("XXX not implemented, messy") class VXDel: def __del__(self): state.freed.append(1) diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakValueDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -144,7 +145,7 @@ d = RWeakValueDictionary(str, Y) d.set("x", X()) - with py.test.raises(Exception): + with py.test.raises(UnionError): interpret(g, [1]) From noreply at buildbot.pypy.org Wed Oct 14 00:34:15 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 00:34:15 +0200 (CEST) Subject: [pypy-commit] pypy raises(Exception)-must-die: make test and exception more precise in rstrategies Message-ID: <20151013223415.69E2C1C1214@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: raises(Exception)-must-die Changeset: r80183:8ee285b52656 Date: 2015-10-13 
23:34 +0100 http://bitbucket.org/pypy/pypy/changeset/8ee285b52656/ Log: make test and exception more precise in rstrategies diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py --- a/rpython/rlib/rstrategies/rstrategies.py +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -41,7 +41,7 @@ attrs['get_storage'] = get_storage attrs['set_storage'] = set_storage return type.__new__(self, name, bases, attrs) - + def strategy(generalize=None, singleton=True): """ Strategy classes must be decorated with this. @@ -71,19 +71,19 @@ class StrategyFactory(object): _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] factory_instance_counter = 0 - + def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = self._collect_subclasses(root_class) self.strategies = [] self.logger = logger.Logger() - + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter StrategyFactory.factory_instance_counter += 1 - + self._create_strategy_instances(root_class, all_strategy_classes) - + def _create_strategy_instances(self, root_class, all_strategy_classes): for strategy_class in all_strategy_classes: if strategy_class._is_strategy: @@ -91,11 +91,11 @@ self.strategies.append(strategy_class) self._patch_strategy_class(strategy_class, root_class) self._order_strategies() - + # ============================= # API methods # ============================= - + def switch_strategy(self, w_self, new_strategy_type, new_element=None): """ Switch the strategy of w_self to the new type. @@ -113,7 +113,7 @@ new_strategy.strategy_switched(w_self) self.log(w_self, new_strategy, old_strategy, new_element) return new_strategy - + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): """ Initialize the strategy and storage fields of w_self. 
@@ -135,7 +135,7 @@ strategy.strategy_switched(w_self) self.log(w_self, strategy, None, element) return strategy - + @jit.unroll_safe def strategy_type_for(self, objects): """ @@ -153,8 +153,8 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type - raise Exception("Could not find strategy to handle: %s" % objects) - + raise ValueError("Could not find strategy to handle: %s" % objects) + def decorate_strategies(self, transitions): """ As an alternative to decorating all strategies with @strategy, @@ -165,11 +165,11 @@ "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): strategy(generalized)(strategy_class) - + # ============================= # The following methods can be overwritten to customize certain aspects of the factory. # ============================= - + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): """ Return a functional instance of strategy_type. @@ -177,7 +177,7 @@ The two additional parameters should be ignored for singleton-strategies. """ return strategy_type() - + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): """ This can be overwritten into a more appropriate call to self.logger.log @@ -190,7 +190,7 @@ typename = "" cause = "Switched" if old_strategy else "Created" self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) - + @specialize.call_location() def log_string_for_object(self, obj): """ @@ -198,8 +198,8 @@ Keep the specialize-annotation in order to handle different kinds of objects here. """ return obj.__class__.__name__ if obj else "" - - # These storage accessors are specialized because the storage field is + + # These storage accessors are specialized because the storage field is # populated by erased-objects which seem to be incompatible sometimes. 
@specialize.call_location() def get_storage(self, obj): @@ -207,16 +207,16 @@ @specialize.call_location() def set_storage(self, obj, val): return obj._set_storage(val) - + def get_strategy(self, obj): return obj._get_strategy() def set_strategy(self, obj, val): return obj._set_strategy(val) - + # ============================= # Internal methods # ============================= - + def _patch_strategy_class(self, strategy_class, root_class): "NOT_RPYTHON" # Patch root class: Add default handler for visitor @@ -225,12 +225,12 @@ funcname = "_convert_storage_from_" + strategy_class.__name__ _convert_storage_from_OTHER.func_name = funcname setattr(root_class, funcname, _convert_storage_from_OTHER) - + # Patch strategy class: Add polymorphic visitor function def _convert_storage_to(self, w_self, new_strategy): getattr(new_strategy, funcname)(w_self, self) strategy_class._convert_storage_to = _convert_storage_to - + def _collect_subclasses(self, cls): "NOT_RPYTHON" subclasses = [] @@ -238,7 +238,7 @@ subclasses.append(subcls) subclasses.extend(self._collect_subclasses(subcls)) return subclasses - + def _order_strategies(self): "NOT_RPYTHON" def get_generalization_depth(strategy, visited=None): @@ -256,11 +256,11 @@ else: return 0 self.strategies.sort(key=get_generalization_depth, reverse=True) - + @jit.elidable def strategy_singleton_instance(self, strategy_class): return getattr(strategy_class, self.strategy_singleton_field) - + def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. # The constructor does meta stuff which is not possible after translation. @@ -271,65 +271,65 @@ == Required: strategy_factory(self) - Access to StorageFactory """ - + def strategy_switched(self, w_self): # Overwrite this method for a hook whenever the strategy # of w_self was switched to self. 
pass - + # Main Fixedsize API - + def store(self, w_self, index0, value): raise NotImplementedError("Abstract method") - + def fetch(self, w_self, index0): raise NotImplementedError("Abstract method") - + def size(self, w_self): raise NotImplementedError("Abstract method") - + # Fixedsize utility methods - + def slice(self, w_self, start, end): return [ self.fetch(w_self, i) for i in range(start, end)] - + def fetch_all(self, w_self): return self.slice(w_self, 0, self.size(w_self)) - + def store_all(self, w_self, elements): for i, e in enumerate(elements): self.store(w_self, i, e) - + # Main Varsize API - + def insert(self, w_self, index0, list_w): raise NotImplementedError("Abstract method") - + def delete(self, w_self, start, end): raise NotImplementedError("Abstract method") - + # Varsize utility methods - + def append(self, w_self, list_w): - self.insert(w_self, self.size(w_self), list_w) - + self.insert(w_self, self.size(w_self), list_w) + def pop(self, w_self, index0): e = self.fetch(w_self, index0) self.delete(w_self, index0, index0+1) return e # Internal methods - + def _initialize_storage(self, w_self, initial_size): raise NotImplementedError("Abstract method") - + def _check_can_handle(self, value): raise NotImplementedError("Abstract method") - + def _convert_storage_to(self, w_self, new_strategy): # This will be overwritten in _patch_strategy_class new_strategy._convert_storage_from(w_self, self) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): # This is a very unefficient (but most generic) way to do this. 
@@ -338,16 +338,16 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) for i, field in enumerate(storage): self.store(w_self, i, field) - + def _generalize_for_value(self, w_self, value): strategy_type = self.generalized_strategy_for(value) new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) return new_instance - + def _cannot_handle_store(self, w_self, index0, value): new_instance = self._generalize_for_value(w_self, value) new_instance.store(w_self, index0, value) - + def _cannot_handle_insert(self, w_self, index0, list_w): # TODO - optimize. Prevent multiple generalizations and slicing done by callers. new_strategy = self._generalize_for_value(w_self, list_w[0]) @@ -358,7 +358,7 @@ class EmptyStrategy(AbstractStrategy): # == Required: # See AbstractStrategy - + def _initialize_storage(self, w_self, initial_size): assert initial_size == 0 self.set_storage(w_self, None) @@ -366,7 +366,7 @@ self.set_storage(w_self, None) def _check_can_handle(self, value): return False - + def fetch(self, w_self, index0): raise IndexError def store(self, w_self, index0, value): @@ -389,7 +389,7 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # value(self) - the single value contained in this strategy. Should be constant. 
- + def _initialize_storage(self, w_self, initial_size): storage_obj = SingleValueStrategyStorage(initial_size) self.set_storage(w_self, storage_obj) @@ -397,7 +397,7 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) def _check_can_handle(self, value): return value is self.value() - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) return self.value() @@ -411,7 +411,7 @@ self.get_storage(w_self).size -= (end - start) def size(self, w_self): return self.get_storage(w_self).size - + @jit.unroll_safe def insert(self, w_self, index0, list_w): storage_obj = self.get_storage(w_self) @@ -429,18 +429,18 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # default_value(self) - The value to be initially contained in this strategy - + def _initialize_storage(self, w_self, initial_size): default = self._unwrap(self.default_value()) self.set_storage(w_self, [default] * initial_size) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): size = previous_strategy.size(w_self) new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) for i in range(size) ] self.set_storage(w_self, new_storage) - + def store(self, w_self, index0, wrapped_value): self.check_index_store(w_self, index0) if self._check_can_handle(wrapped_value): @@ -448,21 +448,21 @@ self.get_storage(w_self)[index0] = unwrapped else: self._cannot_handle_store(w_self, index0, wrapped_value) - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) unwrapped = self.get_storage(w_self)[index0] return self._wrap(unwrapped) - + def _wrap(self, value): raise NotImplementedError("Abstract method") - + def _unwrap(self, value): raise NotImplementedError("Abstract method") - + def size(self, w_self): return len(self.get_storage(w_self)) - + @jit.unroll_safe def insert(self, w_self, start, list_w): # This is following Python's behaviour - insert automatically @@ -475,27 +475,27 @@ else: 
self._cannot_handle_insert(w_self, start + i, list_w[i:]) return - + def delete(self, w_self, start, end): self.check_index_range(w_self, start, end) assert start >= 0 and end >= 0 del self.get_storage(w_self)[start : end] - + class GenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value def _unwrap(self, value): return value def _check_can_handle(self, wrapped_value): return True - + class WeakGenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value() or self.default_value() def _unwrap(self, value): @@ -503,7 +503,7 @@ return weakref.ref(value) def _check_can_handle(self, wrapped_value): return True - + # ============== Mixins for index checking operations ============== class SafeIndexingMixin(object): @@ -535,37 +535,37 @@ # See StrategyWithStorage # wrap(self, value) - Return a boxed object for the primitive value # unwrap(self, value) - Return the unboxed primitive value of value - + def _unwrap(self, value): return self.unwrap(value) def _wrap(self, value): return self.wrap(value) - + class SingleTypeStrategy(SpecializedStrategy): # == Required Functions: # See SpecializedStrategy # contained_type - The wrapped type that can be stored in this strategy - + def _check_can_handle(self, value): return isinstance(value, self.contained_type) - + class TaggingStrategy(SingleTypeStrategy): """This strategy uses a special tag value to represent a single additional object.""" # == Required: # See SingleTypeStrategy # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - + def _check_can_handle(self, value): return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ self.unwrap(value) != self.unwrapped_tagged_value()) - + def _unwrap(self, value): if value is self.wrapped_tagged_value(): return self.unwrapped_tagged_value() 
return self.unwrap(value) - + def _wrap(self, value): if value == self.unwrapped_tagged_value(): return self.wrapped_tagged_value() diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py --- a/rpython/rlib/rstrategies/test/test_rstrategies.py +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -69,7 +69,7 @@ class Factory(rs.StrategyFactory): switching_log = [] - + def __init__(self, root_class): self.decorate_strategies({ EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], @@ -79,15 +79,15 @@ IntegerOrNilStrategy: [GenericStrategy], }) rs.StrategyFactory.__init__(self, root_class) - + def instantiate_strategy(self, strategy_type, w_self=None, size=0): return strategy_type(self, w_self, size) - - def set_strategy(self, w_list, strategy): + + def set_strategy(self, w_list, strategy): old_strategy = self.get_strategy(w_list) self.switching_log.append((old_strategy, strategy)) super(Factory, self).set_strategy(w_list, strategy) - + def clear_log(self): del self.switching_log[:] @@ -107,7 +107,7 @@ class WeakGenericStrategy(AbstractStrategy): import_from_mixin(rs.WeakGenericStrategy) def default_value(self): return w_nil - + class IntegerStrategy(AbstractStrategy): import_from_mixin(rs.SingleTypeStrategy) contained_type = W_Integer @@ -123,7 +123,7 @@ def default_value(self): return w_nil def wrapped_tagged_value(self): return w_nil def unwrapped_tagged_value(self): import sys; return sys.maxint - + @rs.strategy(generalize=[], singleton=False) class NonSingletonStrategy(GenericStrategy): def __init__(self, factory, w_list=None, size=0): @@ -214,22 +214,22 @@ py.test.raises(IndexError, s.fetch, l, 10) py.test.raises(IndexError, s.delete, l, 0, 1) py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
- + def test_init_Nil(): do_test_initialization(NilStrategy) def test_init_Generic(): do_test_initialization(GenericStrategy, is_safe=False) - + def test_init_WeakGeneric(): do_test_initialization(WeakGenericStrategy) - + def test_init_Integer(): do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) - + def test_init_IntegerOrNil(): do_test_initialization(IntegerOrNilStrategy) - + # === Test Simple store def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): @@ -256,13 +256,13 @@ def test_store_Generic(): do_test_store(GenericStrategy, is_safe=False) - + def test_store_WeakGeneric(): do_test_store(WeakGenericStrategy, stored_value=w_nil) - + def test_store_Integer(): do_test_store(IntegerStrategy, stored_value=W_Integer(100)) - + def test_store_IntegerOrNil(): do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) do_test_store(IntegerOrNilStrategy, stored_value=w_nil) @@ -289,17 +289,17 @@ def test_insert_Generic(): do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_WeakGeneric(): do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_Integer(): do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_insert_IntegerOrNil(): do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_insert(IntegerOrNilStrategy, [w_nil]*6) - + # === Test Delete def do_test_delete(cls, values, indexing_unsafe=False): @@ -319,13 +319,13 @@ def test_delete_Generic(): do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) - + def test_delete_WeakGeneric(): do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_delete_Integer(): do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_delete_IntegerOrNil(): do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_delete(IntegerOrNilStrategy, [w_nil]*6) 
@@ -342,7 +342,7 @@ obj = W_Object() i = W_Integer(0) nil = w_nil - + assert_handles(EmptyStrategy, [], [nil, obj, i]) assert_handles(NilStrategy, [nil], [obj, i]) assert_handles(GenericStrategy, [nil, obj, i], []) @@ -392,7 +392,7 @@ o = W_Object() l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) assert isinstance(l.strategy, GenericStrategy) - + def test_transition_to_nonSingleton(): l = W_List(NilStrategy, 5) factory.switch_strategy(l, NonSingletonStrategy) @@ -467,12 +467,12 @@ v3 = [W_Object() for _ in range(l.size()) ] assert v2 != v assert v3 != v - + l.store_all(v2) assert l.fetch_all() == v2+v[4:] l.store_all(v3) assert l.fetch_all() == v3 - + py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ]) # === Test Weak Strategy @@ -488,7 +488,7 @@ assert False, "The default convert_storage_from() should not be called!" def convert_storage_from_special(self, w_self, other): s.copied += 1 - + monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special) monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default) try: @@ -507,7 +507,8 @@ assert factory.strategy_type_for([]) == EmptyStrategy monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False) try: - py.test.raises(Exception, factory.strategy_type_for, [W_Object(), W_Object()]) + with py.test.raises(ValueError): + factory.strategy_type_for([W_Object(), W_Object()]) finally: monkeypatch.undo() @@ -549,4 +550,3 @@ 'Created (EmptyStrategy) size 0 objects 1', 'Created (IntegerStrategy) size 3 objects 1', 'Switched (IntegerStrategy -> IntegerOrNilStrategy) size 3 objects 1 elements: W_Object'] - \ No newline at end of file From noreply at buildbot.pypy.org Wed Oct 14 01:26:48 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 01:26:48 +0200 (CEST) Subject: [pypy-commit] pypy raises(Exception)-must-die: make test and exception more precise in extfunc Message-ID: 
<20151013232648.9C2E91C13BE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: raises(Exception)-must-die Changeset: r80184:d968eea777cb Date: 2015-10-14 00:07 +0100 http://bitbucket.org/pypy/pypy/changeset/d968eea777cb/ Log: make test and exception more precise in extfunc diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,8 +1,8 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr -from rpython.annotator import model as annmodel -from rpython.annotator.signature import annotation +from rpython.annotator.model import unionof +from rpython.annotator.signature import annotation, SignatureError import py, sys @@ -130,7 +130,7 @@ "Argument number mismatch" for i, expected in enumerate(signature_args): - arg = annmodel.unionof(args_s[i], expected) + arg = unionof(args_s[i], expected) if not expected.contains(arg): name = getattr(self, 'name', None) if not name: @@ -138,7 +138,7 @@ name = self.instance.__name__ except AttributeError: name = '?' 
- raise Exception("In call to external function %r:\n" + raise SignatureError("In call to external function %r:\n" "arg %d must be %s,\n" " got %s" % ( name, i+1, expected, args_s[i])) diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -2,9 +2,10 @@ from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ is_external, lazy_register -from rpython.annotator import model as annmodel +from rpython.annotator.model import SomeInteger, SomeString, AnnotatorError from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy +from rpython.annotator.signature import SignatureError from rpython.rtyper.test.test_llinterp import interpret class TestExtFuncEntry: @@ -21,8 +22,8 @@ class BTestFuncEntry(ExtFuncEntry): _about_ = b name = 'b' - signature_args = [annmodel.SomeInteger()] - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] + signature_result = SomeInteger() def f(): return b(2) @@ -30,7 +31,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) res = interpret(f, []) assert res == 42 @@ -45,8 +46,8 @@ class CTestFuncEntry(ExtFuncEntry): _about_ = c name = 'ccc' - signature_args = [annmodel.SomeInteger()] * 2 - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] * 2 + signature_result = SomeInteger() def lltypeimpl(y, x): return y + x @@ -72,7 +73,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_tuple_args(self): """ @@ -96,7 +97,7 @@ s = a.build_types(f, []) # Not a very good assertion, but at least it means _something_ happened. 
- assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_return_goes_back(self): """ @@ -118,7 +119,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_specialcase(self): """ @@ -135,10 +136,10 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeString) + assert isinstance(s, SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = SomeString(no_nul=True) def os_open(s): pass register_external(os_open, [str0], None) @@ -152,27 +153,32 @@ a.translator.config.translation.check_str_without_nul=True def g(s): return os_open(s) - with py.test.raises(Exception): + with py.test.raises(SignatureError): a.build_types(g, [str]) a.build_types(g, [str0]) # Does not raise - def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + def test_list_of_str0_unchecked(self): + str0 = SomeString(no_nul=True) + def os_execve(l): pass + register_external(os_execve, [[str0]], None) + def f(l): return os_execve(l) + policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) + assert a.translator.config.translation.check_str_without_nul == False a.build_types(f, [[str]]) # Does not raise - assert a.translator.config.translation.check_str_without_nul == False # Now enable the str0 check, and try again with a similar function a.translator.config.translation.check_str_without_nul=True + def g(l): return os_execve(l) - with py.test.raises(Exception): + + with py.test.raises(AnnotatorError): + # fails with TooLateForChange a.build_types(g, [[str]]) a.build_types(g, [[str0]]) # Does not raise - - From noreply at buildbot.pypy.org Wed Oct 14 01:30:49 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 01:30:49 +0200 (CEST) Subject: [pypy-commit] pypy default: 
Merged raises(Exception)-must-die into default Message-ID: <20151013233049.487F31C13BE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80185:d3bde7757257 Date: 2015-10-14 00:31 +0100 http://bitbucket.org/pypy/pypy/changeset/d3bde7757257/ Log: Merged raises(Exception)-must-die into default diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from contextlib import contextmanager from rpython.flowspace.model import Constant -from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, +from rpython.annotator.model import ( + SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, + SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) + SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -225,7 +225,8 @@ x = int(x) result = SomeInteger(nonneg = x>=0) else: - raise Exception("seeing a prebuilt long (value %s)" % hex(x)) + # XXX: better error reporting? 
+ raise ValueError("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses no_nul = not '\x00' in x if len(x) == 1: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -112,14 +112,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -801,8 +801,9 @@ s_init = basedesc.s_read_attribute('__init__') parent_has_init = isinstance(s_init, SomePBC) if has_init and not parent_has_init: - raise Exception("some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) # make a PBC of MethodDescs, one for the __init__ of each class initdescs = [] for desc, classdef in zip(descs, classdefs): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4,10 +4,12 @@ from rpython.conftest import option from rpython.annotator import model as annmodel +from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator +from rpython.annotator.classdef import NoSuchAttrError from 
rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy -from rpython.annotator.signature import Sig +from rpython.annotator.signature import Sig, SignatureError from rpython.annotator.listdef import ListDef, ListChangeUnallowed from rpython.annotator.dictdef import DictDef from rpython.flowspace.model import * @@ -213,7 +215,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -360,7 +362,7 @@ def f(l): return g(*l) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [[int]]) def test_star_unpack_and_keywords(self): @@ -769,7 +771,8 @@ def f(): return x a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) def test_exception_deduction_with_raise1(self): a = self.RPythonAnnotator() @@ -959,14 +962,16 @@ def f(): return large_constant a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(ValueError): + a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it def test_add_different_ints(self): def f(a, b): return a + b a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): def f(a, b): @@ -976,7 +981,8 @@ c = b return c a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): def f(a): @@ -2612,14 +2618,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, 
a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + py.test.raises(AnnotatorError, a.build_types, g, []) def test_import_from_mixin(self): class M(object): @@ -2694,7 +2700,8 @@ return a.x # should explode here a = self.RPythonAnnotator() - e = py.test.raises(Exception, a.build_types, f, [int]) + with py.test.raises(NoSuchAttrError) as excinfo: + a.build_types(f, [int]) # this should explode on reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. # But it looks hard to fix in general: we don't know yet during 'a.x' @@ -2928,7 +2935,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_simpler(self): def fun(x, y): @@ -2940,7 +2948,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_lambda(self): def fun(x, y): @@ -2954,7 +2963,8 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): def g(x, y=5): @@ -3004,8 +3014,8 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError - py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) + with py.test.raises(NoSuchAttrError): + a.build_types(fun, [int]) def test_slots_enforce_attrs(self): class Superbase(object): @@ -3138,7 +3148,8 @@ return a.n() a = 
self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fun, [bool]) + with py.test.raises(AnnotatorError): + a.build_types(fun, [bool]) def test_float_cmp(self): def fun(x, y): @@ -3227,6 +3238,7 @@ assert isinstance(s.items[2], annmodel.SomeInstance) assert s.items[2].flags == {} + @py.test.mark.xfail def test_no_access_directly_on_heap(self): from rpython.rlib.jit import hint @@ -3243,7 +3255,8 @@ i.x = x a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(AnnotatorError): + a.build_types(f, []) class M: @@ -3267,7 +3280,7 @@ c.m.l.append(x) a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3275,7 +3288,7 @@ c.m.d[None] = x a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3283,7 +3296,7 @@ c.m.d[x] = None a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_ctr_location(self): class A: @@ -3342,7 +3355,8 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int, int]) + with py.test.raises(UnionError): + a.build_types(f, [int, int]) def test_compare_with_zero(self): def g(): @@ -3464,22 +3478,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, 
[int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3517,7 +3531,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -3530,20 +3544,20 @@ return "xyz".find("x", s, e) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".rfind("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".count("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) @@ -3717,7 +3731,8 @@ raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + with py.test.raises(AssertionError): + a.build_types(f, []) def test_enumerate(self): def f(): @@ -4102,7 +4117,8 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fn, []) + with py.test.raises(NoSuchAttrError): + a.build_types(fn, []) def test_lower_char(self): def fn(c): @@ -4214,7 +4230,7 @@ return "bbb" a = self.RPythonAnnotator() - with 
py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) the_exc = exc.value @@ -4230,7 +4246,7 @@ return (1, 2) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg @@ -4243,7 +4259,7 @@ return -1 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot prove that these integers are of the " @@ -4260,7 +4276,7 @@ return B() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify instances with no common base class" @@ -4276,7 +4292,7 @@ return d.itervalues() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify incompatible iterator variants" in @@ -4288,7 +4304,7 @@ a = A() return getattr(a, y) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("variable argument to getattr" in exc.value.msg) @@ -4296,7 +4312,7 @@ def f(x): return x() a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) @@ -4305,7 +4321,7 @@ def f(x): l.append(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as excinfo: + with py.test.raises(UnionError) as excinfo: a.build_types(f, [int]) assert 'Happened at file' in excinfo.value.source assert 'Known variable annotations:' in excinfo.value.source @@ -4314,7 +4330,7 @@ def 
f(s, x): return s.format(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) @@ -4350,7 +4366,7 @@ def f(x): a, b = x a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, + py.test.raises(AnnotatorError, a.build_types, f, [annmodel.s_None]) def test_class___name__(self): @@ -4464,10 +4480,10 @@ o = O2(n) o.x = 20 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f1, [int]) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f2, [int]) def test_property_union_2(self): @@ -4496,7 +4512,7 @@ a = self.RPythonAnnotator() # Ideally, this should translate to something sensible, # but for now, AnnotatorError is better than silently mistranslating. - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_property_union_3(self): @@ -4516,7 +4532,7 @@ obj = B() return obj.x a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_dict_can_be_none_ordering_issue(self): diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -3,6 +3,7 @@ from rpython.annotator.test.test_annrpython import graphof from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent +from rpython.annotator.model import AnnotatorError class TestAnnotateAndSimplifyTestCase(parent): @@ -132,5 +133,5 @@ cls = C return cls().foo a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): 
a.build_types(f, [int]) diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py --- a/rpython/rlib/rstrategies/rstrategies.py +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -41,7 +41,7 @@ attrs['get_storage'] = get_storage attrs['set_storage'] = set_storage return type.__new__(self, name, bases, attrs) - + def strategy(generalize=None, singleton=True): """ Strategy classes must be decorated with this. @@ -71,19 +71,19 @@ class StrategyFactory(object): _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] factory_instance_counter = 0 - + def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = self._collect_subclasses(root_class) self.strategies = [] self.logger = logger.Logger() - + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter StrategyFactory.factory_instance_counter += 1 - + self._create_strategy_instances(root_class, all_strategy_classes) - + def _create_strategy_instances(self, root_class, all_strategy_classes): for strategy_class in all_strategy_classes: if strategy_class._is_strategy: @@ -91,11 +91,11 @@ self.strategies.append(strategy_class) self._patch_strategy_class(strategy_class, root_class) self._order_strategies() - + # ============================= # API methods # ============================= - + def switch_strategy(self, w_self, new_strategy_type, new_element=None): """ Switch the strategy of w_self to the new type. @@ -113,7 +113,7 @@ new_strategy.strategy_switched(w_self) self.log(w_self, new_strategy, old_strategy, new_element) return new_strategy - + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): """ Initialize the strategy and storage fields of w_self. 
@@ -135,7 +135,7 @@ strategy.strategy_switched(w_self) self.log(w_self, strategy, None, element) return strategy - + @jit.unroll_safe def strategy_type_for(self, objects): """ @@ -153,8 +153,8 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type - raise Exception("Could not find strategy to handle: %s" % objects) - + raise ValueError("Could not find strategy to handle: %s" % objects) + def decorate_strategies(self, transitions): """ As an alternative to decorating all strategies with @strategy, @@ -165,11 +165,11 @@ "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): strategy(generalized)(strategy_class) - + # ============================= # The following methods can be overwritten to customize certain aspects of the factory. # ============================= - + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): """ Return a functional instance of strategy_type. @@ -177,7 +177,7 @@ The two additional parameters should be ignored for singleton-strategies. """ return strategy_type() - + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): """ This can be overwritten into a more appropriate call to self.logger.log @@ -190,7 +190,7 @@ typename = "" cause = "Switched" if old_strategy else "Created" self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) - + @specialize.call_location() def log_string_for_object(self, obj): """ @@ -198,8 +198,8 @@ Keep the specialize-annotation in order to handle different kinds of objects here. """ return obj.__class__.__name__ if obj else "" - - # These storage accessors are specialized because the storage field is + + # These storage accessors are specialized because the storage field is # populated by erased-objects which seem to be incompatible sometimes. 
@specialize.call_location() def get_storage(self, obj): @@ -207,16 +207,16 @@ @specialize.call_location() def set_storage(self, obj, val): return obj._set_storage(val) - + def get_strategy(self, obj): return obj._get_strategy() def set_strategy(self, obj, val): return obj._set_strategy(val) - + # ============================= # Internal methods # ============================= - + def _patch_strategy_class(self, strategy_class, root_class): "NOT_RPYTHON" # Patch root class: Add default handler for visitor @@ -225,12 +225,12 @@ funcname = "_convert_storage_from_" + strategy_class.__name__ _convert_storage_from_OTHER.func_name = funcname setattr(root_class, funcname, _convert_storage_from_OTHER) - + # Patch strategy class: Add polymorphic visitor function def _convert_storage_to(self, w_self, new_strategy): getattr(new_strategy, funcname)(w_self, self) strategy_class._convert_storage_to = _convert_storage_to - + def _collect_subclasses(self, cls): "NOT_RPYTHON" subclasses = [] @@ -238,7 +238,7 @@ subclasses.append(subcls) subclasses.extend(self._collect_subclasses(subcls)) return subclasses - + def _order_strategies(self): "NOT_RPYTHON" def get_generalization_depth(strategy, visited=None): @@ -256,11 +256,11 @@ else: return 0 self.strategies.sort(key=get_generalization_depth, reverse=True) - + @jit.elidable def strategy_singleton_instance(self, strategy_class): return getattr(strategy_class, self.strategy_singleton_field) - + def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. # The constructor does meta stuff which is not possible after translation. @@ -271,65 +271,65 @@ == Required: strategy_factory(self) - Access to StorageFactory """ - + def strategy_switched(self, w_self): # Overwrite this method for a hook whenever the strategy # of w_self was switched to self. 
pass - + # Main Fixedsize API - + def store(self, w_self, index0, value): raise NotImplementedError("Abstract method") - + def fetch(self, w_self, index0): raise NotImplementedError("Abstract method") - + def size(self, w_self): raise NotImplementedError("Abstract method") - + # Fixedsize utility methods - + def slice(self, w_self, start, end): return [ self.fetch(w_self, i) for i in range(start, end)] - + def fetch_all(self, w_self): return self.slice(w_self, 0, self.size(w_self)) - + def store_all(self, w_self, elements): for i, e in enumerate(elements): self.store(w_self, i, e) - + # Main Varsize API - + def insert(self, w_self, index0, list_w): raise NotImplementedError("Abstract method") - + def delete(self, w_self, start, end): raise NotImplementedError("Abstract method") - + # Varsize utility methods - + def append(self, w_self, list_w): - self.insert(w_self, self.size(w_self), list_w) - + self.insert(w_self, self.size(w_self), list_w) + def pop(self, w_self, index0): e = self.fetch(w_self, index0) self.delete(w_self, index0, index0+1) return e # Internal methods - + def _initialize_storage(self, w_self, initial_size): raise NotImplementedError("Abstract method") - + def _check_can_handle(self, value): raise NotImplementedError("Abstract method") - + def _convert_storage_to(self, w_self, new_strategy): # This will be overwritten in _patch_strategy_class new_strategy._convert_storage_from(w_self, self) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): # This is a very unefficient (but most generic) way to do this. 
@@ -338,16 +338,16 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) for i, field in enumerate(storage): self.store(w_self, i, field) - + def _generalize_for_value(self, w_self, value): strategy_type = self.generalized_strategy_for(value) new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) return new_instance - + def _cannot_handle_store(self, w_self, index0, value): new_instance = self._generalize_for_value(w_self, value) new_instance.store(w_self, index0, value) - + def _cannot_handle_insert(self, w_self, index0, list_w): # TODO - optimize. Prevent multiple generalizations and slicing done by callers. new_strategy = self._generalize_for_value(w_self, list_w[0]) @@ -358,7 +358,7 @@ class EmptyStrategy(AbstractStrategy): # == Required: # See AbstractStrategy - + def _initialize_storage(self, w_self, initial_size): assert initial_size == 0 self.set_storage(w_self, None) @@ -366,7 +366,7 @@ self.set_storage(w_self, None) def _check_can_handle(self, value): return False - + def fetch(self, w_self, index0): raise IndexError def store(self, w_self, index0, value): @@ -389,7 +389,7 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # value(self) - the single value contained in this strategy. Should be constant. 
- + def _initialize_storage(self, w_self, initial_size): storage_obj = SingleValueStrategyStorage(initial_size) self.set_storage(w_self, storage_obj) @@ -397,7 +397,7 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) def _check_can_handle(self, value): return value is self.value() - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) return self.value() @@ -411,7 +411,7 @@ self.get_storage(w_self).size -= (end - start) def size(self, w_self): return self.get_storage(w_self).size - + @jit.unroll_safe def insert(self, w_self, index0, list_w): storage_obj = self.get_storage(w_self) @@ -429,18 +429,18 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # default_value(self) - The value to be initially contained in this strategy - + def _initialize_storage(self, w_self, initial_size): default = self._unwrap(self.default_value()) self.set_storage(w_self, [default] * initial_size) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): size = previous_strategy.size(w_self) new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) for i in range(size) ] self.set_storage(w_self, new_storage) - + def store(self, w_self, index0, wrapped_value): self.check_index_store(w_self, index0) if self._check_can_handle(wrapped_value): @@ -448,21 +448,21 @@ self.get_storage(w_self)[index0] = unwrapped else: self._cannot_handle_store(w_self, index0, wrapped_value) - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) unwrapped = self.get_storage(w_self)[index0] return self._wrap(unwrapped) - + def _wrap(self, value): raise NotImplementedError("Abstract method") - + def _unwrap(self, value): raise NotImplementedError("Abstract method") - + def size(self, w_self): return len(self.get_storage(w_self)) - + @jit.unroll_safe def insert(self, w_self, start, list_w): # This is following Python's behaviour - insert automatically @@ -475,27 +475,27 @@ else: 
self._cannot_handle_insert(w_self, start + i, list_w[i:]) return - + def delete(self, w_self, start, end): self.check_index_range(w_self, start, end) assert start >= 0 and end >= 0 del self.get_storage(w_self)[start : end] - + class GenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value def _unwrap(self, value): return value def _check_can_handle(self, wrapped_value): return True - + class WeakGenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value() or self.default_value() def _unwrap(self, value): @@ -503,7 +503,7 @@ return weakref.ref(value) def _check_can_handle(self, wrapped_value): return True - + # ============== Mixins for index checking operations ============== class SafeIndexingMixin(object): @@ -535,37 +535,37 @@ # See StrategyWithStorage # wrap(self, value) - Return a boxed object for the primitive value # unwrap(self, value) - Return the unboxed primitive value of value - + def _unwrap(self, value): return self.unwrap(value) def _wrap(self, value): return self.wrap(value) - + class SingleTypeStrategy(SpecializedStrategy): # == Required Functions: # See SpecializedStrategy # contained_type - The wrapped type that can be stored in this strategy - + def _check_can_handle(self, value): return isinstance(value, self.contained_type) - + class TaggingStrategy(SingleTypeStrategy): """This strategy uses a special tag value to represent a single additional object.""" # == Required: # See SingleTypeStrategy # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - + def _check_can_handle(self, value): return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ self.unwrap(value) != self.unwrapped_tagged_value()) - + def _unwrap(self, value): if value is self.wrapped_tagged_value(): return self.unwrapped_tagged_value() 
return self.unwrap(value) - + def _wrap(self, value): if value == self.unwrapped_tagged_value(): return self.wrapped_tagged_value() diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py --- a/rpython/rlib/rstrategies/test/test_rstrategies.py +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -69,7 +69,7 @@ class Factory(rs.StrategyFactory): switching_log = [] - + def __init__(self, root_class): self.decorate_strategies({ EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], @@ -79,15 +79,15 @@ IntegerOrNilStrategy: [GenericStrategy], }) rs.StrategyFactory.__init__(self, root_class) - + def instantiate_strategy(self, strategy_type, w_self=None, size=0): return strategy_type(self, w_self, size) - - def set_strategy(self, w_list, strategy): + + def set_strategy(self, w_list, strategy): old_strategy = self.get_strategy(w_list) self.switching_log.append((old_strategy, strategy)) super(Factory, self).set_strategy(w_list, strategy) - + def clear_log(self): del self.switching_log[:] @@ -107,7 +107,7 @@ class WeakGenericStrategy(AbstractStrategy): import_from_mixin(rs.WeakGenericStrategy) def default_value(self): return w_nil - + class IntegerStrategy(AbstractStrategy): import_from_mixin(rs.SingleTypeStrategy) contained_type = W_Integer @@ -123,7 +123,7 @@ def default_value(self): return w_nil def wrapped_tagged_value(self): return w_nil def unwrapped_tagged_value(self): import sys; return sys.maxint - + @rs.strategy(generalize=[], singleton=False) class NonSingletonStrategy(GenericStrategy): def __init__(self, factory, w_list=None, size=0): @@ -214,22 +214,22 @@ py.test.raises(IndexError, s.fetch, l, 10) py.test.raises(IndexError, s.delete, l, 0, 1) py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
- + def test_init_Nil(): do_test_initialization(NilStrategy) def test_init_Generic(): do_test_initialization(GenericStrategy, is_safe=False) - + def test_init_WeakGeneric(): do_test_initialization(WeakGenericStrategy) - + def test_init_Integer(): do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) - + def test_init_IntegerOrNil(): do_test_initialization(IntegerOrNilStrategy) - + # === Test Simple store def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): @@ -256,13 +256,13 @@ def test_store_Generic(): do_test_store(GenericStrategy, is_safe=False) - + def test_store_WeakGeneric(): do_test_store(WeakGenericStrategy, stored_value=w_nil) - + def test_store_Integer(): do_test_store(IntegerStrategy, stored_value=W_Integer(100)) - + def test_store_IntegerOrNil(): do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) do_test_store(IntegerOrNilStrategy, stored_value=w_nil) @@ -289,17 +289,17 @@ def test_insert_Generic(): do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_WeakGeneric(): do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_Integer(): do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_insert_IntegerOrNil(): do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_insert(IntegerOrNilStrategy, [w_nil]*6) - + # === Test Delete def do_test_delete(cls, values, indexing_unsafe=False): @@ -319,13 +319,13 @@ def test_delete_Generic(): do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) - + def test_delete_WeakGeneric(): do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_delete_Integer(): do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_delete_IntegerOrNil(): do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_delete(IntegerOrNilStrategy, [w_nil]*6) 
@@ -342,7 +342,7 @@ obj = W_Object() i = W_Integer(0) nil = w_nil - + assert_handles(EmptyStrategy, [], [nil, obj, i]) assert_handles(NilStrategy, [nil], [obj, i]) assert_handles(GenericStrategy, [nil, obj, i], []) @@ -392,7 +392,7 @@ o = W_Object() l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) assert isinstance(l.strategy, GenericStrategy) - + def test_transition_to_nonSingleton(): l = W_List(NilStrategy, 5) factory.switch_strategy(l, NonSingletonStrategy) @@ -467,12 +467,12 @@ v3 = [W_Object() for _ in range(l.size()) ] assert v2 != v assert v3 != v - + l.store_all(v2) assert l.fetch_all() == v2+v[4:] l.store_all(v3) assert l.fetch_all() == v3 - + py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ]) # === Test Weak Strategy @@ -488,7 +488,7 @@ assert False, "The default convert_storage_from() should not be called!" def convert_storage_from_special(self, w_self, other): s.copied += 1 - + monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special) monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default) try: @@ -507,7 +507,8 @@ assert factory.strategy_type_for([]) == EmptyStrategy monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False) try: - py.test.raises(Exception, factory.strategy_type_for, [W_Object(), W_Object()]) + with py.test.raises(ValueError): + factory.strategy_type_for([W_Object(), W_Object()]) finally: monkeypatch.undo() @@ -549,4 +550,3 @@ 'Created (EmptyStrategy) size 0 objects 1', 'Created (IntegerStrategy) size 3 objects 1', 'Switched (IntegerStrategy -> IntegerOrNilStrategy) size 3 objects 1 elements: W_Object'] - \ No newline at end of file diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -2,10 +2,10 @@ Weakref support in RPython. Basic regular weakrefs without callbacks are supported. 
This file contains the following additions: a form of WeakKeyDictionary, and a limited version of WeakValueDictionary. -LLType only for now! """ import weakref +from rpython.annotator.model import UnionError ref = weakref.ref # basic regular weakrefs are supported in RPython @@ -191,9 +191,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same key class!") + raise UnionError(s_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same value class!") + raise UnionError(s_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakKeyDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -120,25 +121,34 @@ f(1) interpret(f, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + at 
py.test.mark.xfail(reason="not implemented, messy") def test_rpython_free_values(): - import py; py.test.skip("XXX not implemented, messy") class VXDel: def __del__(self): state.freed.append(1) diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakValueDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -143,7 +144,9 @@ else: d = RWeakValueDictionary(str, Y) d.set("x", X()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(UnionError): + interpret(g, [1]) def test_rpython_RWeakValueDictionary_or_None(): diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,8 +1,8 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr -from rpython.annotator import model as annmodel -from rpython.annotator.signature import annotation +from rpython.annotator.model import unionof +from rpython.annotator.signature import annotation, SignatureError import py, sys @@ -130,7 +130,7 @@ "Argument number mismatch" for i, expected in enumerate(signature_args): - arg = annmodel.unionof(args_s[i], expected) + arg = unionof(args_s[i], expected) if not expected.contains(arg): name = getattr(self, 'name', None) if not name: @@ -138,7 +138,7 @@ name = self.instance.__name__ except AttributeError: name = '?' 
- raise Exception("In call to external function %r:\n" + raise SignatureError("In call to external function %r:\n" "arg %d must be %s,\n" " got %s" % ( name, i+1, expected, args_s[i])) diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -2,9 +2,10 @@ from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ is_external, lazy_register -from rpython.annotator import model as annmodel +from rpython.annotator.model import SomeInteger, SomeString, AnnotatorError from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy +from rpython.annotator.signature import SignatureError from rpython.rtyper.test.test_llinterp import interpret class TestExtFuncEntry: @@ -21,8 +22,8 @@ class BTestFuncEntry(ExtFuncEntry): _about_ = b name = 'b' - signature_args = [annmodel.SomeInteger()] - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] + signature_result = SomeInteger() def f(): return b(2) @@ -30,7 +31,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) res = interpret(f, []) assert res == 42 @@ -45,8 +46,8 @@ class CTestFuncEntry(ExtFuncEntry): _about_ = c name = 'ccc' - signature_args = [annmodel.SomeInteger()] * 2 - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] * 2 + signature_result = SomeInteger() def lltypeimpl(y, x): return y + x @@ -72,7 +73,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_tuple_args(self): """ @@ -96,7 +97,7 @@ s = a.build_types(f, []) # Not a very good assertion, but at least it means _something_ happened. 
- assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_return_goes_back(self): """ @@ -118,7 +119,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_specialcase(self): """ @@ -135,10 +136,10 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeString) + assert isinstance(s, SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = SomeString(no_nul=True) def os_open(s): pass register_external(os_open, [str0], None) @@ -152,25 +153,32 @@ a.translator.config.translation.check_str_without_nul=True def g(s): return os_open(s) - py.test.raises(Exception, a.build_types, g, [str]) + with py.test.raises(SignatureError): + a.build_types(g, [str]) a.build_types(g, [str0]) # Does not raise - def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + def test_list_of_str0_unchecked(self): + str0 = SomeString(no_nul=True) + def os_execve(l): pass + register_external(os_execve, [[str0]], None) + def f(l): return os_execve(l) + policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) + assert a.translator.config.translation.check_str_without_nul == False a.build_types(f, [[str]]) # Does not raise - assert a.translator.config.translation.check_str_without_nul == False # Now enable the str0 check, and try again with a similar function a.translator.config.translation.check_str_without_nul=True + def g(l): return os_execve(l) - py.test.raises(Exception, a.build_types, g, [[str]]) + + with py.test.raises(AnnotatorError): + # fails with TooLateForChange + a.build_types(g, [[str]]) a.build_types(g, [[str0]]) # Does not raise - - From noreply at buildbot.pypy.org Wed Oct 14 02:09:34 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 02:09:34 +0200 
(CEST) Subject: [pypy-commit] pypy no-class-specialize: hg merge default Message-ID: <20151014000934.5D1921C103D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80186:7e0563a8fbc0 Date: 2015-10-14 00:54 +0100 http://bitbucket.org/pypy/pypy/changeset/7e0563a8fbc0/ Log: hg merge default diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,9 @@ ffi.new_handle() returns handles that work more like CPython's: they remain valid as long as the target exists (unlike the previous version, where handles become invalid *before* the __del__ is called). + +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -74,10 +74,10 @@ call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -95,9 +95,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -108,10 +108,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -134,24 +134,29 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,8 +159,7 @@ af2 = ufunc(af) assert all(af2 == af * 2) ac = arange(10, dtype=complex) - skip('casting not implemented yet') - ac1 = ufunc(ac) + 
raises(TypeError, ufunc, ac) def test_frompyfunc_2d_sig(self): import sys @@ -199,6 +198,10 @@ ai2 = ufunc(aiV) assert (ai2 == aiV * 2).all() + ai = arange(0).reshape(0, 1, 1) + ao = ufunc(ai) + assert ao.shape == (0, 1, 1) + def test_frompyfunc_needs_nditer(self): import sys from numpy import frompyfunc, dtype, arange @@ -268,6 +271,54 @@ assert out0.shape == in0.shape assert (out0 == in0 * 2).all() + def test_frompyfunc_casting(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def times2_int(in0, out0): + assert in0.dtype == int + assert out0.dtype == int + # hack to assing to a 0-dim array + out0.real = in0 * 2 + + def times2_complex(in0, out0): + assert in0.dtype == complex + assert out0.dtype == complex + out0.real = in0.real * 2 + out0.imag = in0.imag + + def times2_complex0(in0): + assert in0.dtype == complex + return in0 * 2 + + def times2_int0(in0): + assert in0.dtype == int + return in0 * 2 + + times2stacked = np.frompyfunc([times2_int, times2_complex], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=True, signature='()->()', + ) + times2 = np.frompyfunc([times2_int0, times2_complex0], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d) + out0 = times2stacked(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + out0 = times2(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): @@ -1393,7 +1444,7 @@ def test_add_doc(self): import sys if '__pypy__' not in sys.builtin_module_names: - skip('') + skip('cpython sets docstrings differently') try: from 
numpy import set_docstring except ImportError: diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -709,6 +709,32 @@ raise oefmt(space.w_TypeError, "ufunc '%s' not supported for the input types", self.name) +def _match_dtypes(space, indtypes, targetdtypes, i_target, casting): + allok = True + for i in range(len(indtypes)): + origin = indtypes[i] + target = targetdtypes[i + i_target] + if origin is None: + continue + if target is None: + continue + if not can_cast_type(space, origin, target, casting): + allok = False + break + return allok + +def _raise_err_msg(self, space, dtypes0, dtypes1): + dtypesstr = '' + for d in dtypes0: + if d is None: + dtypesstr += 'None,' + else: + dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) + _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ + for d in dtypes1]) + raise oefmt(space.w_TypeError, + "input dtype [%s] did not match any known dtypes [%s] ", + dtypesstr,_dtypesstr) class W_UfuncGeneric(W_Ufunc): @@ -799,29 +825,36 @@ outargs0 = outargs[0] assert isinstance(inargs0, W_NDimArray) assert isinstance(outargs0, W_NDimArray) + nin = self.nin + assert nin >= 0 res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap outargs using __array_wrap__ + if self.stack_inputs: + loop.call_many_to_many(space, new_shape, func, + dtypes, [], inargs + outargs, []) + if len(outargs) < 2: + return outargs[0] + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - res_dtype, inargs, outargs[0]) + dtypes[:nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - res_dtype, inargs, outargs) + dtypes[:nin], dtypes[nin:], inargs, outargs) + w_casting = space.w_None + w_op_dtypes = space.w_None for tf in need_to_cast: if tf: - raise oefmt(space.w_NotImplementedError, "casting not 
supported yet") + w_casting = space.wrap('safe') + w_op_dtypes = space.newtuple([space.wrap(d) for d in dtypes]) + w_flags = space.w_None # NOT 'external_loop', we do coalescing by core_num_dims - w_op_flags = space.newtuple([space.wrap(r) for r in ['readonly'] * len(inargs)] + \ - [space.wrap(r) for r in ['readwrite'] * len(outargs)]) - w_op_dtypes = space.w_None - w_casting = space.w_None + w_ro = space.newtuple([space.wrap('readonly'), space.wrap('copy')]) + w_rw = space.newtuple([space.wrap('readwrite'), space.wrap('updateifcopy')]) + + w_op_flags = space.newtuple([w_ro] * len(inargs) + [w_rw] * len(outargs)) w_op_axes = space.w_None - #print '\nsignature', sig - #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 'broad' in d] - #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' in d] - #print 'shapes',[d.get_shape() for d in inargs + outargs] - #print 'steps',[d.implementation.strides for d in inargs + outargs] if isinstance(func, W_GenericUFuncCaller): # Use GeneralizeUfunc interface with signature # Unlike numpy, we will not broadcast dims before @@ -934,19 +967,32 @@ # linear_search_type_resolver in numpy ufunc_type_resolutions.c # type_tup can be '', a tuple of dtypes, or a string # of the form d,t -> D where the letters are dtype specs - nop = len(inargs) + len(outargs) + + # XXX why does the next line not pass translation? 
+ # dtypes = [i.get_dtype() for i in inargs] dtypes = [] + for i in inargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) + for i in outargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) if isinstance(type_tup, str) and len(type_tup) > 0: try: if len(type_tup) == 1: - dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs + s_dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs elif len(type_tup) == self.nargs + 2: + s_dtypes = [] for i in range(self.nin): - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) #skip the '->' in the signature for i in range(self.nout): j = i + self.nin + 2 - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ @@ -955,42 +1001,29 @@ except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ " call to %s with type-string '%s'", self.name, type_tup) - else: - # XXX why does the next line not pass translation? 
- # dtypes = [i.get_dtype() for i in inargs] - for i in inargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) - for i in outargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) + # Make sure args can be cast to dtypes + if not _match_dtypes(space, dtypes, s_dtypes, 0, "safe"): + _raise_err_msg(self, space, dtypes, s_dtypes) + dtypes = s_dtypes #Find the first matchup of dtypes with _dtypes for i in range(0, len(_dtypes), self.nargs): - allok = True - for j in range(self.nargs): - if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: - allok = False + allok = _match_dtypes(space, dtypes, _dtypes, i, "no") if allok: break else: - if len(self.funcs) > 1: - - dtypesstr = '' - for d in dtypes: - if d is None: - dtypesstr += 'None,' - else: - dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) - _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ - for d in _dtypes]) - raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", - dtypesstr,_dtypesstr) - i = 0 + # No exact matches, can we cast? + for i in range(0, len(_dtypes), self.nargs): + allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") + if allok: + end = i + self.nargs + assert i >= 0 + assert end >=0 + dtypes = _dtypes[i:end] + break + else: + if len(self.funcs) > 1: + _raise_err_msg(self, space, dtypes, _dtypes) + i = 0 # Fill in empty dtypes for j in range(self.nargs): if dtypes[j] is None: @@ -1086,7 +1119,7 @@ for j in range(offset, len(iter_shape)): x = iter_shape[j + offset] y = dims_to_broadcast[j] - if (x > y and x % y) or y %x: + if y != 0 and x != 0 and ((x > y and x % y) or y %x): raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its broadcast dimension %d " "(size %d is different from %d)", @@ -1123,7 +1156,7 @@ # the current op (signalling it can handle ndarray's). 
# TODO parse and handle subok - # TODO handle flags, op_flags + # TODO handle more flags, op_flags #print 'iter_shape',iter_shape,'arg_shapes',arg_shapes,'matched_dims',matched_dims return iter_shape, arg_shapes, matched_dims diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from contextlib import contextmanager from rpython.flowspace.model import Constant -from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, +from rpython.annotator.model import ( + SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, + SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) + SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -223,7 +223,8 @@ x = int(x) result = SomeInteger(nonneg = x>=0) else: - raise Exception("seeing a prebuilt long (value %s)" % hex(x)) + # XXX: better error reporting? 
+ raise ValueError("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses no_nul = not '\x00' in x if len(x) == 1: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -112,14 +112,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -775,8 +775,9 @@ s_init = basedesc.s_read_attribute('__init__') parent_has_init = isinstance(s_init, SomePBC) if has_init and not parent_has_init: - raise Exception("some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) # make a PBC of MethodDescs, one for the __init__ of each class initdescs = [] for desc, classdef in zip(descs, classdefs): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4,10 +4,12 @@ from rpython.conftest import option from rpython.annotator import model as annmodel +from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator +from rpython.annotator.classdef import NoSuchAttrError from 
rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy -from rpython.annotator.signature import Sig +from rpython.annotator.signature import Sig, SignatureError from rpython.annotator.listdef import ListDef, ListChangeUnallowed from rpython.annotator.dictdef import DictDef from rpython.flowspace.model import * @@ -213,7 +215,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -360,7 +362,7 @@ def f(l): return g(*l) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [[int]]) def test_star_unpack_and_keywords(self): @@ -946,14 +948,16 @@ def f(): return large_constant a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(ValueError): + a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it def test_add_different_ints(self): def f(a, b): return a + b a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): def f(a, b): @@ -963,7 +967,8 @@ c = b return c a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): def f(a): @@ -2599,14 +2604,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + py.test.raises(AnnotatorError, a.build_types, g, []) def 
test_import_from_mixin(self): class M(object): @@ -2681,7 +2686,8 @@ return a.x # should explode here a = self.RPythonAnnotator() - e = py.test.raises(Exception, a.build_types, f, [int]) + with py.test.raises(NoSuchAttrError) as excinfo: + a.build_types(f, [int]) # this should explode on reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. # But it looks hard to fix in general: we don't know yet during 'a.x' @@ -2915,7 +2921,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_simpler(self): def fun(x, y): @@ -2927,7 +2934,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_lambda(self): def fun(x, y): @@ -2941,7 +2949,8 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): def g(x, y=5): @@ -2991,8 +3000,8 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError - py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) + with py.test.raises(NoSuchAttrError): + a.build_types(fun, [int]) def test_slots_enforce_attrs(self): class Superbase(object): @@ -3125,7 +3134,8 @@ return a.n() a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fun, [bool]) + with py.test.raises(AnnotatorError): + a.build_types(fun, [bool]) def test_float_cmp(self): def fun(x, y): @@ -3214,6 +3224,7 @@ assert isinstance(s.items[2], annmodel.SomeInstance) 
assert s.items[2].flags == {} + @py.test.mark.xfail def test_no_access_directly_on_heap(self): from rpython.rlib.jit import hint @@ -3230,7 +3241,8 @@ i.x = x a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(AnnotatorError): + a.build_types(f, []) class M: @@ -3254,7 +3266,7 @@ c.m.l.append(x) a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3262,7 +3274,7 @@ c.m.d[None] = x a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3270,7 +3282,7 @@ c.m.d[x] = None a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_ctr_location(self): class A: @@ -3329,7 +3341,8 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int, int]) + with py.test.raises(UnionError): + a.build_types(f, [int, int]) def test_compare_with_zero(self): def g(): @@ -3451,22 +3464,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3504,7 +3517,7 @@ 
return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -3517,20 +3530,20 @@ return "xyz".find("x", s, e) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".rfind("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".count("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) @@ -3704,7 +3717,8 @@ raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + with py.test.raises(AssertionError): + a.build_types(f, []) def test_enumerate(self): def f(): @@ -4089,7 +4103,8 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fn, []) + with py.test.raises(NoSuchAttrError): + a.build_types(fn, []) def test_lower_char(self): def fn(c): @@ -4201,7 +4216,7 @@ return "bbb" a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) the_exc = exc.value @@ -4217,7 +4232,7 @@ return (1, 2) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) 
as exc: a.build_types(f, [int]) assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg @@ -4230,7 +4245,7 @@ return -1 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot prove that these integers are of the " @@ -4247,7 +4262,7 @@ return B() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify instances with no common base class" @@ -4263,7 +4278,7 @@ return d.itervalues() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify incompatible iterator variants" in @@ -4275,7 +4290,7 @@ a = A() return getattr(a, y) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("variable argument to getattr" in exc.value.msg) @@ -4283,7 +4298,7 @@ def f(x): return x() a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) @@ -4292,7 +4307,7 @@ def f(x): l.append(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as excinfo: + with py.test.raises(UnionError) as excinfo: a.build_types(f, [int]) assert 'Happened at file' in excinfo.value.source assert 'Known variable annotations:' in excinfo.value.source @@ -4301,7 +4316,7 @@ def f(s, x): return s.format(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) @@ -4337,7 +4352,7 @@ def f(x): a, b = x a = 
self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, + py.test.raises(AnnotatorError, a.build_types, f, [annmodel.s_None]) def test_class___name__(self): @@ -4451,10 +4466,10 @@ o = O2(n) o.x = 20 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f1, [int]) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f2, [int]) def test_property_union_2(self): @@ -4483,7 +4498,7 @@ a = self.RPythonAnnotator() # Ideally, this should translate to something sensible, # but for now, AnnotatorError is better than silently mistranslating. - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_property_union_3(self): @@ -4503,7 +4518,7 @@ obj = B() return obj.x a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_dict_can_be_none_ordering_issue(self): diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -3,6 +3,7 @@ from rpython.annotator.test.test_annrpython import graphof from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent +from rpython.annotator.model import AnnotatorError class TestAnnotateAndSimplifyTestCase(parent): @@ -132,5 +133,5 @@ cls = C return cls().foo a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py --- a/rpython/rlib/rstrategies/rstrategies.py +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -41,7 +41,7 @@ attrs['get_storage'] = get_storage 
attrs['set_storage'] = set_storage return type.__new__(self, name, bases, attrs) - + def strategy(generalize=None, singleton=True): """ Strategy classes must be decorated with this. @@ -71,19 +71,19 @@ class StrategyFactory(object): _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] factory_instance_counter = 0 - + def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = self._collect_subclasses(root_class) self.strategies = [] self.logger = logger.Logger() - + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter StrategyFactory.factory_instance_counter += 1 - + self._create_strategy_instances(root_class, all_strategy_classes) - + def _create_strategy_instances(self, root_class, all_strategy_classes): for strategy_class in all_strategy_classes: if strategy_class._is_strategy: @@ -91,11 +91,11 @@ self.strategies.append(strategy_class) self._patch_strategy_class(strategy_class, root_class) self._order_strategies() - + # ============================= # API methods # ============================= - + def switch_strategy(self, w_self, new_strategy_type, new_element=None): """ Switch the strategy of w_self to the new type. @@ -113,7 +113,7 @@ new_strategy.strategy_switched(w_self) self.log(w_self, new_strategy, old_strategy, new_element) return new_strategy - + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): """ Initialize the strategy and storage fields of w_self. 
@@ -135,7 +135,7 @@ strategy.strategy_switched(w_self) self.log(w_self, strategy, None, element) return strategy - + @jit.unroll_safe def strategy_type_for(self, objects): """ @@ -153,8 +153,8 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type - raise Exception("Could not find strategy to handle: %s" % objects) - + raise ValueError("Could not find strategy to handle: %s" % objects) + def decorate_strategies(self, transitions): """ As an alternative to decorating all strategies with @strategy, @@ -165,11 +165,11 @@ "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): strategy(generalized)(strategy_class) - + # ============================= # The following methods can be overwritten to customize certain aspects of the factory. # ============================= - + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): """ Return a functional instance of strategy_type. @@ -177,7 +177,7 @@ The two additional parameters should be ignored for singleton-strategies. """ return strategy_type() - + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): """ This can be overwritten into a more appropriate call to self.logger.log @@ -190,7 +190,7 @@ typename = "" cause = "Switched" if old_strategy else "Created" self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) - + @specialize.call_location() def log_string_for_object(self, obj): """ @@ -198,8 +198,8 @@ Keep the specialize-annotation in order to handle different kinds of objects here. """ return obj.__class__.__name__ if obj else "" - - # These storage accessors are specialized because the storage field is + + # These storage accessors are specialized because the storage field is # populated by erased-objects which seem to be incompatible sometimes. 
@specialize.call_location() def get_storage(self, obj): @@ -207,16 +207,16 @@ @specialize.call_location() def set_storage(self, obj, val): return obj._set_storage(val) - + def get_strategy(self, obj): return obj._get_strategy() def set_strategy(self, obj, val): return obj._set_strategy(val) - + # ============================= # Internal methods # ============================= - + def _patch_strategy_class(self, strategy_class, root_class): "NOT_RPYTHON" # Patch root class: Add default handler for visitor @@ -225,12 +225,12 @@ funcname = "_convert_storage_from_" + strategy_class.__name__ _convert_storage_from_OTHER.func_name = funcname setattr(root_class, funcname, _convert_storage_from_OTHER) - + # Patch strategy class: Add polymorphic visitor function def _convert_storage_to(self, w_self, new_strategy): getattr(new_strategy, funcname)(w_self, self) strategy_class._convert_storage_to = _convert_storage_to - + def _collect_subclasses(self, cls): "NOT_RPYTHON" subclasses = [] @@ -238,7 +238,7 @@ subclasses.append(subcls) subclasses.extend(self._collect_subclasses(subcls)) return subclasses - + def _order_strategies(self): "NOT_RPYTHON" def get_generalization_depth(strategy, visited=None): @@ -256,11 +256,11 @@ else: return 0 self.strategies.sort(key=get_generalization_depth, reverse=True) - + @jit.elidable def strategy_singleton_instance(self, strategy_class): return getattr(strategy_class, self.strategy_singleton_field) - + def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. # The constructor does meta stuff which is not possible after translation. @@ -271,65 +271,65 @@ == Required: strategy_factory(self) - Access to StorageFactory """ - + def strategy_switched(self, w_self): # Overwrite this method for a hook whenever the strategy # of w_self was switched to self. 
pass - + # Main Fixedsize API - + def store(self, w_self, index0, value): raise NotImplementedError("Abstract method") - + def fetch(self, w_self, index0): raise NotImplementedError("Abstract method") - + def size(self, w_self): raise NotImplementedError("Abstract method") - + # Fixedsize utility methods - + def slice(self, w_self, start, end): return [ self.fetch(w_self, i) for i in range(start, end)] - + def fetch_all(self, w_self): return self.slice(w_self, 0, self.size(w_self)) - + def store_all(self, w_self, elements): for i, e in enumerate(elements): self.store(w_self, i, e) - + # Main Varsize API - + def insert(self, w_self, index0, list_w): raise NotImplementedError("Abstract method") - + def delete(self, w_self, start, end): raise NotImplementedError("Abstract method") - + # Varsize utility methods - + def append(self, w_self, list_w): - self.insert(w_self, self.size(w_self), list_w) - + self.insert(w_self, self.size(w_self), list_w) + def pop(self, w_self, index0): e = self.fetch(w_self, index0) self.delete(w_self, index0, index0+1) return e # Internal methods - + def _initialize_storage(self, w_self, initial_size): raise NotImplementedError("Abstract method") - + def _check_can_handle(self, value): raise NotImplementedError("Abstract method") - + def _convert_storage_to(self, w_self, new_strategy): # This will be overwritten in _patch_strategy_class new_strategy._convert_storage_from(w_self, self) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): # This is a very unefficient (but most generic) way to do this. 
@@ -338,16 +338,16 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) for i, field in enumerate(storage): self.store(w_self, i, field) - + def _generalize_for_value(self, w_self, value): strategy_type = self.generalized_strategy_for(value) new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) return new_instance - + def _cannot_handle_store(self, w_self, index0, value): new_instance = self._generalize_for_value(w_self, value) new_instance.store(w_self, index0, value) - + def _cannot_handle_insert(self, w_self, index0, list_w): # TODO - optimize. Prevent multiple generalizations and slicing done by callers. new_strategy = self._generalize_for_value(w_self, list_w[0]) @@ -358,7 +358,7 @@ class EmptyStrategy(AbstractStrategy): # == Required: # See AbstractStrategy - + def _initialize_storage(self, w_self, initial_size): assert initial_size == 0 self.set_storage(w_self, None) @@ -366,7 +366,7 @@ self.set_storage(w_self, None) def _check_can_handle(self, value): return False - + def fetch(self, w_self, index0): raise IndexError def store(self, w_self, index0, value): @@ -389,7 +389,7 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # value(self) - the single value contained in this strategy. Should be constant. 
- + def _initialize_storage(self, w_self, initial_size): storage_obj = SingleValueStrategyStorage(initial_size) self.set_storage(w_self, storage_obj) @@ -397,7 +397,7 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) def _check_can_handle(self, value): return value is self.value() - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) return self.value() @@ -411,7 +411,7 @@ self.get_storage(w_self).size -= (end - start) def size(self, w_self): return self.get_storage(w_self).size - + @jit.unroll_safe def insert(self, w_self, index0, list_w): storage_obj = self.get_storage(w_self) @@ -429,18 +429,18 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # default_value(self) - The value to be initially contained in this strategy - + def _initialize_storage(self, w_self, initial_size): default = self._unwrap(self.default_value()) self.set_storage(w_self, [default] * initial_size) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): size = previous_strategy.size(w_self) new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) for i in range(size) ] self.set_storage(w_self, new_storage) - + def store(self, w_self, index0, wrapped_value): self.check_index_store(w_self, index0) if self._check_can_handle(wrapped_value): @@ -448,21 +448,21 @@ self.get_storage(w_self)[index0] = unwrapped else: self._cannot_handle_store(w_self, index0, wrapped_value) - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) unwrapped = self.get_storage(w_self)[index0] return self._wrap(unwrapped) - + def _wrap(self, value): raise NotImplementedError("Abstract method") - + def _unwrap(self, value): raise NotImplementedError("Abstract method") - + def size(self, w_self): return len(self.get_storage(w_self)) - + @jit.unroll_safe def insert(self, w_self, start, list_w): # This is following Python's behaviour - insert automatically @@ -475,27 +475,27 @@ else: 
self._cannot_handle_insert(w_self, start + i, list_w[i:]) return - + def delete(self, w_self, start, end): self.check_index_range(w_self, start, end) assert start >= 0 and end >= 0 del self.get_storage(w_self)[start : end] - + class GenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value def _unwrap(self, value): return value def _check_can_handle(self, wrapped_value): return True - + class WeakGenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value() or self.default_value() def _unwrap(self, value): @@ -503,7 +503,7 @@ return weakref.ref(value) def _check_can_handle(self, wrapped_value): return True - + # ============== Mixins for index checking operations ============== class SafeIndexingMixin(object): @@ -535,37 +535,37 @@ # See StrategyWithStorage # wrap(self, value) - Return a boxed object for the primitive value # unwrap(self, value) - Return the unboxed primitive value of value - + def _unwrap(self, value): return self.unwrap(value) def _wrap(self, value): return self.wrap(value) - + class SingleTypeStrategy(SpecializedStrategy): # == Required Functions: # See SpecializedStrategy # contained_type - The wrapped type that can be stored in this strategy - + def _check_can_handle(self, value): return isinstance(value, self.contained_type) - + class TaggingStrategy(SingleTypeStrategy): """This strategy uses a special tag value to represent a single additional object.""" # == Required: # See SingleTypeStrategy # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - + def _check_can_handle(self, value): return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ self.unwrap(value) != self.unwrapped_tagged_value()) - + def _unwrap(self, value): if value is self.wrapped_tagged_value(): return self.unwrapped_tagged_value() 
return self.unwrap(value) - + def _wrap(self, value): if value == self.unwrapped_tagged_value(): return self.wrapped_tagged_value() diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py --- a/rpython/rlib/rstrategies/test/test_rstrategies.py +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -69,7 +69,7 @@ class Factory(rs.StrategyFactory): switching_log = [] - + def __init__(self, root_class): self.decorate_strategies({ EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], @@ -79,15 +79,15 @@ IntegerOrNilStrategy: [GenericStrategy], }) rs.StrategyFactory.__init__(self, root_class) - + def instantiate_strategy(self, strategy_type, w_self=None, size=0): return strategy_type(self, w_self, size) - - def set_strategy(self, w_list, strategy): + + def set_strategy(self, w_list, strategy): old_strategy = self.get_strategy(w_list) self.switching_log.append((old_strategy, strategy)) super(Factory, self).set_strategy(w_list, strategy) - + def clear_log(self): del self.switching_log[:] @@ -107,7 +107,7 @@ class WeakGenericStrategy(AbstractStrategy): import_from_mixin(rs.WeakGenericStrategy) def default_value(self): return w_nil - + class IntegerStrategy(AbstractStrategy): import_from_mixin(rs.SingleTypeStrategy) contained_type = W_Integer @@ -123,7 +123,7 @@ def default_value(self): return w_nil def wrapped_tagged_value(self): return w_nil def unwrapped_tagged_value(self): import sys; return sys.maxint - + @rs.strategy(generalize=[], singleton=False) class NonSingletonStrategy(GenericStrategy): def __init__(self, factory, w_list=None, size=0): @@ -214,22 +214,22 @@ py.test.raises(IndexError, s.fetch, l, 10) py.test.raises(IndexError, s.delete, l, 0, 1) py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
- + def test_init_Nil(): do_test_initialization(NilStrategy) def test_init_Generic(): do_test_initialization(GenericStrategy, is_safe=False) - + def test_init_WeakGeneric(): do_test_initialization(WeakGenericStrategy) - + def test_init_Integer(): do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) - + def test_init_IntegerOrNil(): do_test_initialization(IntegerOrNilStrategy) - + # === Test Simple store def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): @@ -256,13 +256,13 @@ def test_store_Generic(): do_test_store(GenericStrategy, is_safe=False) - + def test_store_WeakGeneric(): do_test_store(WeakGenericStrategy, stored_value=w_nil) - + def test_store_Integer(): do_test_store(IntegerStrategy, stored_value=W_Integer(100)) - + def test_store_IntegerOrNil(): do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) do_test_store(IntegerOrNilStrategy, stored_value=w_nil) @@ -289,17 +289,17 @@ def test_insert_Generic(): do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_WeakGeneric(): do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_Integer(): do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_insert_IntegerOrNil(): do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_insert(IntegerOrNilStrategy, [w_nil]*6) - + # === Test Delete def do_test_delete(cls, values, indexing_unsafe=False): @@ -319,13 +319,13 @@ def test_delete_Generic(): do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) - + def test_delete_WeakGeneric(): do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_delete_Integer(): do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_delete_IntegerOrNil(): do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_delete(IntegerOrNilStrategy, [w_nil]*6) 
@@ -342,7 +342,7 @@ obj = W_Object() i = W_Integer(0) nil = w_nil - + assert_handles(EmptyStrategy, [], [nil, obj, i]) assert_handles(NilStrategy, [nil], [obj, i]) assert_handles(GenericStrategy, [nil, obj, i], []) @@ -392,7 +392,7 @@ o = W_Object() l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) assert isinstance(l.strategy, GenericStrategy) - + def test_transition_to_nonSingleton(): l = W_List(NilStrategy, 5) factory.switch_strategy(l, NonSingletonStrategy) @@ -467,12 +467,12 @@ v3 = [W_Object() for _ in range(l.size()) ] assert v2 != v assert v3 != v - + l.store_all(v2) assert l.fetch_all() == v2+v[4:] l.store_all(v3) assert l.fetch_all() == v3 - + py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ]) # === Test Weak Strategy @@ -488,7 +488,7 @@ assert False, "The default convert_storage_from() should not be called!" def convert_storage_from_special(self, w_self, other): s.copied += 1 - + monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special) monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default) try: @@ -507,7 +507,8 @@ assert factory.strategy_type_for([]) == EmptyStrategy monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False) try: - py.test.raises(Exception, factory.strategy_type_for, [W_Object(), W_Object()]) + with py.test.raises(ValueError): + factory.strategy_type_for([W_Object(), W_Object()]) finally: monkeypatch.undo() @@ -549,4 +550,3 @@ 'Created (EmptyStrategy) size 0 objects 1', 'Created (IntegerStrategy) size 3 objects 1', 'Switched (IntegerStrategy -> IntegerOrNilStrategy) size 3 objects 1 elements: W_Object'] - \ No newline at end of file diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -79,6 +79,7 @@ @specialize.arg(0) def ll_start_new_thread(func): + _check_thread_enabled() ident = c_thread_start(func) if ident == -1: 
raise error("can't start new thread") @@ -170,6 +171,18 @@ def _cleanup_(self): raise Exception("seeing a prebuilt rpython.rlib.rthread.Lock instance") +def _check_thread_enabled(): + pass +class Entry(ExtRegistryEntry): + _about_ = _check_thread_enabled + def compute_result_annotation(self): + translator = self.bookkeeper.annotator.translator + if not translator.config.translation.thread: + raise Exception( + "this RPython program uses threads: translate with '--thread'") + def specialize_call(self, hop): + hop.exception_cannot_occur() + # ____________________________________________________________ # # Stack size diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -92,12 +92,13 @@ PLT = "" size_decl = "" type_decl = "" + extra_align = "" else: PLT = "@PLT" type_decl = "\t.type\t%s, @function" % (tramp_name,) size_decl = "\t.size\t%s, .-%s" % ( tramp_name, tramp_name) - + extra_align = "\t.cfi_def_cfa_offset 8" assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), ( "rvmprof only supports x86-64 CPUs for now") @@ -132,7 +133,7 @@ \t.cfi_def_cfa_offset 16 \tcall %(cont_name)s%(PLT)s \taddq\t$8, %%rsp -\t.cfi_def_cfa_offset 8 +%(extra_align)s \tret \t.cfi_endproc %(size_decl)s diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -31,7 +31,11 @@ #include #include #include "vmprof_getpc.h" +#ifdef __APPLE__ +#include "libunwind.h" +#else #include "vmprof_unwind.h" +#endif #include "vmprof_mt.h" @@ -39,10 +43,12 @@ // functions copied from libunwind using dlopen +#ifndef __APPLE__ // should be linux only probably static int (*unw_get_reg)(unw_cursor_t*, int, unw_word_t*) = NULL; static int (*unw_step)(unw_cursor_t*) = NULL; static int (*unw_init_local)(unw_cursor_t *, unw_context_t *) = NULL; static int 
(*unw_get_proc_info)(unw_cursor_t *, unw_proc_info_t *) = NULL; +#endif static int profile_file = -1; static long prepare_interval_usec; @@ -67,6 +73,7 @@ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); +#ifndef __APPLE__ if (!unw_get_reg) { void *libhandle; @@ -81,6 +88,7 @@ if (!(unw_step = dlsym(libhandle, UNW_PREFIX "_step"))) goto error; } +#endif if (prepare_concurrent_bufs() < 0) return "out of memory"; @@ -206,7 +214,12 @@ void *ip; int n = 0; unw_cursor_t cursor; +#ifdef __APPLE__ + unw_context_t uc; + unw_getcontext(&uc); +#else unw_context_t uc = *ucontext; +#endif int ret = unw_init_local(&cursor, &uc); assert(ret >= 0); diff --git a/rpython/rlib/rvmprof/src/vmprof_unwind.h b/rpython/rlib/rvmprof/src/vmprof_unwind.h --- a/rpython/rlib/rvmprof/src/vmprof_unwind.h +++ b/rpython/rlib/rvmprof/src/vmprof_unwind.h @@ -64,8 +64,7 @@ typedef struct unw_cursor { unw_word_t opaque[UNW_TDEP_CURSOR_LEN]; - } -unw_cursor_t; + } unw_cursor_t; #define UNW_REG_IP UNW_X86_64_RIP #define UNW_REG_SP UNW_X86_64_RSP @@ -84,7 +83,7 @@ int format; /* unwind-info format (arch-specific) */ int unwind_info_size; /* size of the information (if applicable) */ void *unwind_info; /* unwind-info (arch-specific) */ - } -unw_proc_info_t; + } unw_proc_info_t; // end of copy + diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py b/rpython/rlib/rvmprof/test/test_rvmprof.py --- a/rpython/rlib/rvmprof/test/test_rvmprof.py +++ b/rpython/rlib/rvmprof/test/test_rvmprof.py @@ -2,6 +2,7 @@ from rpython.tool.udir import udir from rpython.rlib import rvmprof from rpython.translator.c.test.test_genc import compile +from rpython.rlib.objectmodel import we_are_translated def test_vmprof_execute_code_1(): @@ -96,7 +97,12 @@ @rvmprof.vmprof_execute_code("xcode1", lambda code, num: code) def main(code, num): print num - return 42 + s = 0 + for i in range(num): + s += (i << 1) + if s % 32423423423 == 0: + print s + return s tmpfilename = 
str(udir.join('test_rvmprof')) @@ -104,16 +110,37 @@ code = MyCode() rvmprof.register_code(code, get_name) fd = os.open(tmpfilename, os.O_WRONLY | os.O_CREAT, 0666) - rvmprof.enable(fd, 0.5) - res = main(code, 5) - assert res == 42 + if we_are_translated(): + num = 100000000 + period = 0.0001 + else: + num = 10000 + period = 0.9 + rvmprof.enable(fd, period) + res = main(code, num) + #assert res == 499999500000 rvmprof.disable() os.close(fd) return 0 + def check_profile(filename): + from vmprof import read_profile + + prof = read_profile(filename) + assert prof.get_tree().name.startswith("py:") + assert prof.get_tree().count + assert f() == 0 assert os.path.exists(tmpfilename) fn = compile(f, [], gcpolicy="minimark") - os.unlink(tmpfilename) assert fn() == 0 - assert os.path.exists(tmpfilename) + try: + import vmprof + except ImportError: + py.test.skip("vmprof unimportable") + else: + check_profile(tmpfilename) + finally: + assert os.path.exists(tmpfilename) + os.unlink(tmpfilename) + \ No newline at end of file diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -2,10 +2,10 @@ Weakref support in RPython. Basic regular weakrefs without callbacks are supported. This file contains the following additions: a form of WeakKeyDictionary, and a limited version of WeakValueDictionary. -LLType only for now! 
""" import weakref +from rpython.annotator.model import UnionError ref = weakref.ref # basic regular weakrefs are supported in RPython @@ -191,9 +191,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same key class!") + raise UnionError(s_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same value class!") + raise UnionError(s_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakKeyDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -120,25 +121,34 @@ f(1) interpret(f, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + at py.test.mark.xfail(reason="not implemented, messy") def test_rpython_free_values(): - import py; py.test.skip("XXX not implemented, messy") class 
VXDel: def __del__(self): state.freed.append(1) diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakValueDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -143,7 +144,9 @@ else: d = RWeakValueDictionary(str, Y) d.set("x", X()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(UnionError): + interpret(g, [1]) def test_rpython_RWeakValueDictionary_or_None(): diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,8 +1,8 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr -from rpython.annotator import model as annmodel -from rpython.annotator.signature import annotation +from rpython.annotator.model import unionof +from rpython.annotator.signature import annotation, SignatureError import py, sys @@ -130,7 +130,7 @@ "Argument number mismatch" for i, expected in enumerate(signature_args): - arg = annmodel.unionof(args_s[i], expected) + arg = unionof(args_s[i], expected) if not expected.contains(arg): name = getattr(self, 'name', None) if not name: @@ -138,7 +138,7 @@ name = self.instance.__name__ except AttributeError: name = '?' 
- raise Exception("In call to external function %r:\n" + raise SignatureError("In call to external function %r:\n" "arg %d must be %s,\n" " got %s" % ( name, i+1, expected, args_s[i])) diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -2,9 +2,10 @@ from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ is_external, lazy_register -from rpython.annotator import model as annmodel +from rpython.annotator.model import SomeInteger, SomeString, AnnotatorError from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy +from rpython.annotator.signature import SignatureError from rpython.rtyper.test.test_llinterp import interpret class TestExtFuncEntry: @@ -21,8 +22,8 @@ class BTestFuncEntry(ExtFuncEntry): _about_ = b name = 'b' - signature_args = [annmodel.SomeInteger()] - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] + signature_result = SomeInteger() def f(): return b(2) @@ -30,7 +31,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) res = interpret(f, []) assert res == 42 @@ -45,8 +46,8 @@ class CTestFuncEntry(ExtFuncEntry): _about_ = c name = 'ccc' - signature_args = [annmodel.SomeInteger()] * 2 - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] * 2 + signature_result = SomeInteger() def lltypeimpl(y, x): return y + x @@ -72,7 +73,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_tuple_args(self): """ @@ -96,7 +97,7 @@ s = a.build_types(f, []) # Not a very good assertion, but at least it means _something_ happened. 
- assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_return_goes_back(self): """ @@ -118,7 +119,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_specialcase(self): """ @@ -135,10 +136,10 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeString) + assert isinstance(s, SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = SomeString(no_nul=True) def os_open(s): pass register_external(os_open, [str0], None) @@ -152,25 +153,32 @@ a.translator.config.translation.check_str_without_nul=True def g(s): return os_open(s) - py.test.raises(Exception, a.build_types, g, [str]) + with py.test.raises(SignatureError): + a.build_types(g, [str]) a.build_types(g, [str0]) # Does not raise - def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + def test_list_of_str0_unchecked(self): + str0 = SomeString(no_nul=True) + def os_execve(l): pass + register_external(os_execve, [[str0]], None) + def f(l): return os_execve(l) + policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) + assert a.translator.config.translation.check_str_without_nul == False a.build_types(f, [[str]]) # Does not raise - assert a.translator.config.translation.check_str_without_nul == False # Now enable the str0 check, and try again with a similar function a.translator.config.translation.check_str_without_nul=True + def g(l): return os_execve(l) - py.test.raises(Exception, a.build_types, g, [[str]]) + + with py.test.raises(AnnotatorError): + # fails with TooLateForChange + a.build_types(g, [[str]]) a.build_types(g, [[str0]]) # Does not raise - - From noreply at buildbot.pypy.org Wed Oct 14 02:09:36 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 02:09:36 +0200 
(CEST) Subject: [pypy-commit] pypy no-class-specialize: Merge ClassDesc definition and classdef.py into classdesc.py Message-ID: <20151014000936.7CB4D1C103D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80187:4f5e9f83eb3b Date: 2015-10-14 01:09 +0100 http://bitbucket.org/pypy/pypy/changeset/4f5e9f83eb3b/ Log: Merge ClassDesc definition and classdef.py into classdesc.py diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -14,7 +14,7 @@ s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) -from rpython.annotator.classdef import InstanceSource, ClassDef +from rpython.annotator.classdesc import InstanceSource, ClassDef, ClassDesc from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef from rpython.annotator import description @@ -359,7 +359,7 @@ if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types result = self.getfrozen(pyobj) else: - result = description.ClassDesc(self, pyobj) + result = ClassDesc(self, pyobj) elif isinstance(pyobj, types.MethodType): if pyobj.im_self is None: # unbound return self.getdesc(pyobj.im_func) diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdesc.py rename from rpython/annotator/classdef.py rename to rpython/annotator/classdesc.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdesc.py @@ -1,9 +1,15 @@ """ Type inference for user-defined classes. 
""" +from __future__ import absolute_import +import types +from rpython.flowspace.model import Constant +from rpython.tool.sourcetools import func_with_new_name from rpython.annotator.model import ( - SomePBC, s_ImpossibleValue, unionof, s_None, AnnotatorError) -from rpython.annotator import description + SomePBC, s_ImpossibleValue, unionof, s_None, AnnotatorError, SomeInteger, + SomeString) +from rpython.annotator.description import ( + Desc, FunctionDesc, MethodDesc, NODEFAULT) # The main purpose of a ClassDef is to collect information about class/instance @@ -96,7 +102,7 @@ self.modified(classdef) self.read_locations.update(other.read_locations) - def mutated(self, homedef): # reflow from attr read positions + def mutated(self, homedef): # reflow from attr read positions s_newvalue = self.getvalue() for position in self.read_locations: @@ -105,17 +111,21 @@ # check for method demotion and after-the-fact method additions if isinstance(s_newvalue, SomePBC): attr = self.name - if s_newvalue.getKind() == description.MethodDesc: + if s_newvalue.getKind() == MethodDesc: # is method if homedef.classdesc.read_attribute(attr, None) is None: if not homedef.check_missing_attribute_update(attr): for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise AnnotatorError( - "demoting method %s to settled class " - "%s not allowed" % (self.name, homedef) - ) + raise Exception("demoting method %s " + "to settled class %s not " + "allowed" % + (self.name, homedef) + ) + #self.bookkeeper.warning("demoting method %s " + # "to base class %s" % + # (self.name, homedef)) break # check for attributes forbidden by slots or _attrs_ @@ -124,9 +134,8 @@ self.attr_allowed = False if not self.readonly: raise NoSuchAttrError( - "the attribute %r goes here to %r, " - "but it is forbidden here" % ( - self.name, homedef)) + "the attribute %r goes here to %r, but it is " + "forbidden here" % (self.name, homedef)) def modified(self, classdef='?'): 
self.readonly = False @@ -182,7 +191,7 @@ # but as an optimization we try to see if the attribute # has really been generalized if attrdef.s_value != s_prev_value: - attrdef.mutated(cdef) # reflow from all read positions + attrdef.mutated(cdef) # reflow from all read positions return else: # remember the source in self.attr_sources @@ -200,7 +209,7 @@ s_prev_value = attrdef.s_value attrdef.add_constant_source(self, source) if attrdef.s_value != s_prev_value: - attrdef.mutated(subdef) # reflow from all read positions + attrdef.mutated(subdef) # reflow from all read positions def locate_attribute(self, attr): while True: @@ -326,11 +335,11 @@ for desc in pbc.descriptions: # pick methods but ignore already-bound methods, which can come # from an instance attribute - if (isinstance(desc, description.MethodDesc) + if (isinstance(desc, MethodDesc) and desc.selfclassdef is None): methclassdef = desc.originclassdef if methclassdef is not self and methclassdef.issubclass(self): - pass # subclasses methods are always candidates + pass # subclasses methods are always candidates elif self.issubclass(methclassdef): # upward consider only the best match if uplookup is None or methclassdef.issubclass(uplookup): @@ -341,7 +350,7 @@ # clsdef1.lookup_filter(pbc) includes # clsdef2.lookup_filter(pbc) (see formal proof...) else: - continue # not matching + continue # not matching # bind the method by giving it a selfclassdef. Use the # more precise subclass that it's coming from. 
desc = desc.bind_self(methclassdef, flags) @@ -394,7 +403,8 @@ return SomePBC([subdef.classdesc for subdef in self.getallsubdefs()]) def _freeze_(self): - raise Exception("ClassDefs are used as knowntype for instances but cannot be used as immutablevalue arguments directly") + raise Exception("ClassDefs are used as knowntype for instances but " + "cannot be used as immutablevalue arguments directly") # ____________________________________________________________ @@ -432,3 +442,465 @@ class NoSuchAttrError(AnnotatorError): """Raised when an attribute is found on a class where __slots__ or _attrs_ forbits it.""" + + +def is_mixin(cls): + return cls.__dict__.get('_mixin_', False) + + +class ClassDesc(Desc): + knowntype = type + instance_level = False + all_enforced_attrs = None # or a set + settled = False + _detect_invalid_attrs = None + + def __init__(self, bookkeeper, cls, + name=None, basedesc=None, classdict=None): + super(ClassDesc, self).__init__(bookkeeper, cls) + if '__NOT_RPYTHON__' in cls.__dict__: + raise AnnotatorError('Bad class') + + if name is None: + name = cls.__module__ + '.' + cls.__name__ + self.name = name + self.basedesc = basedesc + if classdict is None: + classdict = {} # populated below + self.classdict = classdict # {attr: Constant-or-Desc} + if cls.__dict__.get('_annspecialcase_', ''): + raise AnnotatorError( + "Class specialization has been removed. 
The " + "'_annspecialcase_' class tag is now unsupported.") + self.classdef = None + + if is_mixin(cls): + raise AnnotatorError("cannot use directly the class %r because " + "it is a _mixin_" % (cls,)) + + assert cls.__module__ != '__builtin__' + baselist = list(cls.__bases__) + + # special case: skip BaseException, and pretend + # that all exceptions ultimately inherit from Exception instead + # of BaseException (XXX hack) + if cls is Exception: + baselist = [] + elif baselist == [BaseException]: + baselist = [Exception] + + mixins_before = [] + mixins_after = [] + base = object + for b1 in baselist: + if b1 is object: + continue + if is_mixin(b1): + if base is object: + mixins_before.append(b1) + else: + mixins_after.append(b1) + else: + assert base is object, ("multiple inheritance only supported " + "with _mixin_: %r" % (cls,)) + base = b1 + if mixins_before and mixins_after: + raise Exception("unsupported: class %r has mixin bases both" + " before and after the regular base" % (self,)) + self.add_mixins(mixins_after, check_not_in=base) + self.add_mixins(mixins_before) + self.add_sources_for_class(cls) + + if base is not object: + self.basedesc = bookkeeper.getdesc(base) + + if '_settled_' in cls.__dict__: + self.settled = bool(cls.__dict__['_settled_']) + + if '__slots__' in cls.__dict__ or '_attrs_' in cls.__dict__: + attrs = {} + for decl in ('__slots__', '_attrs_'): + decl = cls.__dict__.get(decl, []) + if isinstance(decl, str): + decl = (decl,) + decl = dict.fromkeys(decl) + attrs.update(decl) + if self.basedesc is not None: + if self.basedesc.all_enforced_attrs is None: + raise Exception("%r has slots or _attrs_, " + "but not its base class" % (cls,)) + attrs.update(self.basedesc.all_enforced_attrs) + self.all_enforced_attrs = attrs + + if (self.is_builtin_exception_class() and + self.all_enforced_attrs is None): + if cls not in FORCE_ATTRIBUTES_INTO_CLASSES: + self.all_enforced_attrs = [] # no attribute allowed + + def add_source_attribute(self, name, 
value, mixin=False): + if isinstance(value, property): + # special case for property object + if value.fget is not None: + newname = name + '__getter__' + func = func_with_new_name(value.fget, newname) + self.add_source_attribute(newname, func, mixin) + if value.fset is not None: + newname = name + '__setter__' + func = func_with_new_name(value.fset, newname) + self.add_source_attribute(newname, func, mixin) + self.classdict[name] = Constant(value) + return + + if isinstance(value, types.FunctionType): + # for debugging + if not hasattr(value, 'class_'): + value.class_ = self.pyobj + if mixin: + # make a new copy of the FunctionDesc for this class, + # but don't specialize further for all subclasses + funcdesc = FunctionDesc(self.bookkeeper, value) + self.classdict[name] = funcdesc + return + # NB. if value is, say, AssertionError.__init__, then we + # should not use getdesc() on it. Never. The problem is + # that the py lib has its own AssertionError.__init__ which + # is of type FunctionType. But bookkeeper.immutablevalue() + # will do the right thing in s_get_value(). 
+ if isinstance(value, staticmethod) and mixin: + # make a new copy of staticmethod + func = value.__get__(42) + value = staticmethod(func_with_new_name(func, func.__name__)) + + if type(value) in MemberDescriptorTypes: + # skip __slots__, showing up in the class as 'member' objects + return + if name == '__init__' and self.is_builtin_exception_class(): + # pretend that built-in exceptions have no __init__, + # unless explicitly specified in builtin.py + from rpython.annotator.builtin import BUILTIN_ANALYZERS + value = getattr(value, 'im_func', value) + if value not in BUILTIN_ANALYZERS: + return + self.classdict[name] = Constant(value) + + def add_mixins(self, mixins, check_not_in=object): + if not mixins: + return + A = type('tmp', tuple(mixins) + (object,), {}) + mro = A.__mro__ + assert mro[0] is A and mro[-1] is object + mro = mro[1:-1] + # + skip = set() + def add(cls): + if cls is not object: + for base in cls.__bases__: + add(base) + for name in cls.__dict__: + skip.add(name) + add(check_not_in) + # + for base in reversed(mro): + assert is_mixin(base), ( + "Mixin class %r has non mixin base class %r" % (mixins, base)) + for name, value in base.__dict__.items(): + if name in skip: + continue + self.add_source_attribute(name, value, mixin=True) + + def add_sources_for_class(self, cls): + for name, value in cls.__dict__.items(): + self.add_source_attribute(name, value) + + def getclassdef(self, key): + return self.getuniqueclassdef() + + def _init_classdef(self): + from rpython.annotator.classdef import ClassDef + classdef = ClassDef(self.bookkeeper, self) + self.bookkeeper.classdefs.append(classdef) + self.classdef = classdef + + # forced attributes + cls = self.pyobj + if cls in FORCE_ATTRIBUTES_INTO_CLASSES: + for name, s_value in FORCE_ATTRIBUTES_INTO_CLASSES[cls].items(): + classdef.generalize_attr(name, s_value) + classdef.find_attribute(name).modified(classdef) + + # register all class attributes as coming from this ClassDesc + # (as opposed to prebuilt 
instances) + classsources = {} + for attr in self.classdict: + classsources[attr] = self # comes from this ClassDesc + classdef.setup(classsources) + # look for a __del__ method and annotate it if it's there + if '__del__' in self.classdict: + from rpython.annotator.model import s_None, SomeInstance + s_func = self.s_read_attribute('__del__') + args_s = [SomeInstance(classdef)] + s = self.bookkeeper.emulate_pbc_call(classdef, s_func, args_s) + assert s_None.contains(s) + return classdef + + def getuniqueclassdef(self): + if self.classdef is None: + self._init_classdef() + return self.classdef + + def pycall(self, whence, args, s_previous_result, op=None): + from rpython.annotator.model import SomeInstance, SomeImpossibleValue + classdef = self.getuniqueclassdef() + s_instance = SomeInstance(classdef) + # look up __init__ directly on the class, bypassing the normal + # lookup mechanisms ClassDef (to avoid influencing Attribute placement) + s_init = self.s_read_attribute('__init__') + if isinstance(s_init, SomeImpossibleValue): + # no __init__: check that there are no constructor args + if not self.is_exception_class(): + try: + args.fixedunpack(0) + except ValueError: + raise Exception("default __init__ takes no argument" + " (class %s)" % (self.name,)) + elif self.pyobj is Exception: + # check explicitly against "raise Exception, x" where x + # is a low-level exception pointer + try: + [s_arg] = args.fixedunpack(1) + except ValueError: + pass + else: + from rpython.rtyper.llannotation import SomePtr + assert not isinstance(s_arg, SomePtr) + else: + # call the constructor + args = args.prepend(s_instance) + s_init.call(args) + return s_instance + + def is_exception_class(self): + return issubclass(self.pyobj, BaseException) + + def is_builtin_exception_class(self): + if self.is_exception_class(): + if self.pyobj.__module__ == 'exceptions': + return True + if issubclass(self.pyobj, AssertionError): + return True + return False + + def lookup(self, name): + cdesc = 
self + while name not in cdesc.classdict: + cdesc = cdesc.basedesc + if cdesc is None: + return None + else: + return cdesc + + def read_attribute(self, name, default=NODEFAULT): + cdesc = self.lookup(name) + if cdesc is None: + if default is NODEFAULT: + raise AttributeError + else: + return default + else: + return cdesc.classdict[name] + + def s_read_attribute(self, name): + # look up an attribute in the class + cdesc = self.lookup(name) + if cdesc is None: + return s_ImpossibleValue + else: + # delegate to s_get_value to turn it into an annotation + return cdesc.s_get_value(None, name) + + def s_get_value(self, classdef, name): + obj = self.classdict[name] + if isinstance(obj, Constant): + value = obj.value + if isinstance(value, staticmethod): # special case + value = value.__get__(42) + classdef = None # don't bind + elif isinstance(value, classmethod): + raise AnnotatorError("classmethods are not supported") + s_value = self.bookkeeper.immutablevalue(value) + if classdef is not None: + s_value = s_value.bind_callables_under(classdef, name) + elif isinstance(obj, Desc): + from rpython.annotator.model import SomePBC + if classdef is not None: + obj = obj.bind_under(classdef, name) + s_value = SomePBC([obj]) + else: + raise TypeError("classdict should not contain %r" % (obj,)) + return s_value + + def create_new_attribute(self, name, value): + assert name not in self.classdict, "name clash: %r" % (name,) + self.classdict[name] = Constant(value) + + def find_source_for(self, name): + if name in self.classdict: + return self + # check whether there is a new attribute + cls = self.pyobj + if name in cls.__dict__: + self.add_source_attribute(name, cls.__dict__[name]) + if name in self.classdict: + return self + return None + + def maybe_return_immutable_list(self, attr, s_result): + # hack: 'x.lst' where lst is listed in _immutable_fields_ as + # either 'lst[*]' or 'lst?[*]' + # should really return an immutable list as a result. 
Implemented + # by changing the result's annotation (but not, of course, doing an + # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, + # test_immutable_list_out_of_instance. + if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: + raise Exception("field %r was migrated to %r from a subclass in " + "which it was declared as _immutable_fields_" % + (attr, self.pyobj)) + search1 = '%s[*]' % (attr,) + search2 = '%s?[*]' % (attr,) + cdesc = self + while cdesc is not None: + if '_immutable_fields_' in cdesc.classdict: + if (search1 in cdesc.classdict['_immutable_fields_'].value or + search2 in cdesc.classdict['_immutable_fields_'].value): + s_result.listdef.never_resize() + s_copy = s_result.listdef.offspring() + s_copy.listdef.mark_as_immutable() + # + cdesc = cdesc.basedesc + while cdesc is not None: + if cdesc._detect_invalid_attrs is None: + cdesc._detect_invalid_attrs = set() + cdesc._detect_invalid_attrs.add(attr) + cdesc = cdesc.basedesc + # + return s_copy + cdesc = cdesc.basedesc + return s_result # common case + + @staticmethod + def consider_call_site(descs, args, s_result, op): + descs[0].getcallfamily() + descs[0].mergecallfamilies(*descs[1:]) + from rpython.annotator.model import SomeInstance, SomePBC, s_None + if len(descs) == 1: + # call to a single class, look at the result annotation + # in case it was specialized + if not isinstance(s_result, SomeInstance): + raise Exception("calling a class didn't return an instance??") + classdefs = [s_result.classdef] + else: + # call to multiple classes: specialization not supported + classdefs = [desc.getuniqueclassdef() for desc in descs] + # If some of the classes have an __init__ and others not, then + # we complain, even though in theory it could work if all the + # __init__s take no argument. But it's messy to implement, so + # let's just say it is not RPython and you have to add an empty + # __init__ to your base class. 
+ has_init = False + for desc in descs: + s_init = desc.s_read_attribute('__init__') + has_init |= isinstance(s_init, SomePBC) + basedesc = ClassDesc.getcommonbase(descs) + s_init = basedesc.s_read_attribute('__init__') + parent_has_init = isinstance(s_init, SomePBC) + if has_init and not parent_has_init: + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) + # make a PBC of MethodDescs, one for the __init__ of each class + initdescs = [] + for desc, classdef in zip(descs, classdefs): + s_init = desc.s_read_attribute('__init__') + if isinstance(s_init, SomePBC): + assert len(s_init.descriptions) == 1, ( + "unexpected dynamic __init__?") + initfuncdesc, = s_init.descriptions + if isinstance(initfuncdesc, FunctionDesc): + from rpython.annotator.bookkeeper import getbookkeeper + initmethdesc = getbookkeeper().getmethoddesc( + initfuncdesc, classdef, classdef, '__init__') + initdescs.append(initmethdesc) + # register a call to exactly these __init__ methods + if initdescs: + initdescs[0].mergecallfamilies(*initdescs[1:]) + MethodDesc.consider_call_site(initdescs, args, s_None, op) + + def getallbases(self): + desc = self + while desc is not None: + yield desc + desc = desc.basedesc + + @staticmethod + def getcommonbase(descs): + commondesc = descs[0] + for desc in descs[1:]: + allbases = set(commondesc.getallbases()) + while desc not in allbases: + assert desc is not None, "no common base for %r" % (descs,) + desc = desc.basedesc + commondesc = desc + return commondesc + + def rowkey(self): + return self + + def getattrfamily(self, attrname): + "Get the ClassAttrFamily object for attrname. Possibly creates one." 
+ access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) + _, _, attrfamily = access_sets.find(self) + return attrfamily + + def queryattrfamily(self, attrname): + """Retrieve the ClassAttrFamily object for attrname if there is one, + otherwise return None.""" + access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) + try: + return access_sets[self] + except KeyError: + return None + + def mergeattrfamilies(self, others, attrname): + """Merge the attr families of the given Descs into one.""" + access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) + changed, rep, attrfamily = access_sets.find(self) + for desc in others: + changed1, rep, attrfamily = access_sets.union(rep, desc) + changed = changed or changed1 + return changed + +# ____________________________________________________________ + +class Sample(object): + __slots__ = 'x' +MemberDescriptorTypes = [type(Sample.x)] +del Sample +try: + MemberDescriptorTypes.append(type(OSError.errno)) +except AttributeError: # on CPython <= 2.4 + pass + +# ____________________________________________________________ + +FORCE_ATTRIBUTES_INTO_CLASSES = { + EnvironmentError: {'errno': SomeInteger(), + 'strerror': SomeString(can_be_None=True), + 'filename': SomeString(can_be_None=True)}, +} + +try: + WindowsError +except NameError: + pass +else: + FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()} diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -2,13 +2,12 @@ import types from rpython.annotator.signature import ( enforce_signature_args, enforce_signature_return, finish_type) -from rpython.flowspace.model import Constant, FunctionGraph +from rpython.flowspace.model import FunctionGraph from rpython.flowspace.bytecode import cpython_code_signature from rpython.annotator.argument import rawshape, ArgErr, simple_args -from rpython.tool.sourcetools import 
valid_identifier, func_with_new_name +from rpython.tool.sourcetools import valid_identifier from rpython.tool.pairtype import extendabletype -from rpython.annotator.model import ( - AnnotatorError, SomeInteger, SomeString, s_ImpossibleValue) +from rpython.annotator.model import AnnotatorError, s_ImpossibleValue class CallFamily(object): """A family of Desc objects that could be called from common call sites. @@ -188,6 +187,8 @@ class NoStandardGraph(Exception): """The function doesn't have a single standard non-specialized graph.""" +NODEFAULT = object() + class FunctionDesc(Desc): knowntype = types.FunctionType @@ -404,443 +405,6 @@ return s_sigs -def is_mixin(cls): - return cls.__dict__.get('_mixin_', False) - -NODEFAULT = object() - -class ClassDesc(Desc): - knowntype = type - instance_level = False - all_enforced_attrs = None # or a set - settled = False - _detect_invalid_attrs = None - - def __init__(self, bookkeeper, cls, - name=None, basedesc=None, classdict=None): - super(ClassDesc, self).__init__(bookkeeper, cls) - if '__NOT_RPYTHON__' in cls.__dict__: - raise AnnotatorError('Bad class') - - if name is None: - name = cls.__module__ + '.' + cls.__name__ - self.name = name - self.basedesc = basedesc - if classdict is None: - classdict = {} # populated below - self.classdict = classdict # {attr: Constant-or-Desc} - if cls.__dict__.get('_annspecialcase_', ''): - raise AnnotatorError( - "Class specialization has been removed. 
The " - "'_annspecialcase_' class tag is now unsupported.") - self.classdef = None - - if is_mixin(cls): - raise AnnotatorError("cannot use directly the class %r because " - "it is a _mixin_" % (cls,)) - - assert cls.__module__ != '__builtin__' - baselist = list(cls.__bases__) - - # special case: skip BaseException, and pretend - # that all exceptions ultimately inherit from Exception instead - # of BaseException (XXX hack) - if cls is Exception: - baselist = [] - elif baselist == [BaseException]: - baselist = [Exception] - - mixins_before = [] - mixins_after = [] - base = object - for b1 in baselist: - if b1 is object: - continue - if is_mixin(b1): - if base is object: - mixins_before.append(b1) - else: - mixins_after.append(b1) - else: - assert base is object, ("multiple inheritance only supported " - "with _mixin_: %r" % (cls,)) - base = b1 - if mixins_before and mixins_after: - raise Exception("unsupported: class %r has mixin bases both" - " before and after the regular base" % (self,)) - self.add_mixins(mixins_after, check_not_in=base) - self.add_mixins(mixins_before) - self.add_sources_for_class(cls) - - if base is not object: - self.basedesc = bookkeeper.getdesc(base) - - if '_settled_' in cls.__dict__: - self.settled = bool(cls.__dict__['_settled_']) - - if '__slots__' in cls.__dict__ or '_attrs_' in cls.__dict__: - attrs = {} - for decl in ('__slots__', '_attrs_'): - decl = cls.__dict__.get(decl, []) - if isinstance(decl, str): - decl = (decl,) - decl = dict.fromkeys(decl) - attrs.update(decl) - if self.basedesc is not None: - if self.basedesc.all_enforced_attrs is None: - raise Exception("%r has slots or _attrs_, " - "but not its base class" % (cls,)) - attrs.update(self.basedesc.all_enforced_attrs) - self.all_enforced_attrs = attrs - - if (self.is_builtin_exception_class() and - self.all_enforced_attrs is None): - if cls not in FORCE_ATTRIBUTES_INTO_CLASSES: - self.all_enforced_attrs = [] # no attribute allowed - - def add_source_attribute(self, name, 
value, mixin=False): - if isinstance(value, property): - # special case for property object - if value.fget is not None: - newname = name + '__getter__' - func = func_with_new_name(value.fget, newname) - self.add_source_attribute(newname, func, mixin) - if value.fset is not None: - newname = name + '__setter__' - func = func_with_new_name(value.fset, newname) - self.add_source_attribute(newname, func, mixin) - self.classdict[name] = Constant(value) - return - - if isinstance(value, types.FunctionType): - # for debugging - if not hasattr(value, 'class_'): - value.class_ = self.pyobj - if mixin: - # make a new copy of the FunctionDesc for this class, - # but don't specialize further for all subclasses - funcdesc = FunctionDesc(self.bookkeeper, value) - self.classdict[name] = funcdesc - return - # NB. if value is, say, AssertionError.__init__, then we - # should not use getdesc() on it. Never. The problem is - # that the py lib has its own AssertionError.__init__ which - # is of type FunctionType. But bookkeeper.immutablevalue() - # will do the right thing in s_get_value(). 
- if isinstance(value, staticmethod) and mixin: - # make a new copy of staticmethod - func = value.__get__(42) - value = staticmethod(func_with_new_name(func, func.__name__)) - - if type(value) in MemberDescriptorTypes: - # skip __slots__, showing up in the class as 'member' objects - return - if name == '__init__' and self.is_builtin_exception_class(): - # pretend that built-in exceptions have no __init__, - # unless explicitly specified in builtin.py - from rpython.annotator.builtin import BUILTIN_ANALYZERS - value = getattr(value, 'im_func', value) - if value not in BUILTIN_ANALYZERS: - return - self.classdict[name] = Constant(value) - - def add_mixins(self, mixins, check_not_in=object): - if not mixins: - return - A = type('tmp', tuple(mixins) + (object,), {}) - mro = A.__mro__ - assert mro[0] is A and mro[-1] is object - mro = mro[1:-1] - # - skip = set() - def add(cls): - if cls is not object: - for base in cls.__bases__: - add(base) - for name in cls.__dict__: - skip.add(name) - add(check_not_in) - # - for base in reversed(mro): - assert is_mixin(base), ( - "Mixin class %r has non mixin base class %r" % (mixins, base)) - for name, value in base.__dict__.items(): - if name in skip: - continue - self.add_source_attribute(name, value, mixin=True) - - def add_sources_for_class(self, cls): - for name, value in cls.__dict__.items(): - self.add_source_attribute(name, value) - - def getclassdef(self, key): - return self.getuniqueclassdef() - - def _init_classdef(self): - from rpython.annotator.classdef import ClassDef - classdef = ClassDef(self.bookkeeper, self) - self.bookkeeper.classdefs.append(classdef) - self.classdef = classdef - - # forced attributes - cls = self.pyobj - if cls in FORCE_ATTRIBUTES_INTO_CLASSES: - for name, s_value in FORCE_ATTRIBUTES_INTO_CLASSES[cls].items(): - classdef.generalize_attr(name, s_value) - classdef.find_attribute(name).modified(classdef) - - # register all class attributes as coming from this ClassDesc - # (as opposed to prebuilt 
instances) - classsources = {} - for attr in self.classdict: - classsources[attr] = self # comes from this ClassDesc - classdef.setup(classsources) - # look for a __del__ method and annotate it if it's there - if '__del__' in self.classdict: - from rpython.annotator.model import s_None, SomeInstance - s_func = self.s_read_attribute('__del__') - args_s = [SomeInstance(classdef)] - s = self.bookkeeper.emulate_pbc_call(classdef, s_func, args_s) - assert s_None.contains(s) - return classdef - - def getuniqueclassdef(self): - if self.classdef is None: - self._init_classdef() - return self.classdef - - def pycall(self, whence, args, s_previous_result, op=None): - from rpython.annotator.model import SomeInstance, SomeImpossibleValue - classdef = self.getuniqueclassdef() - s_instance = SomeInstance(classdef) - # look up __init__ directly on the class, bypassing the normal - # lookup mechanisms ClassDef (to avoid influencing Attribute placement) - s_init = self.s_read_attribute('__init__') - if isinstance(s_init, SomeImpossibleValue): - # no __init__: check that there are no constructor args - if not self.is_exception_class(): - try: - args.fixedunpack(0) - except ValueError: - raise Exception("default __init__ takes no argument" - " (class %s)" % (self.name,)) - elif self.pyobj is Exception: - # check explicitly against "raise Exception, x" where x - # is a low-level exception pointer - try: - [s_arg] = args.fixedunpack(1) - except ValueError: - pass - else: - from rpython.rtyper.llannotation import SomePtr - assert not isinstance(s_arg, SomePtr) - else: - # call the constructor - args = args.prepend(s_instance) - s_init.call(args) - return s_instance - - def is_exception_class(self): - return issubclass(self.pyobj, BaseException) - - def is_builtin_exception_class(self): - if self.is_exception_class(): - if self.pyobj.__module__ == 'exceptions': - return True - if issubclass(self.pyobj, AssertionError): - return True - return False - - def lookup(self, name): - cdesc = 
self - while name not in cdesc.classdict: - cdesc = cdesc.basedesc - if cdesc is None: - return None - else: - return cdesc - - def read_attribute(self, name, default=NODEFAULT): - cdesc = self.lookup(name) - if cdesc is None: - if default is NODEFAULT: - raise AttributeError - else: - return default - else: - return cdesc.classdict[name] - - def s_read_attribute(self, name): - # look up an attribute in the class - cdesc = self.lookup(name) - if cdesc is None: - return s_ImpossibleValue - else: - # delegate to s_get_value to turn it into an annotation - return cdesc.s_get_value(None, name) - - def s_get_value(self, classdef, name): - obj = self.classdict[name] - if isinstance(obj, Constant): - value = obj.value - if isinstance(value, staticmethod): # special case - value = value.__get__(42) - classdef = None # don't bind - elif isinstance(value, classmethod): - raise AnnotatorError("classmethods are not supported") - s_value = self.bookkeeper.immutablevalue(value) - if classdef is not None: - s_value = s_value.bind_callables_under(classdef, name) - elif isinstance(obj, Desc): - from rpython.annotator.model import SomePBC - if classdef is not None: - obj = obj.bind_under(classdef, name) - s_value = SomePBC([obj]) - else: - raise TypeError("classdict should not contain %r" % (obj,)) - return s_value - - def create_new_attribute(self, name, value): - assert name not in self.classdict, "name clash: %r" % (name,) - self.classdict[name] = Constant(value) - - def find_source_for(self, name): - if name in self.classdict: - return self - # check whether there is a new attribute - cls = self.pyobj - if name in cls.__dict__: - self.add_source_attribute(name, cls.__dict__[name]) - if name in self.classdict: - return self - return None - - def maybe_return_immutable_list(self, attr, s_result): - # hack: 'x.lst' where lst is listed in _immutable_fields_ as - # either 'lst[*]' or 'lst?[*]' - # should really return an immutable list as a result. 
Implemented - # by changing the result's annotation (but not, of course, doing an - # actual copy in the rtyper). Tested in rpython.rtyper.test.test_rlist, - # test_immutable_list_out_of_instance. - if self._detect_invalid_attrs and attr in self._detect_invalid_attrs: - raise Exception("field %r was migrated to %r from a subclass in " - "which it was declared as _immutable_fields_" % - (attr, self.pyobj)) - search1 = '%s[*]' % (attr,) - search2 = '%s?[*]' % (attr,) - cdesc = self - while cdesc is not None: - if '_immutable_fields_' in cdesc.classdict: - if (search1 in cdesc.classdict['_immutable_fields_'].value or - search2 in cdesc.classdict['_immutable_fields_'].value): - s_result.listdef.never_resize() - s_copy = s_result.listdef.offspring() - s_copy.listdef.mark_as_immutable() - # - cdesc = cdesc.basedesc - while cdesc is not None: - if cdesc._detect_invalid_attrs is None: - cdesc._detect_invalid_attrs = set() - cdesc._detect_invalid_attrs.add(attr) - cdesc = cdesc.basedesc - # - return s_copy - cdesc = cdesc.basedesc - return s_result # common case - - @staticmethod - def consider_call_site(descs, args, s_result, op): - descs[0].getcallfamily() - descs[0].mergecallfamilies(*descs[1:]) - from rpython.annotator.model import SomeInstance, SomePBC, s_None - if len(descs) == 1: - # call to a single class, look at the result annotation - # in case it was specialized - if not isinstance(s_result, SomeInstance): - raise Exception("calling a class didn't return an instance??") - classdefs = [s_result.classdef] - else: - # call to multiple classes: specialization not supported - classdefs = [desc.getuniqueclassdef() for desc in descs] - # If some of the classes have an __init__ and others not, then - # we complain, even though in theory it could work if all the - # __init__s take no argument. But it's messy to implement, so - # let's just say it is not RPython and you have to add an empty - # __init__ to your base class. 
- has_init = False - for desc in descs: - s_init = desc.s_read_attribute('__init__') - has_init |= isinstance(s_init, SomePBC) - basedesc = ClassDesc.getcommonbase(descs) - s_init = basedesc.s_read_attribute('__init__') - parent_has_init = isinstance(s_init, SomePBC) - if has_init and not parent_has_init: - raise AnnotatorError( - "some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) - # make a PBC of MethodDescs, one for the __init__ of each class - initdescs = [] - for desc, classdef in zip(descs, classdefs): - s_init = desc.s_read_attribute('__init__') - if isinstance(s_init, SomePBC): - assert len(s_init.descriptions) == 1, ( - "unexpected dynamic __init__?") - initfuncdesc, = s_init.descriptions - if isinstance(initfuncdesc, FunctionDesc): - from rpython.annotator.bookkeeper import getbookkeeper - initmethdesc = getbookkeeper().getmethoddesc( - initfuncdesc, classdef, classdef, '__init__') - initdescs.append(initmethdesc) - # register a call to exactly these __init__ methods - if initdescs: - initdescs[0].mergecallfamilies(*initdescs[1:]) - MethodDesc.consider_call_site(initdescs, args, s_None, op) - - def getallbases(self): - desc = self - while desc is not None: - yield desc - desc = desc.basedesc - - @staticmethod - def getcommonbase(descs): - commondesc = descs[0] - for desc in descs[1:]: - allbases = set(commondesc.getallbases()) - while desc not in allbases: - assert desc is not None, "no common base for %r" % (descs,) - desc = desc.basedesc - commondesc = desc - return commondesc - - def rowkey(self): - return self - - def getattrfamily(self, attrname): - "Get the ClassAttrFamily object for attrname. Possibly creates one." 
- access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) - _, _, attrfamily = access_sets.find(self) - return attrfamily - - def queryattrfamily(self, attrname): - """Retrieve the ClassAttrFamily object for attrname if there is one, - otherwise return None.""" - access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) - try: - return access_sets[self] - except KeyError: - return None - - def mergeattrfamilies(self, others, attrname): - """Merge the attr families of the given Descs into one.""" - access_sets = self.bookkeeper.get_classpbc_attr_families(attrname) - changed, rep, attrfamily = access_sets.find(self) - for desc in others: - changed1, rep, attrfamily = access_sets.union(rep, desc) - changed = changed or changed1 - return changed - - class MethodDesc(Desc): knowntype = types.MethodType @@ -1061,7 +625,6 @@ func_args = self.func_args(args) return self.funcdesc.get_graph(func_args, op) - @staticmethod def consider_call_site(descs, args, s_result, op): cnt, keys, star = rawshape(args) @@ -1073,29 +636,3 @@ def rowkey(self): return self.funcdesc - -# ____________________________________________________________ - -class Sample(object): - __slots__ = 'x' -MemberDescriptorTypes = [type(Sample.x)] -del Sample -try: - MemberDescriptorTypes.append(type(OSError.errno)) -except AttributeError: # on CPython <= 2.4 - pass - -# ____________________________________________________________ - -FORCE_ATTRIBUTES_INTO_CLASSES = { - EnvironmentError: {'errno': SomeInteger(), - 'strerror': SomeString(can_be_None=True), - 'filename': SomeString(can_be_None=True)}, -} - -try: - WindowsError -except NameError: - pass -else: - FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()} diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -466,7 +466,7 @@ if desc.pyobj is not None: self.const = desc.pyobj elif len(descriptions) > 1: - from 
rpython.annotator.description import ClassDesc + from rpython.annotator.classdesc import ClassDesc if self.getKind() is ClassDesc: # a PBC of several classes: enforce them all to be # built, without support for specialization. See diff --git a/rpython/annotator/test/test_description.py b/rpython/annotator/test/test_description.py --- a/rpython/annotator/test/test_description.py +++ b/rpython/annotator/test/test_description.py @@ -1,4 +1,4 @@ -from rpython.annotator.description import ClassDesc, is_mixin +from rpython.annotator.classdesc import ClassDesc, is_mixin class FakeBookkeeper: def __init__(self): diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -875,7 +875,7 @@ def getattr(self, s_attr): assert s_attr.is_constant() if s_attr.const == '__name__': - from rpython.annotator.description import ClassDesc + from rpython.annotator.classdesc import ClassDesc if self.getKind() is ClassDesc: return SomeString() bookkeeper = getbookkeeper() diff --git a/rpython/rtyper/normalizecalls.py b/rpython/rtyper/normalizecalls.py --- a/rpython/rtyper/normalizecalls.py +++ b/rpython/rtyper/normalizecalls.py @@ -6,6 +6,7 @@ from rpython.rtyper.error import TyperError from rpython.rtyper.rmodel import getgcflavor from rpython.tool.sourcetools import valid_identifier +from rpython.annotator.classdesc import ClassDesc def normalize_call_familes(annotator): @@ -213,7 +214,7 @@ descs = access_set.descs if len(descs) <= 1: continue - if not isinstance(descs.iterkeys().next(), description.ClassDesc): + if not isinstance(descs.iterkeys().next(), ClassDesc): continue classdefs = [desc.getuniqueclassdef() for desc in descs] commonbase = classdefs[0] @@ -241,7 +242,7 @@ if len(family.descs) <= 1: continue descs = family.descs.keys() - if not isinstance(descs[0], description.ClassDesc): + if not isinstance(descs[0], ClassDesc): continue # Note that if classes are in the same callfamily, their 
__init__ # attribute must be in the same attrfamily as well. diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -3,7 +3,8 @@ from rpython.flowspace.model import FunctionGraph, Link, Block, SpaceOperation from rpython.annotator import model as annmodel from rpython.annotator.description import ( - FunctionDesc, ClassDesc, MethodDesc, FrozenDesc, MethodOfFrozenDesc) + FunctionDesc, MethodDesc, FrozenDesc, MethodOfFrozenDesc) +from rpython.annotator.classdesc import ClassDesc from rpython.flowspace.model import Constant from rpython.annotator.argument import simple_args from rpython.rlib.debug import ll_assert From noreply at buildbot.pypy.org Wed Oct 14 02:43:43 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 14 Oct 2015 02:43:43 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: fix imports Message-ID: <20151014004343.502F51C103D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80188:dc1d07c7ac73 Date: 2015-10-14 01:43 +0100 http://bitbucket.org/pypy/pypy/changeset/dc1d07c7ac73/ Log: fix imports diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -4,13 +4,14 @@ import sys from rpython.annotator.model import ( - SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, + SomeInteger, SomeChar, SomeBool, SomeString, SomeTuple, SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) from rpython.annotator.bookkeeper import ( getbookkeeper, immutablevalue, BUILTIN_ANALYZERS, analyzer_for) from rpython.annotator import description +from rpython.annotator.classdesc import ClassDef from rpython.flowspace.model import Constant import rpython.rlib.rarithmetic import rpython.rlib.objectmodel @@ -123,7 
+124,6 @@ def our_issubclass(cls1, cls2): """ we're going to try to be less silly in the face of old-style classes""" - from rpython.annotator.classdef import ClassDef if cls2 is object: return True def classify(cls): diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ b/rpython/annotator/classdesc.py @@ -118,14 +118,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ @@ -616,7 +612,6 @@ return self.getuniqueclassdef() def _init_classdef(self): - from rpython.annotator.classdef import ClassDef classdef = ClassDef(self.bookkeeper, self) self.bookkeeper.classdefs.append(classdef) self.classdef = classdef @@ -731,7 +726,6 @@ if classdef is not None: s_value = s_value.bind_callables_under(classdef, name) elif isinstance(obj, Desc): - from rpython.annotator.model import SomePBC if classdef is not None: obj = obj.bind_under(classdef, name) s_value = SomePBC([obj]) diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -6,7 +6,7 @@ from rpython.annotator import model as annmodel from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator -from rpython.annotator.classdef import NoSuchAttrError +from rpython.annotator.classdesc import NoSuchAttrError from rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy 
from rpython.annotator.signature import Sig, SignatureError @@ -3049,7 +3049,6 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) def test_attrs_enforce_attrs(self): From noreply at buildbot.pypy.org Wed Oct 14 08:52:29 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 08:52:29 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed fromfloat call, but called constructor (wrong), fixed Message-ID: <20151014065229.02F001C089E@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80189:7c83ce701286 Date: 2015-10-14 08:52 +0200 http://bitbucket.org/pypy/pypy/changeset/7c83ce701286/ Log: removed fromfloat call, but called constructor (wrong), fixed diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -172,7 +172,7 @@ if elem.startswith('i'): v = InputArgInt(0) elif elem.startswith('f'): - v = InputArgFloat(0.0) + v = InputArgFloat.fromfloat(0.0) elif elem.startswith('v'): v = InputArgVector() elem = self.update_vector(v, elem) From noreply at buildbot.pypy.org Wed Oct 14 08:56:25 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 08:56:25 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: test case reduced size of constant integer to pass this test Message-ID: <20151014065625.2AF701C1186@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80190:1c315f467e89 Date: 2015-10-14 08:55 +0200 http://bitbucket.org/pypy/pypy/changeset/1c315f467e89/ Log: test case reduced size of constant integer to pass this test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ 
b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -535,7 +535,7 @@ guard_nonnull(p13) [] # 2: 4,5 i14 = getfield_gc_i(p9) # 3: 5 p15 = getfield_gc_r(p13) # 4: 5 - guard_class(p15, 140737326900656) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 + guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6 jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6: """) self.assert_dependencies(graph, full_check=True) From noreply at buildbot.pypy.org Wed Oct 14 09:15:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 09:15:14 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Starting Message-ID: <20151014071514.23BBF1C1214@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80191:5bac5cb80e81 Date: 2015-10-14 09:03 +0200 http://bitbucket.org/pypy/pypy/changeset/5bac5cb80e81/ Log: Starting diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/rawrefcount.py @@ -0,0 +1,61 @@ +# +# See documentation in pypy/doc/discussion/rawrefcount.rst +# +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rlib.objectmodel import we_are_translated, specialize +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rtyper import annlowlevel +from rpython.rlib import rgc + + +REFCNT_FROM_PYPY_OBJECT = 80 # == 0x50 + + +_p_list = [] # not rpython +_o_list = [] # not rpython + + +def create_link_from_pypy(p, ob): + "NOT_RPYTHON" + assert not hasattr(p, '__rawrefcount') + assert not ob.ob_pypy_link + ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + p.__rawrefcount = ob + _p_list.append(ob) + +def create_link_to_pypy(p, ob): + "NOT_RPYTHON" + assert not ob.ob_pypy_link + ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + _o_list.append(ob) + +def from_obj(OBTYPE, p): + "NOT_RPYTHON" + null = lltype.nullptr(OBTYPE) + ob = getattr(p, 
'__rawrefcount', null) + assert lltype.typeOf(ob) == lltype.Ptr(OBTYPE) + return ob + + at specialize.arg(0) +def to_obj(Class, ob): + pypy_gcref = ob.ob_pypy_link + if we_are_translated(): + return annlowlevel.cast_gcref_to_instance(Class, pypy_gcref) + else: + if not pypy_gcref: + return None + p = rgc.try_cast_gcref_to_instance(Class, pypy_gcref) + assert p is not None + return p + +def collect(): + "NOT_RPYTHON: for tests only" + for ob in _p_list: + xxx + +# ____________________________________________________________ + +## class Entry(ExtRegistryEntry): +## _about_ = create_link_from_pypy diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -481,6 +481,7 @@ class _GcRef(object): # implementation-specific: there should not be any after translation __slots__ = ['_x', '_handle'] + _TYPE = llmemory.GCREF def __init__(self, x): self._x = x def __hash__(self): diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/test_rawrefcount.py @@ -0,0 +1,32 @@ +from rpython.rlib import rawrefcount +from rpython.rtyper.lltypesystem import lltype, llmemory + +class W_Root(object): + def __init__(self, intval=0): + self.intval = intval + +PyObjectS = lltype.Struct('PyObjectS', + ('ob_refcnt', lltype.Signed), + ('ob_pypy_link', llmemory.GCREF)) +PyObject = lltype.Ptr(PyObjectS) + + +def test_create_link_from_pypy(): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_from_pypy(p, ob) + assert rawrefcount.from_obj(PyObjectS, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + +def test_create_link_to_pypy(): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + assert 
rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_to_pypy(p, ob) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == p From noreply at buildbot.pypy.org Wed Oct 14 09:15:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 09:15:16 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress Message-ID: <20151014071516.2434E1C1214@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80192:015de8d029db Date: 2015-10-14 09:15 +0200 http://bitbucket.org/pypy/pypy/changeset/015de8d029db/ Log: in-progress diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -1,6 +1,7 @@ # # See documentation in pypy/doc/discussion/rawrefcount.rst # +import weakref from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.extregistry import ExtRegistryEntry @@ -50,10 +51,24 @@ assert p is not None return p -def collect(): +def _collect(): "NOT_RPYTHON: for tests only" + global _p_list + wrlist = [] + newlist = [] for ob in _p_list: - xxx + assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + if ob.ob_refcnt == REFCNT_FROM_PYPY_OBJECT: + wrlist.append(weakref.ref(ob)) + else: + newlist.append(ob) + _p_list = newlist + del ob + rgc.collect() # forces the cycles to be resolved and the weakrefs to die + for wr in wrlist: + ob = wr() + if ob is not None: + newlist.append(ob) # ____________________________________________________________ diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -1,3 +1,4 @@ +import weakref from rpython.rlib import rawrefcount from rpython.rtyper.lltypesystem 
import lltype, llmemory @@ -11,22 +12,75 @@ PyObject = lltype.Ptr(PyObjectS) -def test_create_link_from_pypy(): - p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) - assert rawrefcount.to_obj(W_Root, ob) == None - rawrefcount.create_link_from_pypy(p, ob) - assert rawrefcount.from_obj(PyObjectS, p) == ob - assert rawrefcount.to_obj(W_Root, ob) == p +class TestRawRefCount: -def test_create_link_to_pypy(): - p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) - assert rawrefcount.to_obj(W_Root, ob) == None - rawrefcount.create_link_to_pypy(p, ob) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) - assert rawrefcount.to_obj(W_Root, ob) == p + def setup_method(self, meth): + del rawrefcount._p_list[:] + del rawrefcount._o_list[:] + + def test_create_link_from_pypy(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_from_pypy(p, ob) + assert rawrefcount.from_obj(PyObjectS, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + + def test_create_link_to_pypy(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_to_pypy(p, ob) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == p + + def test_collect_dies(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + rawrefcount.create_link_from_pypy(p, ob) + 
assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + rawrefcount._collect() + assert rawrefcount._p_list == [] + assert wr_ob() is None + assert wr_p() is None + + def test_collect_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + rawrefcount.create_link_from_pypy(p, ob) + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObjectS, p) == ob + + def test_collect_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + rawrefcount.create_link_from_pypy(p, ob) + assert rawrefcount._p_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert rawrefcount._p_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert rawrefcount.from_obj(PyObjectS, p) == ob From noreply at buildbot.pypy.org Wed Oct 14 09:36:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 09:36:04 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Collecting the O list Message-ID: <20151014073604.04F071C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80193:7dce6c22637b Date: 2015-10-14 09:36 +0200 http://bitbucket.org/pypy/pypy/changeset/7dce6c22637b/ Log: Collecting the O list diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -52,23 +52,55 @@ return p def _collect(): - "NOT_RPYTHON: for tests only" - global _p_list - wrlist = [] - newlist = [] + """NOT_RPYTHON: for tests only. 
Emulates a GC collection. + Returns the list of ob's whose _Py_Dealloc() should be called, + from the O list. + """ + global _p_list, _o_list + wr_p_list = [] + new_p_list = [] for ob in _p_list: assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT if ob.ob_refcnt == REFCNT_FROM_PYPY_OBJECT: - wrlist.append(weakref.ref(ob)) + wr_p_list.append(weakref.ref(ob)) else: - newlist.append(ob) - _p_list = newlist - del ob + new_p_list.append(ob) + ob = None + _p_list = Ellipsis + # + wr_o_list = [] + for ob in _o_list: + assert ob.ob_pypy_link + p = rgc.try_cast_gcref_to_instance(object, ob.ob_pypy_link) + assert p is not None + ob.ob_pypy_link = lltype.nullptr(llmemory.GCREF.TO) + wr_o_list.append((ob, weakref.ref(p))) + p = None + _o_list = Ellipsis + # rgc.collect() # forces the cycles to be resolved and the weakrefs to die - for wr in wrlist: + rgc.collect() + rgc.collect() + # + _p_list = new_p_list + for wr in wr_p_list: ob = wr() if ob is not None: - newlist.append(ob) + _p_list.append(ob) + # + dealloc = [] + _o_list = [] + for ob, wr in wr_o_list: + p = wr() + if p is not None: + ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + _o_list.append(ob) + else: + assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + ob.ob_refcnt -= REFCNT_FROM_PYPY_OBJECT + if ob.ob_refcnt == 0: + dealloc.append(ob) + return dealloc # ____________________________________________________________ diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -38,7 +38,7 @@ assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p - def test_collect_dies(self): + def test_collect_p_dies(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) @@ -52,7 +52,7 @@ assert wr_ob() is None assert wr_p() is None - def test_collect_keepalive_pyobject(self): + def 
test_collect_p_keepalive_pyobject(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) @@ -70,7 +70,7 @@ assert rawrefcount.to_obj(W_Root, ob) == p assert rawrefcount.from_obj(PyObjectS, p) == ob - def test_collect_keepalive_w_root(self): + def test_collect_p_keepalive_w_root(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) @@ -84,3 +84,55 @@ assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p assert rawrefcount.from_obj(PyObjectS, p) == ob + + def test_collect_o_dies(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + rawrefcount.create_link_to_pypy(p, ob) + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + dealloc = rawrefcount._collect() + ob = wr_ob() + assert ob is not None + assert dealloc == [ob] + assert rawrefcount._o_list == [] + assert wr_p() is None + + def test_collect_o_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + p._rawrefcount = ob + rawrefcount.create_link_to_pypy(p, ob) + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.ob_refcnt += 1 # <= + del p + dealloc = rawrefcount._collect() + assert dealloc == [] + p = wr_p() + assert p is None # was unlinked + assert ob.ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 + assert rawrefcount._o_list == [] + assert rawrefcount.to_obj(W_Root, ob) == None + + def test_collect_o_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + p._rawrefcount = ob + rawrefcount.create_link_to_pypy(p, ob) + assert rawrefcount._o_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + dealloc = rawrefcount._collect() + assert dealloc == [] + ob = wr_ob() + assert ob is not None + assert 
rawrefcount._o_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + assert p._rawrefcount == ob From noreply at buildbot.pypy.org Wed Oct 14 09:56:01 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 09:56:01 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: copy copy copy. insertion of dummy methods to get the test environment going Message-ID: <20151014075601.F1DF81C01DE@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80194:1c30732c9462 Date: 2015-10-14 09:56 +0200 http://bitbucket.org/pypy/pypy/changeset/1c30732c9462/ Log: copy copy copy. insertion of dummy methods to get the test environment going diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1,6 +1,52 @@ from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.metainterp.resoperation import rop class AssemblerZARCH(BaseAssembler): + + def _build_failure_recovery(self, exc, withfloats=False): + pass # TODO + + def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): + pass # TODO + + def build_frame_realloc_slowpath(self): + # this code should do the following steps + # a) store all registers in the jitframe + # b) fish for the arguments passed by the caller + # c) store the gcmap in the jitframe + # d) call realloc_frame + # e) set the fp to point to the new jitframe + # f) store the address of the new jitframe in the shadowstack + # c) set the gcmap field to 0 in the new jitframe + # g) restore registers and return + pass # TODO + + def _build_propagate_exception_path(self): + pass # TODO + + def _build_cond_call_slowpath(self, supports_floats, callee_only): + """ This builds a general call slowpath, for whatever call happens to + come. 
+ """ + pass # TODO + + def _build_stack_check_slowpath(self): + pass # TODO + # ________________________________________ + # ASSEMBLER EMISSION + def emit_op_int_add(self, op): pass +def notimplemented_op(self, op, arglocs, regalloc, fcond): + print "[ZARCH/asm] %s not implemented" % op.getopname() + raise NotImplementedError(op) + +asm_operations = [notimplemented_op] * (rop._LAST + 1) +asm_extra_operations = {} + +for name, value in AssemblerZARCH.__dict__.iteritems(): + if name.startswith('emit_op_'): + opname = name[len('emit_op_'):] + num = getattr(rop, opname.upper()) + asm_operations[num] = value diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -1,7 +1,17 @@ from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU +from rpython.rtyper.lltypesystem import lltype, llmemory class AbstractZARCHCPU(AbstractLLCPU): - pass + def __init__(self, rtyper, stats, opts=None, translate_support_code=False, + gcdescr=None): + AbstractLLCPU.__init__(self, rtyper, stats, opts, + translate_support_code, gcdescr) + + def cast_ptr_to_int(x): + adr = llmemory.cast_ptr_to_adr(x) + return adr + cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)' + cast_ptr_to_int = staticmethod(cast_ptr_to_int) class CPU_S390_64(AbstractZARCHCPU): pass From noreply at buildbot.pypy.org Wed Oct 14 12:25:43 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 12:25:43 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: started the auto instruction encoding, AR_rr correctly assembles Message-ID: <20151014102543.B82A21C089E@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80195:6177697cbd11 Date: 2015-10-14 12:25 +0200 http://bitbucket.org/pypy/pypy/changeset/6177697cbd11/ Log: started the auto instruction encoding, AR_rr correctly assembles diff --git a/rpython/jit/backend/zarch/codebuilder.py 
b/rpython/jit/backend/zarch/codebuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -0,0 +1,93 @@ +from rpython.jit.backend.zarch import conditions as cond +from rpython.jit.backend.zarch import registers as reg +from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.tool.udir import udir +from rpython.jit.backend.detect_cpu import autodetect + +clear_cache = rffi.llexternal( + "__clear_cache", + [llmemory.Address, llmemory.Address], + lltype.Void, + _nowrapper=True, + sandboxsafe=True) + + +def binary_helper_call(name): + function = getattr(support, 'arm_%s' % name) + + def f(self, c=cond.AL): + """Generates a call to a helper function, takes its + arguments in r0 and r1, result is placed in r0""" + addr = rffi.cast(lltype.Signed, function) + self.BL(addr, c) + return f + + +codes = { + 'ADD_rr': 0x1A, +} + +def encode_rr(reg1, reg2): + return chr(((reg2 & 0x0f) << 4) | (reg1 & 0xf)) + +class AbstractZARCHBuilder(object): + def write32(self, word): + self.writechar(chr(word & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + + def AR_rr(self, reg1, reg2): + self.writechar(chr(0x1A)) + self.writechar(encode_rr(reg1, reg2)) + +class InstrBuilder(BlockBuilderMixin, AbstractZARCHBuilder): + + def __init__(self): + AbstractZARCHBuilder.__init__(self) + self.init_block_builder() + # + # ResOperation --> offset in the assembly. 
+ # ops_offset[None] represents the beginning of the code after the last op + # (i.e., the tail of the loop) + self.ops_offset = {} + + def mark_op(self, op): + pos = self.get_relative_pos() + self.ops_offset[op] = pos + + def _dump_trace(self, addr, name, formatter=-1): + if not we_are_translated(): + if formatter != -1: + name = name % formatter + dir = udir.ensure('asm', dir=True) + f = dir.join(name).open('wb') + data = rffi.cast(rffi.CCHARP, addr) + for i in range(self.currpos()): + f.write(data[i]) + f.close() + + def clear_cache(self, addr): + if we_are_translated(): + startaddr = rffi.cast(llmemory.Address, addr) + endaddr = rffi.cast(llmemory.Address, + addr + self.get_relative_pos()) + clear_cache(startaddr, endaddr) + + def copy_to_raw_memory(self, addr): + self._copy_to_raw_memory(addr) + self.clear_cache(addr) + self._dump(addr, "jit-backend-dump", 'arm') + + def currpos(self): + return self.get_relative_pos() + +#define_instructions(AbstractARMBuilder) + +_classes = (AbstractZARCHBuilder,) + +# Used to build the MachineCodeBlockWrapper +all_instructions = sorted([name for cls in _classes for name in cls.__dict__ \ + if name.split('_')[0].isupper()]) diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -9,7 +9,7 @@ def cast_ptr_to_int(x): adr = llmemory.cast_ptr_to_adr(x) - return adr + return adr # TODO cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)' cast_ptr_to_int = staticmethod(cast_ptr_to_int) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -18,7 +18,6 @@ CPU = getcpuclass() - class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py 
b/rpython/jit/backend/zarch/test/test_auto_encoding.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -0,0 +1,280 @@ +import os, random, struct +import subprocess +import py +from rpython.jit.backend.zarch import codebuilder +from rpython.rlib.rarithmetic import intmask +from rpython.tool.udir import udir + +INPUTNAME = 'checkfile_%s.s' +FILENAME = 'checkfile_%s.o' +BEGIN_TAG = '<<>>' +END_TAG = '<<>>' + +class CodeCheckerMixin(object): + def __init__(self, expected, accept_unnecessary_prefix): + self.expected = expected + self.accept_unnecessary_prefix = accept_unnecessary_prefix + self.index = 0 + + def begin(self, op): + self.op = op + self.instrindex = self.index + + def writechar(self, char): + if char != self.expected[self.index:self.index+1]: + if (char == self.accept_unnecessary_prefix + and self.index == self.instrindex): + return # ignore the extra character '\x40' + print self.op + print "\x09from codebuilder.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." + print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." + raise Exception("Differs") + self.index += 1 + + def done(self): + assert len(self.expected) == self.index + + def stack_frame_size_delta(self, delta): + pass # ignored + + def check_stack_size_at_ret(self): + pass # ignored + +class CodeCheckerZARCH(CodeCheckerMixin, codebuilder.InstrBuilder): + pass + +def hexdump(s): + return ' '.join(["%02X" % ord(c) for c in s]) + +def reduce_to_32bit(s): + if s[:2] != '%r': + return s + if s[2:].isdigit(): + return s + 'd' + else: + return '%e' + s[2:] + +# ____________________________________________________________ + +COUNT1 = 15 +suffixes = {0:'', 1:'b', 2:'w', 4:'l', 8:'q'} + + +class TestZARCH(object): + WORD = 8 + TESTDIR = 'zarch' + REGS = range(15+1) + REGNAMES = ['%%r%d' % i for i in REGS] + accept_unnecessary_prefix = None + methname = '?' 
+ + def reg_tests(self): + return self.REGS + + def stack_bp_tests(self, count=COUNT1): + return ([0, 4, -4, 124, 128, -128, -132] + + [random.randrange(-0x20000000, 0x20000000) * 4 + for i in range(count)]) + + def stack_sp_tests(self, count=COUNT1): + return ([0, 4, 124, 128] + + [random.randrange(0, 0x20000000) * 4 + for i in range(count)]) + + def memory_tests(self): + return [(reg, ofs) + for reg in self.NONSPECREGS + for ofs in self.stack_bp_tests(5) + ] + + def array_tests(self): + return [(reg1, reg2, scaleshift, ofs) + for reg1 in self.NONSPECREGS + for reg2 in self.NONSPECREGS + for scaleshift in [0, 1, 2, 3] + for ofs in self.stack_bp_tests(1) + ] + + def imm8_tests(self): + v = ([-128,-1,0,1,127] + + [random.randrange(-127, 127) for i in range(COUNT1)]) + return v + + def imm32_tests(self): + v = ([-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255] + + [random.randrange(-32768,32768)<<16 | + random.randrange(0,65536) for i in range(COUNT1)] + + [random.randrange(128, 256) for i in range(COUNT1)]) + return self.imm8_tests() + v + + def relative_tests(self): + py.test.skip("explicit test required for %r" % (self.methname,)) + + def get_all_tests(self): + return { + 'r': self.reg_tests, + } + + def assembler_operand_reg(self, regnum): + return self.REGNAMES[regnum] + + def assembler_operand_reg8(self, regnum): + assert regnum & rx86.BYTE_REG_FLAG + return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG] + + def assembler_operand_xmm_reg(self, regnum): + return self.XMMREGNAMES[regnum] + + def assembler_operand_stack_bp(self, position): + return '%d(%s)' % (position, self.REGNAMES[5]) + + def assembler_operand_stack_sp(self, position): + return '%d(%s)' % (position, self.REGNAMES[4]) + + def assembler_operand_memory(self, (reg1, offset)): + if not offset: offset = '' + return '%s(%s)' % (offset, self.REGNAMES[reg1]) + + def assembler_operand_array(self, (reg1, reg2, scaleshift, offset)): + if not offset: offset = '' + return '%s(%s,%s,%d)' % (offset, 
self.REGNAMES[reg1], + self.REGNAMES[reg2], 1<=0 + j = data.find(END_TAG, i) + assert j>=0 + as_code = data[i+len(BEGIN_TAG)+1:j] + except IOError: + raise Exception("Assembler did not produce output?") + return oplist, as_code + + def make_all_tests(self, methname, modes, args=[]): + if modes: + tests = self.get_all_tests() + m = modes[0] + lst = tests[m]() + random.shuffle(lst) + if methname == 'PSRAD_xi' and m == 'i': + lst = [x for x in lst if 0 <= x <= 31] + result = [] + for v in lst: + result += self.make_all_tests(methname, modes[1:], args+[v]) + return result + else: + # special cases + if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri', + 'SUB_ri', 'XOR_ri', 'SBB_ri'): + if args[0] == rx86.R.eax: + return [] # ADD EAX, constant: there is a special encoding + if methname in ('CMP8_ri',): + if args[0] == rx86.R.al: + return [] # CMP AL, constant: there is a special encoding + if methname == 'XCHG_rr' and rx86.R.eax in args: + return [] # special encoding + if methname == 'MOV_rj' and args[0] == rx86.R.eax: + return [] # MOV EAX, [immediate]: there is a special encoding + if methname == 'MOV_jr' and args[1] == rx86.R.eax: + return [] # MOV [immediate], EAX: there is a special encoding + if methname == 'MOV8_rj' and args[0] == rx86.R.al: + return [] # MOV AL, [immediate]: there is a special encoding + if methname == 'MOV8_jr' and args[1] == rx86.R.al: + return [] # MOV [immediate], AL: there is a special encoding + + return [args] + + def should_skip_instruction(self, instrname, argmodes): + return False + + def complete_test(self, methname): + if '_' in methname: + instrname, argmodes = methname.split('_') + else: + instrname, argmodes = methname, '' + + if self.should_skip_instruction(instrname, argmodes): + print "Skipping %s" % methname + return + + instr_suffix = None + + print "Testing %s with argmodes=%r" % (instrname, argmodes) + self.methname = methname + ilist = self.make_all_tests(methname, argmodes) + oplist, as_code = self.run_test(methname, 
instrname, argmodes, ilist, + instr_suffix) + cc = CodeCheckerZARCH(as_code, self.accept_unnecessary_prefix) + for op, args in zip(oplist, ilist): + if op: + cc.begin(op) + getattr(cc, methname)(*args) + cc.done() + + def setup_class(cls): + import os + g = os.popen('as -version &1') + data = g.read() + g.close() + if not data.startswith('GNU assembler'): + py.test.skip("full tests require the GNU 'as' assembler") + + @py.test.mark.parametrize("name", codebuilder.all_instructions) + def test_all(self, name): + self.complete_test(name) From noreply at buildbot.pypy.org Wed Oct 14 13:44:16 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 13:44:16 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: own linux tests fixed Message-ID: <20151014114416.133F51C1453@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80196:34775fad7013 Date: 2015-10-14 13:44 +0200 http://bitbucket.org/pypy/pypy/changeset/34775fad7013/ Log: own linux tests fixed diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -95,7 +95,7 @@ VARI = rop.InputArgInt() VARF = rop.InputArgFloat() @py.test.mark.parametrize('opnum,args,kwargs', - [ (rop.rop.INT_SIGNEXT, [VARI, ConstInt(2)], {'from': 8, 'to': 2, 'cast_to': ('i', 2) }), + [ (rop.rop.INT_SIGNEXT, [VARI, ConstInt(2)], {'from': INT_WORD, 'to': 2, 'cast_to': ('i', 2) }), (rop.rop.CAST_FLOAT_TO_INT, [VARF], {'from': 8, 'to': 4}), (rop.rop.CAST_SINGLEFLOAT_TO_FLOAT, [VARI], {'from': 4, 'to': 8}), (rop.rop.CAST_FLOAT_TO_SINGLEFLOAT, [VARF], {'from': 8, 'to': 4}), @@ -112,11 +112,11 @@ op = rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(1)]) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ - ('i', 'i', 8, False) + ('i', 'i', INT_WORD, False) op = 
rop.ResOperation(rop.rop.VEC_UNPACK_I, [rop.InputArgVector(), ConstInt(0), ConstInt(2)]) assert (op.type, op.datatype, op.bytesize, op.is_vector()) == \ - ('i', 'i', 8, True) + ('i', 'i', INT_WORD, True) def test_load_singlefloat(): descr = ArrayDescr(8,4, None, 'S', concrete_type='f') From noreply at buildbot.pypy.org Wed Oct 14 13:47:47 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:47 +0200 (CEST) Subject: [pypy-commit] pypy default: Expose traceback fields in cpyext, and expose PyFrameObject as "struct _frame". Message-ID: <20151014114747.1B3301C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80197:6e335c196a45 Date: 2015-10-04 23:35 -0700 http://bitbucket.org/pypy/pypy/changeset/6e335c196a45/ Log: Expose traceback fields in cpyext, and expose PyFrameObject as "struct _frame". diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h 
b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,47 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + # TODO: decref tb_frame properly! 
+ + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, space.wrap(traceback.next))) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,28 @@ +from rpython.rtyper.lltypesystem import rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert traceback.next is 
space.interp_w(PyTraceback, from_ref(space, rffi.cast(PyObject, py_traceback.c_tb_next)), can_be_None=True) + assert traceback.frame is space.interp_w(PyFrame, from_ref(space, rffi.cast(PyObject, py_traceback.c_tb_frame)), can_be_None=True) + + api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 13:47:49 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:49 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove obsolete TODO. Message-ID: <20151014114749.241781C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80198:d8a5dd4228df Date: 2015-10-05 16:00 -0700 http://bitbucket.org/pypy/pypy/changeset/d8a5dd4228df/ Log: Remove obsolete TODO. diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -27,7 +27,6 @@ basestruct=PyTracebackObject.TO, attach=traceback_attach, dealloc=traceback_dealloc) - # TODO: decref tb_frame properly! def traceback_attach(space, py_obj, w_obj): From noreply at buildbot.pypy.org Wed Oct 14 13:47:51 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:51 +0200 (CEST) Subject: [pypy-commit] pypy default: Rewrote test_traceback assertions to useapplication level equality. Message-ID: <20151014114751.4C70B1C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80199:0ffacb607d98 Date: 2015-10-06 10:18 -0700 http://bitbucket.org/pypy/pypy/changeset/0ffacb607d98/ Log: Rewrote test_traceback assertions to useapplication level equality. Also fixed formatting to keep within 80 characters. 
diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ b/pypy/module/cpyext/test/test_traceback.py @@ -22,7 +22,10 @@ traceback = space.interp_w(PyTraceback, w_traceback) assert traceback.lasti == py_traceback.c_tb_lasti assert traceback.get_lineno() == py_traceback.c_tb_lineno - assert traceback.next is space.interp_w(PyTraceback, from_ref(space, rffi.cast(PyObject, py_traceback.c_tb_next)), can_be_None=True) - assert traceback.frame is space.interp_w(PyFrame, from_ref(space, rffi.cast(PyObject, py_traceback.c_tb_frame)), can_be_None=True) + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space,rffi.cast(PyObject, + py_traceback.c_tb_frame))) api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 13:47:53 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Fixed whitespace formatting. Message-ID: <20151014114753.6A1AA1C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80200:28e0799f9721 Date: 2015-10-06 10:23 -0700 http://bitbucket.org/pypy/pypy/changeset/28e0799f9721/ Log: Fixed whitespace formatting. 
diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ b/pypy/module/cpyext/test/test_traceback.py @@ -25,7 +25,7 @@ assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), space.wrap(py_traceback.c_tb_lasti)) assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), - from_ref(space,rffi.cast(PyObject, - py_traceback.c_tb_frame))) + from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 13:47:55 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:55 +0200 (CEST) Subject: [pypy-commit] pypy default: Add tests for tb_next. Message-ID: <20151014114755.8B3A41C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80201:954e1682cf0d Date: 2015-10-12 02:04 -0700 http://bitbucket.org/pypy/pypy/changeset/954e1682cf0d/ Log: Add tests for tb_next. diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ b/pypy/module/cpyext/test/test_traceback.py @@ -28,4 +28,13 @@ from_ref(space, rffi.cast(PyObject, py_traceback.c_tb_frame))) + while not space.is_w(w_traceback, space.w_None): + next_w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + next_py_traceback = py_traceback.c_tb_next + assert space.is_w( + next_w_traceback, + from_ref(space, rffi.cast(PyObject, next_py_traceback))) + w_traceback = next_w_traceback + py_traceback = next_py_traceback + api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 13:47:57 2015 From: noreply at buildbot.pypy.org (devin.jeanpierre) Date: Wed, 14 Oct 2015 13:47:57 +0200 (CEST) Subject: [pypy-commit] pypy default: Handle None explicitly, after failing to handle it twice! 
Message-ID: <20151014114757.C67161C1453@cobra.cs.uni-duesseldorf.de> Author: Devin Jeanpierre Branch: Changeset: r80202:0c4b1779bcfc Date: 2015-10-12 02:42 -0700 http://bitbucket.org/pypy/pypy/changeset/0c4b1779bcfc/ Log: Handle None explicitly, after failing to handle it twice! diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -32,7 +32,11 @@ def traceback_attach(space, py_obj, w_obj): py_traceback = rffi.cast(PyTracebackObject, py_obj) traceback = space.interp_w(PyTraceback, w_obj) - py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, space.wrap(traceback.next))) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ b/pypy/module/cpyext/test/test_traceback.py @@ -1,4 +1,4 @@ -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref from pypy.module.cpyext.pytraceback import PyTracebackObject @@ -29,12 +29,12 @@ py_traceback.c_tb_frame))) while not space.is_w(w_traceback, space.w_None): - next_w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) - next_py_traceback = py_traceback.c_tb_next assert space.is_w( - next_w_traceback, - from_ref(space, rffi.cast(PyObject, next_py_traceback))) - w_traceback = next_w_traceback - py_traceback = 
next_py_traceback + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 13:47:59 2015 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 14 Oct 2015 13:47:59 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in devin.jeanpierre/pypy-cpyext-traceback (pull request #335) Message-ID: <20151014114759.D9F7D1C1453@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r80203:ab8cc1414237 Date: 2015-10-14 13:47 +0200 http://bitbucket.org/pypy/pypy/changeset/ab8cc1414237/ Log: Merged in devin.jeanpierre/pypy-cpyext-traceback (pull request #335) Improve traceback support in cpyext. diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h 
b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + 
py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), 
+ from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) From noreply at buildbot.pypy.org Wed Oct 14 14:07:55 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 14:07:55 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: removed pdb set_tracer(), this prevented every single test from passing ... Message-ID: <20151014120755.48B531C14D2@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80204:dd282568c406 Date: 2015-10-14 14:08 +0200 http://bitbucket.org/pypy/pypy/changeset/dd282568c406/ Log: removed pdb set_tracer(), this prevented every single test from passing ... diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -63,7 +63,6 @@ stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = pipe.communicate() - import pdb; pdb.set_trace() if pipe.wait() < 0: raise IOError("subprocess was killed by signal %d" % ( pipe.returncode,)) From noreply at buildbot.pypy.org Wed Oct 14 16:54:17 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 16:54:17 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: promoted value size added to expected trace (as a guard_value) Message-ID: <20151014145417.34A431C0726@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80205:e8a7239d1430 Date: 2015-10-14 16:54 +0200 http://bitbucket.org/pypy/pypy/changeset/e8a7239d1430/ Log: promoted value size added to expected trace (as a guard_value) diff --git 
a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -129,7 +129,8 @@ i28 = int_is_true(i27) guard_true(i28, descr=...) i29 = getfield_gc_pure_i(p6, descr=) - i30 = int_add(i5, i29) + guard_value(i29, 8, descr=...) + i30 = int_add(i5, 8) i31 = getfield_gc_pure_i(p1, descr=) i32 = int_ge(i25, i31) guard_false(i32, descr=...) From noreply at buildbot.pypy.org Wed Oct 14 16:55:05 2015 From: noreply at buildbot.pypy.org (vaibhavsood12) Date: Wed, 14 Oct 2015 16:55:05 +0200 (CEST) Subject: [pypy-commit] pypy default: Implement wrap_binaryfunc_l and wrap_binaryfunc_r in cpyext slotdefs Message-ID: <20151014145505.2DF981C0726@cobra.cs.uni-duesseldorf.de> Author: Vaibhav Sood Branch: Changeset: r80206:a0e67e614376 Date: 2015-10-06 12:28 +0530 http://bitbucket.org/pypy/pypy/changeset/a0e67e614376/ Log: Implement wrap_binaryfunc_l and wrap_binaryfunc_r in cpyext slotdefs diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise 
OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 3 + assert (a + b) == 42 + raises(NotImplementedError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') From noreply at buildbot.pypy.org Wed Oct 14 16:55:09 2015 From: noreply at buildbot.pypy.org (vaibhavsood12) Date: Wed, 14 Oct 2015 16:55:09 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged pypy/pypy into default Message-ID: <20151014145509.BD2691C0726@cobra.cs.uni-duesseldorf.de> Author: Vaibhav Sood Branch: Changeset: r80207:8f2399c68f0f Date: 2015-10-06 14:51 +0530 http://bitbucket.org/pypy/pypy/changeset/8f2399c68f0f/ Log: Merged pypy/pypy into default diff too 
long, truncating to 2000 out of 5526 lines diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not 
copied_enums: from . import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, 
partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + 
return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return 
tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = 
[self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. # This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = 
tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals 
= (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 
+773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -43,3 +43,13 @@ .. branch: numpy-ctypes Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. 
issue #2148. diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not 
isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, 
w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -178,7 +178,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,13 +200,21 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") + at jit.jit_callback("CFFI") +def py_invoke_callback(callback, ll_res, ll_args): + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) - at jit.jit_callback("CFFI") def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ @@ -228,13 +237,7 @@ space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -130,7 +131,8 @@ # though it may be signed when 'wchar_t' is written to C). 
WCHAR_INT = {(2, False): rffi.USHORT, (4, False): rffi.UINT, - (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), rffi.r_wchar_t.SIGN] + (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), + rfficache.signof_c_type('wchar_t')] WCHAR_INTP = rffi.CArrayPtr(WCHAR_INT) class W_CTypePrimitiveUniChar(W_CTypePrimitiveCharOrUniChar): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, 
ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ + at specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = 
rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + + at jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. 
+ unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + + at jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py 
b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,10 +3,13 @@ from rpython.rlib.buffer import SubBuffer from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi + from pypy.module.micronumpy import descriptor, loop, support from pypy.module.micronumpy.base import ( W_NDimArray, convert_to_array, W_NumpyObject) from pypy.module.micronumpy.converters import shape_converter +from . 
import constants as NPY +from .casting import scalar2dtype def build_scalar(space, w_dtype, w_state): @@ -82,7 +85,6 @@ return w_res def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): - from pypy.module.micronumpy import strides # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): @@ -143,16 +145,11 @@ w_base=w_base, start=imp.start) else: # not an array - shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype) + shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = find_dtype_for_seq(space, elems_w, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: # safe from overflow since from_shape checks @@ -165,7 +162,6 @@ def numpify(space, w_object): """Convert the object to a W_NumpyObject""" # XXX: code duplication with _array() - from pypy.module.micronumpy import strides if isinstance(w_object, W_NumpyObject): return w_object # for anything that isn't already an array, try __array__ method first @@ -173,20 +169,82 @@ if w_array is not None: return w_array - shape, elems_w = strides.find_shape_and_elems(space, w_object, None) + if is_scalar_like(space, w_object, dtype=None): + dtype = scalar2dtype(space, w_object) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return dtype.coerce(space, w_object) + + shape, elems_w = _find_shape_and_elems(space, w_object) dtype = find_dtype_for_seq(space, 
elems_w, None) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') + w_arr = W_NDimArray.from_shape(space, shape, dtype) + loop.assign(space, w_arr, elems_w) + return w_arr - if len(elems_w) == 1: - return dtype.coerce(space, elems_w[0]) + +def find_shape_and_elems(space, w_iterable, dtype): + if is_scalar_like(space, w_iterable, dtype): + return [], [w_iterable] + is_rec_type = dtype is not None and dtype.is_record() + return _find_shape_and_elems(space, w_iterable, is_rec_type) + +def is_scalar_like(space, w_obj, dtype): + isstr = space.isinstance_w(w_obj, space.w_str) + if not support.issequence_w(space, w_obj) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return True + is_rec_type = dtype is not None and dtype.is_record() + if is_rec_type and is_single_elem(space, w_obj, is_rec_type): + return True + if isinstance(w_obj, W_NDimArray) and w_obj.is_scalar(): + return True + return False + +def _find_shape_and_elems(space, w_iterable, is_rec_type=False): + from pypy.objspace.std.bufferobject import W_Buffer + shape = [space.len_w(w_iterable)] + if space.isinstance_w(w_iterable, space.w_buffer): + batch = [space.wrap(0)] * shape[0] + for i in range(shape[0]): + batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) else: - w_arr = W_NDimArray.from_shape(space, shape, dtype) - loop.assign(space, w_arr, elems_w) - return w_arr + batch = space.listview(w_iterable) + while True: + if not batch: + return shape[:], [] + if is_single_elem(space, batch[0], is_rec_type): + for w_elem in batch: + if not is_single_elem(space, w_elem, is_rec_type): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + return shape[:], batch + new_batch = [] + size = space.len_w(batch[0]) + for w_elem in batch: + if (is_single_elem(space, w_elem, 
is_rec_type) or + space.len_w(w_elem) != size): + raise OperationError(space.w_ValueError, space.wrap( + "setting an array element with a sequence")) + w_array = space.lookup(w_elem, '__array__') + if w_array is not None: + # Make sure we call the array implementation of listview, + # since for some ndarray subclasses (matrix, for instance) + # listview does not reduce but rather returns the same class + w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) + new_batch += space.listview(w_elem) + shape.append(size) + batch = new_batch + +def is_single_elem(space, w_elem, is_rec_type): + if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): + return True + if (space.isinstance_w(w_elem, space.w_tuple) or + space.isinstance_w(w_elem, space.w_list)): + return False + if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): + return False + return True def _dtype_guess(space, dtype, w_elem): from .casting import scalar2dtype, find_binop_result_dtype @@ -201,6 +259,11 @@ return _dtype_guess(space, dtype, w_elem) for w_elem in elems_w: dtype = _dtype_guess(space, dtype, w_elem) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') return dtype diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -862,6 +862,8 @@ v = convert_to_array(space, w_v) ret = W_NDimArray.from_shape( space, v.get_shape(), get_dtype_cache(space).w_longdtype) + if ret.get_size() < 1: + return ret if side == NPY.SEARCHLEFT: binsearch = loop.binsearch_left else: @@ -1308,6 +1310,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if 
isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -189,67 +189,6 @@ return rstrides, rbackstrides -def is_single_elem(space, w_elem, is_rec_type): - if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): - return True - if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): - return False - if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): - return False - return True - - -def find_shape_and_elems(space, w_iterable, dtype): - isstr = space.isinstance_w(w_iterable, space.w_str) - if not support.issequence_w(space, w_iterable) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - return [], [w_iterable] - is_rec_type = dtype is not None and dtype.is_record() - if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): - return [], [w_iterable] - if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): - return [], [w_iterable] - return _find_shape_and_elems(space, w_iterable, is_rec_type) - - -def _find_shape_and_elems(space, w_iterable, is_rec_type): - from pypy.objspace.std.bufferobject import W_Buffer - shape = [space.len_w(w_iterable)] - if space.isinstance_w(w_iterable, space.w_buffer): - batch = [space.wrap(0)] * shape[0] - for i in range(shape[0]): - batch[i] = space.ord(space.getitem(w_iterable, space.wrap(i))) - else: - batch = space.listview(w_iterable) - while True: - if not batch: - return shape[:], [] - if is_single_elem(space, batch[0], is_rec_type): - for w_elem in batch: - if not is_single_elem(space, w_elem, is_rec_type): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - return shape[:], batch - new_batch = [] - size = space.len_w(batch[0]) - for w_elem in batch: - if 
(is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): - raise OperationError(space.w_ValueError, space.wrap( - "setting an array element with a sequence")) - w_array = space.lookup(w_elem, '__array__') - if w_array is not None: - # Make sure we call the array implementation of listview, - # since for some ndarray subclasses (matrix, for instance) - # listview does not reduce but rather returns the same class - w_elem = space.get_and_call_function(w_array, w_elem, space.w_None) - new_batch += space.listview(w_elem) - shape.append(size) - batch = new_batch - - - @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -39,7 +39,10 @@ def product_check(s): i = 1 for x in s: - i = ovfcheck(i * x) + try: + i = ovfcheck(i * x) + except OverflowError: + raise return i def check_and_adjust_index(space, index, size, axis): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -169,7 +169,7 @@ [1, 1, 1, 105, 105] def test_find_shape(self): - from pypy.module.micronumpy.strides import find_shape_and_elems + from pypy.module.micronumpy.ctors import find_shape_and_elems space = self.space shape, elems = find_shape_and_elems(space, @@ -2475,6 +2475,18 @@ a.fill(12) assert (a == u'1').all() + def test_unicode_record_array(self) : + from numpy import dtype, array + t = dtype([('a', 'S3'), ('b', 'U2')]) + x = array([('a', u'b')], dtype=t) + assert str(x) == "[('a', u'b')]" + + t = dtype([('a', 'U3'), ('b', 'S2')]) + x = array([(u'a', 'b')], dtype=t) + x['a'] = u'1' + assert str(x) == "[(u'1', 'b')]" + + def test_boolean_indexing(self): import numpy as np a = np.zeros((1, 3)) @@ -2697,7 +2709,7 @@ 
"input array from shape (3,1) into shape (3)" a[:, 1] = b[:,0] > 0.5 assert (a == [[0, 1], [0, 1], [0, 1]]).all() - + def test_ufunc(self): from numpy import array @@ -3856,7 +3868,7 @@ assert a[0]['y'] == 2 assert a[1]['y'] == 1 - + a = array([(1, [])], dtype=[('a', int32), ('b', int32, 0)]) assert a['b'].shape == (1, 0) b = loads(dumps(a)) diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) + assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -480,3 +480,9 @@ u = unicode_(u'Aÿ') # raises(UnicodeEncodeError, "str(u)") # XXX assert repr(u) == repr(u'Aÿ') + + def test_binop_with_sequence(self): + import numpy as np + c = np.float64(1.) + [1.] 
+ assert isinstance(c, np.ndarray) + assert (c == [2.]).all() diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2231,9 +2231,9 @@ index = i + offset + 4*k data = rffi.cast(Int32.T, ord(box._value[k])) raw_storage_setitem_unaligned(storage, index, data) - for k in range(size, width // 4): - index = i + offset + 4*k - data = rffi.cast(Int32.T, 0) + # zero out the remaining memory + for index in range(size * 4 + i + offset, width): + data = rffi.cast(Int8.T, 0) raw_storage_setitem_unaligned(storage, index, data) def read(self, arr, i, offset, dtype): diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -479,6 +479,7 @@ dt_in, dt_out = self._calc_dtype(space, dtype, out, casting) return dt_in, dt_out, self.func + @jit.unroll_safe def _calc_dtype(self, space, arg_dtype, out=None, casting='unsafe'): if arg_dtype.is_object(): return arg_dtype, arg_dtype @@ -672,6 +673,7 @@ "requested type has type code '%s'" % (self.name, dtype.char)) + @jit.unroll_safe def _calc_dtype(self, space, l_dtype, r_dtype, out, casting, w_arg1, w_arg2): if l_dtype.is_object() or r_dtype.is_object(): diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -15,8 +15,12 @@ 'set_compile_hook': 'interp_resop.set_compile_hook', 'set_abort_hook': 'interp_resop.set_abort_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', - 'enable_debug': 'interp_resop.enable_debug', - 'disable_debug': 'interp_resop.disable_debug', + # those things are disabled because they have bugs, but if + # they're found to be useful, fix test_ztranslation_jit_stats + # in the backend first. 
get_stats_snapshot still produces + # correct loop_runs if PYPYLOG is correct + #'enable_debug': 'interp_resop.enable_debug', + #'disable_debug': 'interp_resop.disable_debug', 'ResOperation': 'interp_resop.WrappedOp', 'DebugMergePoint': 'interp_resop.DebugMergePoint', 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py --- a/pypy/module/pypyjit/test_pypy_c/test_alloc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py @@ -7,10 +7,11 @@ [2 ** n - 1 for 
n in range(26)]) def test_newstr_constant_size(self): - for size in TestAlloc.SIZES: + for size in sorted(TestAlloc.SIZES): yield self.newstr_constant_size, size def newstr_constant_size(self, size): + print 'size =', size src = """if 1: N = %(size)d part_a = 'a' * N diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py --- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py +++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py @@ -248,3 +248,42 @@ guard_false(i157, descr=...) jump(..., descr=...) """) + + def test_mixed_div(self): + N = 1500 + def main(): + N = 1500 + import _numpypy.multiarray as np + arr = np.zeros(N) + l = [arr[i]/2. for i in range(N)] + return l + log = self.run(main, []) + assert log.result == [0.] * N + loop, = log.loops_by_filename(self.filepath) + assert loop.match(""" + i92 = int_ge(i91, i37) + guard_false(i92, descr=...) + i93 = int_add(i91, 1) + setfield_gc(p23, i93, descr=) + i94 = int_ge(i91, i56) + guard_false(i94, descr=...) 
+ i96 = int_mul(i91, i58) + i97 = int_add(i51, i96) + f98 = raw_load_f(i63, i97, descr=) + guard_not_invalidated(descr=...) + f100 = float_mul(f98, 0.500000) + i101 = int_add(i79, 1) + i102 = arraylen_gc(p85, descr=) + i103 = int_lt(i102, i101) + cond_call(i103, ConstClass(_ll_list_resize_hint_really_look_inside_iff__listPtr_Signed_Bool), p76, i101, 1, descr=) + guard_no_exception(descr=...) + p104 = getfield_gc_r(p76, descr=) + p105 = new_with_vtable(descr=) + setfield_gc(p105, f100, descr=) + setarrayitem_gc(p104, i79, p105, descr=) + i106 = getfield_raw_i(#, descr=) + setfield_gc(p76, i101, descr=) + i107 = int_lt(i106, 0) + guard_false(i107, descr=...) + jump(..., descr=...) + """) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -757,8 +757,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert 
lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = _get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert 
bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int *a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -209,6 +209,9 @@ # Check the particular results on Intel import platform if (platform.machine().startswith('i386') or + platform.machine().startswith('i486') or + platform.machine().startswith('i586') or + platform.machine().startswith('i686') or platform.machine().startswith('x86')): assert abs(more_precise - 0.656769) < 0.001 assert abs(less_precise - 3.99091) < 0.001 @@ -1636,11 +1639,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1653,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2251,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... 
t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_new_ffi_1.py @@ -782,8 +782,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine() == 'aarch64': # 4 bytes, unsigned - assert 
int(p) == 0xffffffff + elif platform.machine().startswith(('arm', 'aarch64')): + assert int(p) == 0xffffffff # 4 bytes, unsigned else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, ffiplatform, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... };") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + From noreply at 
buildbot.pypy.org Wed Oct 14 16:55:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 16:55:12 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in vaibhavsood12/pypy (pull request #337) Message-ID: <20151014145512.030A21C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80208:90d58373607d Date: 2015-10-14 16:55 +0200 http://bitbucket.org/pypy/pypy/changeset/90d58373607d/ Log: Merged in vaibhavsood12/pypy (pull request #337) Implement wrap_binaryfunc_l and wrap_binaryfunc_r in cpyext slotdefs diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert 
bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 3 + assert (a + b) == 42 + raises(NotImplementedError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') From noreply at buildbot.pypy.org Wed Oct 14 16:59:25 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 16:59:25 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: skipping test_zjit for a non vectorizing cpu, all of the tests expect a SIMD backend to be implemented Message-ID: <20151014145925.CB2B01C0726@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80209:ef76f6b37cda Date: 2015-10-14 16:59 +0200 http://bitbucket.org/pypy/pypy/changeset/ef76f6b37cda/ Log: skipping test_zjit for a non vectorizing cpu, all of the tests expect a SIMD backend to be implemented diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -13,6 +13,11 @@ from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from 
pypy.module.micronumpy.base import W_NDimArray +from rpython.jit.backend.detect_cpu import getcpuclass + +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) def get_profiler(): from rpython.jit.metainterp import pyjitpl From noreply at buildbot.pypy.org Wed Oct 14 17:28:42 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 17:28:42 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: copied locations and added gp registers (as well as floating register) Message-ID: <20151014152842.7BF911C1186@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80210:7881dc958ddb Date: 2015-10-14 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/7881dc958ddb/ Log: copied locations and added gp registers (as well as floating register) diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, JITFRAME_FIXED_SIZE - class AssemblerLocation(object): _immutable_ = True type = INT diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/arch.py @@ -0,0 +1,5 @@ + +# TODO +WORD = 8 + +JITFRAME_FIXED_SIZE = 48 diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -1,3 +1,175 @@ +from rpython.jit.metainterp.history import INT, FLOAT +from rpython.jit.backend.zarch.arch import WORD, JITFRAME_FIXED_SIZE +class AssemblerLocation(object): + _immutable_ = True + type = INT -imm = None + def is_imm(self): + return False + + def is_stack(self): + return False + + def is_raw_sp(self): + return False + + 
def is_core_reg(self): + return False + + def is_vfp_reg(self): + return False + + def is_imm_float(self): + return False + + def is_float(self): + return False + + def as_key(self): + raise NotImplementedError + + def get_position(self): + raise NotImplementedError # only for stack + +class RegisterLocation(AssemblerLocation): + _immutable_ = True + width = WORD + + def __init__(self, value): + self.value = value + + def __repr__(self): + return 'r%d' % self.value + + def is_core_reg(self): + return True + + def as_key(self): # 0 <= as_key <= 15 + return self.value + + +class FloatRegisterLocation(RegisterLocation): + _immutable_ = True + type = FLOAT + width = WORD + + def __repr__(self): + return 'f%d' % self.value + + def is_core_reg(self): + return False + + def is_vfp_reg(self): + return True + + def as_key(self): # 20 <= as_key <= 35 + return self.value + 20 + + def is_float(self): + return True + +class ImmLocation(AssemblerLocation): + _immutable_ = True + width = WORD + + def __init__(self, value): + self.value = value + + def getint(self): + return self.value + + def __repr__(self): + return "imm(%d)" % (self.value) + + def is_imm(self): + return True + + +class ConstFloatLoc(AssemblerLocation): + """This class represents an imm float value which is stored in memory at + the address stored in the field value""" + _immutable_ = True + width = WORD + type = FLOAT + + def __init__(self, value): + self.value = value + + def getint(self): + return self.value + + def __repr__(self): + return "imm_float(stored at %d)" % (self.value) + + def is_imm_float(self): + return True + + def as_key(self): # a real address + 1 + return self.value | 1 + + def is_float(self): + return True + +class StackLocation(AssemblerLocation): + _immutable_ = True + + def __init__(self, position, fp_offset, type=INT): + if type == FLOAT: + self.width = DOUBLE_WORD + else: + self.width = WORD + self.position = position + self.value = fp_offset + self.type = type + + def __repr__(self): 
+ return 'FP(%s)+%d' % (self.type, self.position,) + + def location_code(self): + return 'b' + + def get_position(self): + return self.position + + def assembler(self): + return repr(self) + + def is_stack(self): + return True + + def as_key(self): # an aligned word + 10000 + return self.position + 10000 + + def is_float(self): + return self.type == FLOAT + +class RawSPStackLocation(AssemblerLocation): + _immutable_ = True + + def __init__(self, sp_offset, type=INT): + if type == FLOAT: + self.width = DOUBLE_WORD + else: + self.width = WORD + self.value = sp_offset + self.type = type + + def __repr__(self): + return 'SP(%s)+%d' % (self.type, self.value,) + + def is_raw_sp(self): + return True + + def is_float(self): + return self.type == FLOAT + + def as_key(self): # a word >= 1000, and < 1000 + size of SP frame + return self.value + 1000 + + +def imm(i): + return ImmLocation(i) + +def get_fp_offset(base_ofs, position): + return base_ofs + WORD * (position + JITFRAME_FIXED_SIZE) diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -0,0 +1,13 @@ + + +from rpython.jit.backend.zarch.locations import FloatRegisterLocation +from rpython.jit.backend.zarch.locations import RegisterLocation + +registers = [RegisterLocation(i) for i in range(16)] +fpregisters = [FloatRegisterLocation(i) for i in range(16)] + +[r0,r1,r2,r3,r4,r5,r6,r7,r8, + r9,r10,r11,r12,r13,r14,r15] = registers + +[f0,f1,f2,f3,f4,f5,f6,f7,f8, + f9,f10,f11,f12,f13,f14,f15] = fpregisters From noreply at buildbot.pypy.org Wed Oct 14 18:31:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 18:31:30 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Make three versions: create_link_pypy, create_link_pyobj, create_link_shared Message-ID: <20151014163130.5B3071C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: 
r80211:aff2ddf11c01 Date: 2015-10-14 18:31 +0200 http://bitbucket.org/pypy/pypy/changeset/aff2ddf11c01/ Log: Make three versions: create_link_pypy, create_link_pyobj, create_link_shared diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -14,10 +14,11 @@ _p_list = [] # not rpython _o_list = [] # not rpython +_s_list = [] # not rpython -def create_link_from_pypy(p, ob): - "NOT_RPYTHON" +def create_link_pypy(p, ob): + "NOT_RPYTHON: a link where the PyPy object contains all the data" assert not hasattr(p, '__rawrefcount') assert not ob.ob_pypy_link ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) @@ -25,13 +26,26 @@ p.__rawrefcount = ob _p_list.append(ob) -def create_link_to_pypy(p, ob): - "NOT_RPYTHON" +def create_link_pyobj(p, ob): + """NOT_RPYTHON: a link where the PyObject contains all the data. + from_obj() will not work on this 'p'.""" + assert not hasattr(p, '__rawrefcount') assert not ob.ob_pypy_link ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _o_list.append(ob) +def create_link_shared(p, ob): + """NOT_RPYTHON: a link where both p and ob contain some data. + from_obj() will not work on this 'p'.""" + assert not hasattr(p, '__rawrefcount') + assert not ob.ob_pypy_link + ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) + _s_list.append(ob) + def from_obj(OBTYPE, p): "NOT_RPYTHON" null = lltype.nullptr(OBTYPE) @@ -56,32 +70,55 @@ Returns the list of ob's whose _Py_Dealloc() should be called, from the O list. 
""" - global _p_list, _o_list - wr_p_list = [] - new_p_list = [] - for ob in _p_list: + def detach(ob, wr_list): assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT - if ob.ob_refcnt == REFCNT_FROM_PYPY_OBJECT: - wr_p_list.append(weakref.ref(ob)) - else: - new_p_list.append(ob) - ob = None - _p_list = Ellipsis - # - wr_o_list = [] - for ob in _o_list: assert ob.ob_pypy_link p = rgc.try_cast_gcref_to_instance(object, ob.ob_pypy_link) assert p is not None ob.ob_pypy_link = lltype.nullptr(llmemory.GCREF.TO) - wr_o_list.append((ob, weakref.ref(p))) - p = None + wr_list.append((ob, weakref.ref(p))) + + global _p_list, _o_list, _s_list + wr_p_list = [] + new_p_list = [] + for ob in _p_list: + if ob.ob_refcnt > REFCNT_FROM_PYPY_OBJECT: + new_p_list.append(ob) + else: + wr_p_list.append(weakref.ref(ob)) + ob = None + _p_list = Ellipsis + + wr_s_list = [] + new_s_list = [] + for ob in _s_list: + if ob.ob_refcnt > REFCNT_FROM_PYPY_OBJECT: + new_s_list.append(ob) + else: + detach(ob, wr_s_list) + ob = None + _s_list = Ellipsis + + wr_o_list = [] + for ob in _o_list: + detach(ob, wr_o_list) _o_list = Ellipsis - # + rgc.collect() # forces the cycles to be resolved and the weakrefs to die rgc.collect() rgc.collect() - # + + def attach(ob, wr, final_list): + assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + p = wr() + if p is not None: + ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + final_list.append(ob) + else: + ob.ob_refcnt -= REFCNT_FROM_PYPY_OBJECT + if ob.ob_refcnt == 0: + dealloc.append(ob) + _p_list = new_p_list for wr in wr_p_list: ob = wr() @@ -89,17 +126,12 @@ _p_list.append(ob) # dealloc = [] + _s_list = new_s_list + for ob, wr in wr_s_list: + attach(ob, wr, _s_list) _o_list = [] for ob, wr in wr_o_list: - p = wr() - if p is not None: - ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) - _o_list.append(ob) - else: - assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT - ob.ob_refcnt -= REFCNT_FROM_PYPY_OBJECT - if ob.ob_refcnt == 0: - dealloc.append(ob) + attach(ob, wr, 
_o_list) return dealloc # ____________________________________________________________ diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -17,24 +17,35 @@ def setup_method(self, meth): del rawrefcount._p_list[:] del rawrefcount._o_list[:] + del rawrefcount._s_list[:] - def test_create_link_from_pypy(self): + def test_create_link_pypy(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None - rawrefcount.create_link_from_pypy(p, ob) + rawrefcount.create_link_pypy(p, ob) assert rawrefcount.from_obj(PyObjectS, p) == ob assert rawrefcount.to_obj(W_Root, ob) == p - def test_create_link_to_pypy(self): + def test_create_link_pyobj(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None - rawrefcount.create_link_to_pypy(p, ob) + rawrefcount.create_link_pyobj(p, ob) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == p + + def test_create_link_shared(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.to_obj(W_Root, ob) == None + rawrefcount.create_link_shared(p, ob) assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p @@ -42,7 +53,7 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - rawrefcount.create_link_from_pypy(p, ob) + rawrefcount.create_link_pypy(p, ob) assert rawrefcount._p_list == [ob] wr_ob = 
weakref.ref(ob) wr_p = weakref.ref(p) @@ -56,7 +67,7 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - rawrefcount.create_link_from_pypy(p, ob) + rawrefcount.create_link_pypy(p, ob) assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -74,7 +85,7 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - rawrefcount.create_link_from_pypy(p, ob) + rawrefcount.create_link_pypy(p, ob) assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains @@ -89,7 +100,7 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - rawrefcount.create_link_to_pypy(p, ob) + rawrefcount.create_link_pyobj(p, ob) assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -105,8 +116,8 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - p._rawrefcount = ob - rawrefcount.create_link_to_pypy(p, ob) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -124,8 +135,8 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - p._rawrefcount = ob - rawrefcount.create_link_to_pypy(p, ob) + p.pyobj = ob + rawrefcount.create_link_pyobj(p, ob) assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains @@ -135,4 +146,54 @@ assert ob is not None assert rawrefcount._o_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p - assert p._rawrefcount == ob + assert p.pyobj == ob + + def test_collect_s_dies(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + rawrefcount.create_link_shared(p, ob) + assert rawrefcount._s_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + del ob, p + dealloc = rawrefcount._collect() + ob = wr_ob() + assert ob is not None 
+ assert dealloc == [ob] + assert rawrefcount._s_list == [] + assert wr_p() is None + + def test_collect_s_keepalive_pyobject(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + p.pyobj = ob + rawrefcount.create_link_shared(p, ob) + assert rawrefcount._s_list == [ob] + wr_ob = weakref.ref(ob) + wr_p = weakref.ref(p) + ob.ob_refcnt += 1 # <= + del ob, p + rawrefcount._collect() + ob = wr_ob() + p = wr_p() + assert ob is not None and p is not None + assert rawrefcount._s_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p + + def test_collect_s_keepalive_w_root(self): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, + track_allocation=False) + p.pyobj = ob + rawrefcount.create_link_shared(p, ob) + assert rawrefcount._s_list == [ob] + wr_ob = weakref.ref(ob) + del ob # p remains + dealloc = rawrefcount._collect() + assert dealloc == [] + ob = wr_ob() + assert ob is not None + assert rawrefcount._s_list == [ob] + assert rawrefcount.to_obj(W_Root, ob) == p From noreply at buildbot.pypy.org Wed Oct 14 18:39:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 18:39:47 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Comments Message-ID: <20151014163947.4EED71C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80212:905a7c2186e0 Date: 2015-10-14 18:40 +0200 http://bitbucket.org/pypy/pypy/changeset/905a7c2186e0/ Log: Comments diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -107,7 +107,6 @@ A few special types need to be reflected both as PyPy objects and PyObjects. For now we assume that these are large and mostly -immutable, like objects. They should be linked in the O list, -and we'll ignore the issues of deallocation ordering for them. 
(Also, -W_TypeObject can have a back-reference field like -W_CPyExtPlaceHolderObject.) +immutable, like objects. They should be linked in some mixture +of the P list and the O list. Likely, the P list with an extra flag +that says "_Py_Dealloc must be invoked". diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -74,6 +74,7 @@ w_mod = check_sys_modules_w(space, modulename) if not w_mod or space.is_w(w_mod, space.w_None): w_mod = Module(space, space.wrap(modulename)) + XXX - "insert it into sys.modules!" return borrow_from(None, w_mod) @cpython_api([], PyObject) From noreply at buildbot.pypy.org Wed Oct 14 18:40:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 18:40:40 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: hg merge default Message-ID: <20151014164040.29C781C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80213:2f3a3982fbe5 Date: 2015-10-14 18:40 +0200 http://bitbucket.org/pypy/pypy/changeset/2f3a3982fbe5/ Log: hg merge default diff too long, truncating to 2000 out of 2023 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,9 @@ ffi.new_handle() returns handles that work more like CPython's: they remain valid as long as the target exists (unlike the previous version, where handles become invalid *before* the __del__ is called). + +.. 
branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, 
generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, 
py_obj) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + 
py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 
3 + assert (a + b) == 42 + raises(NotImplementedError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -74,10 +74,10 @@ call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -95,9 +95,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -108,10 +108,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -134,24 +134,29 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,8 +159,7 @@ af2 = ufunc(af) assert all(af2 == af * 2) ac = arange(10, dtype=complex) - skip('casting not implemented yet') - ac1 = ufunc(ac) + 
raises(TypeError, ufunc, ac) def test_frompyfunc_2d_sig(self): import sys @@ -199,6 +198,10 @@ ai2 = ufunc(aiV) assert (ai2 == aiV * 2).all() + ai = arange(0).reshape(0, 1, 1) + ao = ufunc(ai) + assert ao.shape == (0, 1, 1) + def test_frompyfunc_needs_nditer(self): import sys from numpy import frompyfunc, dtype, arange @@ -268,6 +271,54 @@ assert out0.shape == in0.shape assert (out0 == in0 * 2).all() + def test_frompyfunc_casting(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def times2_int(in0, out0): + assert in0.dtype == int + assert out0.dtype == int + # hack to assing to a 0-dim array + out0.real = in0 * 2 + + def times2_complex(in0, out0): + assert in0.dtype == complex + assert out0.dtype == complex + out0.real = in0.real * 2 + out0.imag = in0.imag + + def times2_complex0(in0): + assert in0.dtype == complex + return in0 * 2 + + def times2_int0(in0): + assert in0.dtype == int + return in0 * 2 + + times2stacked = np.frompyfunc([times2_int, times2_complex], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=True, signature='()->()', + ) + times2 = np.frompyfunc([times2_int0, times2_complex0], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d) + out0 = times2stacked(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + out0 = times2(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): @@ -1393,7 +1444,7 @@ def test_add_doc(self): import sys if '__pypy__' not in sys.builtin_module_names: - skip('') + skip('cpython sets docstrings differently') try: from 
numpy import set_docstring except ImportError: diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -709,6 +709,32 @@ raise oefmt(space.w_TypeError, "ufunc '%s' not supported for the input types", self.name) +def _match_dtypes(space, indtypes, targetdtypes, i_target, casting): + allok = True + for i in range(len(indtypes)): + origin = indtypes[i] + target = targetdtypes[i + i_target] + if origin is None: + continue + if target is None: + continue + if not can_cast_type(space, origin, target, casting): + allok = False + break + return allok + +def _raise_err_msg(self, space, dtypes0, dtypes1): + dtypesstr = '' + for d in dtypes0: + if d is None: + dtypesstr += 'None,' + else: + dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) + _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ + for d in dtypes1]) + raise oefmt(space.w_TypeError, + "input dtype [%s] did not match any known dtypes [%s] ", + dtypesstr,_dtypesstr) class W_UfuncGeneric(W_Ufunc): @@ -799,29 +825,36 @@ outargs0 = outargs[0] assert isinstance(inargs0, W_NDimArray) assert isinstance(outargs0, W_NDimArray) + nin = self.nin + assert nin >= 0 res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap outargs using __array_wrap__ + if self.stack_inputs: + loop.call_many_to_many(space, new_shape, func, + dtypes, [], inargs + outargs, []) + if len(outargs) < 2: + return outargs[0] + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - res_dtype, inargs, outargs[0]) + dtypes[:nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - res_dtype, inargs, outargs) + dtypes[:nin], dtypes[nin:], inargs, outargs) + w_casting = space.w_None + w_op_dtypes = space.w_None for tf in need_to_cast: if tf: - raise oefmt(space.w_NotImplementedError, "casting not 
supported yet") + w_casting = space.wrap('safe') + w_op_dtypes = space.newtuple([space.wrap(d) for d in dtypes]) + w_flags = space.w_None # NOT 'external_loop', we do coalescing by core_num_dims - w_op_flags = space.newtuple([space.wrap(r) for r in ['readonly'] * len(inargs)] + \ - [space.wrap(r) for r in ['readwrite'] * len(outargs)]) - w_op_dtypes = space.w_None - w_casting = space.w_None + w_ro = space.newtuple([space.wrap('readonly'), space.wrap('copy')]) + w_rw = space.newtuple([space.wrap('readwrite'), space.wrap('updateifcopy')]) + + w_op_flags = space.newtuple([w_ro] * len(inargs) + [w_rw] * len(outargs)) w_op_axes = space.w_None - #print '\nsignature', sig - #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 'broad' in d] - #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' in d] - #print 'shapes',[d.get_shape() for d in inargs + outargs] - #print 'steps',[d.implementation.strides for d in inargs + outargs] if isinstance(func, W_GenericUFuncCaller): # Use GeneralizeUfunc interface with signature # Unlike numpy, we will not broadcast dims before @@ -934,19 +967,32 @@ # linear_search_type_resolver in numpy ufunc_type_resolutions.c # type_tup can be '', a tuple of dtypes, or a string # of the form d,t -> D where the letters are dtype specs - nop = len(inargs) + len(outargs) + + # XXX why does the next line not pass translation? 
+ # dtypes = [i.get_dtype() for i in inargs] dtypes = [] + for i in inargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) + for i in outargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) if isinstance(type_tup, str) and len(type_tup) > 0: try: if len(type_tup) == 1: - dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs + s_dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs elif len(type_tup) == self.nargs + 2: + s_dtypes = [] for i in range(self.nin): - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) #skip the '->' in the signature for i in range(self.nout): j = i + self.nin + 2 - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ @@ -955,42 +1001,29 @@ except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ " call to %s with type-string '%s'", self.name, type_tup) - else: - # XXX why does the next line not pass translation? 
- # dtypes = [i.get_dtype() for i in inargs] - for i in inargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) - for i in outargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) + # Make sure args can be cast to dtypes + if not _match_dtypes(space, dtypes, s_dtypes, 0, "safe"): + _raise_err_msg(self, space, dtypes, s_dtypes) + dtypes = s_dtypes #Find the first matchup of dtypes with _dtypes for i in range(0, len(_dtypes), self.nargs): - allok = True - for j in range(self.nargs): - if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: - allok = False + allok = _match_dtypes(space, dtypes, _dtypes, i, "no") if allok: break else: - if len(self.funcs) > 1: - - dtypesstr = '' - for d in dtypes: - if d is None: - dtypesstr += 'None,' - else: - dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) - _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ - for d in _dtypes]) - raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", - dtypesstr,_dtypesstr) - i = 0 + # No exact matches, can we cast? + for i in range(0, len(_dtypes), self.nargs): + allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") + if allok: + end = i + self.nargs + assert i >= 0 + assert end >=0 + dtypes = _dtypes[i:end] + break + else: + if len(self.funcs) > 1: + _raise_err_msg(self, space, dtypes, _dtypes) + i = 0 # Fill in empty dtypes for j in range(self.nargs): if dtypes[j] is None: @@ -1086,7 +1119,7 @@ for j in range(offset, len(iter_shape)): x = iter_shape[j + offset] y = dims_to_broadcast[j] - if (x > y and x % y) or y %x: + if y != 0 and x != 0 and ((x > y and x % y) or y %x): raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its broadcast dimension %d " "(size %d is different from %d)", @@ -1123,7 +1156,7 @@ # the current op (signalling it can handle ndarray's). 
# TODO parse and handle subok - # TODO handle flags, op_flags + # TODO handle more flags, op_flags #print 'iter_shape',iter_shape,'arg_shapes',arg_shapes,'matched_dims',matched_dims return iter_shape, arg_shapes, matched_dims diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from contextlib import contextmanager from rpython.flowspace.model import Constant -from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, +from rpython.annotator.model import ( + SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, + SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) + SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -225,7 +225,8 @@ x = int(x) result = SomeInteger(nonneg = x>=0) else: - raise Exception("seeing a prebuilt long (value %s)" % hex(x)) + # XXX: better error reporting? 
+ raise ValueError("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses no_nul = not '\x00' in x if len(x) == 1: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -112,14 +112,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -801,8 +801,9 @@ s_init = basedesc.s_read_attribute('__init__') parent_has_init = isinstance(s_init, SomePBC) if has_init and not parent_has_init: - raise Exception("some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) # make a PBC of MethodDescs, one for the __init__ of each class initdescs = [] for desc, classdef in zip(descs, classdefs): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4,10 +4,12 @@ from rpython.conftest import option from rpython.annotator import model as annmodel +from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator +from rpython.annotator.classdef import NoSuchAttrError from 
rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy -from rpython.annotator.signature import Sig +from rpython.annotator.signature import Sig, SignatureError from rpython.annotator.listdef import ListDef, ListChangeUnallowed from rpython.annotator.dictdef import DictDef from rpython.flowspace.model import * @@ -213,7 +215,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -360,7 +362,7 @@ def f(l): return g(*l) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [[int]]) def test_star_unpack_and_keywords(self): @@ -769,7 +771,8 @@ def f(): return x a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) def test_exception_deduction_with_raise1(self): a = self.RPythonAnnotator() @@ -959,14 +962,16 @@ def f(): return large_constant a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(ValueError): + a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it def test_add_different_ints(self): def f(a, b): return a + b a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): def f(a, b): @@ -976,7 +981,8 @@ c = b return c a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): def f(a): @@ -2612,14 +2618,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, 
a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + py.test.raises(AnnotatorError, a.build_types, g, []) def test_import_from_mixin(self): class M(object): @@ -2694,7 +2700,8 @@ return a.x # should explode here a = self.RPythonAnnotator() - e = py.test.raises(Exception, a.build_types, f, [int]) + with py.test.raises(NoSuchAttrError) as excinfo: + a.build_types(f, [int]) # this should explode on reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. # But it looks hard to fix in general: we don't know yet during 'a.x' @@ -2928,7 +2935,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_simpler(self): def fun(x, y): @@ -2940,7 +2948,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_lambda(self): def fun(x, y): @@ -2954,7 +2963,8 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): def g(x, y=5): @@ -3004,8 +3014,8 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError - py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) + with py.test.raises(NoSuchAttrError): + a.build_types(fun, [int]) def test_slots_enforce_attrs(self): class Superbase(object): @@ -3138,7 +3148,8 @@ return a.n() a = 
self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fun, [bool]) + with py.test.raises(AnnotatorError): + a.build_types(fun, [bool]) def test_float_cmp(self): def fun(x, y): @@ -3227,6 +3238,7 @@ assert isinstance(s.items[2], annmodel.SomeInstance) assert s.items[2].flags == {} + @py.test.mark.xfail def test_no_access_directly_on_heap(self): from rpython.rlib.jit import hint @@ -3243,7 +3255,8 @@ i.x = x a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(AnnotatorError): + a.build_types(f, []) class M: @@ -3267,7 +3280,7 @@ c.m.l.append(x) a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3275,7 +3288,7 @@ c.m.d[None] = x a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3283,7 +3296,7 @@ c.m.d[x] = None a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_ctr_location(self): class A: @@ -3342,7 +3355,8 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int, int]) + with py.test.raises(UnionError): + a.build_types(f, [int, int]) def test_compare_with_zero(self): def g(): @@ -3464,22 +3478,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, 
[int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3517,7 +3531,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -3530,20 +3544,20 @@ return "xyz".find("x", s, e) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".rfind("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".count("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) @@ -3717,7 +3731,8 @@ raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + with py.test.raises(AssertionError): + a.build_types(f, []) def test_enumerate(self): def f(): @@ -4102,7 +4117,8 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fn, []) + with py.test.raises(NoSuchAttrError): + a.build_types(fn, []) def test_lower_char(self): def fn(c): @@ -4214,7 +4230,7 @@ return "bbb" a = self.RPythonAnnotator() - with 
py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) the_exc = exc.value @@ -4230,7 +4246,7 @@ return (1, 2) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg @@ -4243,7 +4259,7 @@ return -1 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot prove that these integers are of the " @@ -4260,7 +4276,7 @@ return B() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify instances with no common base class" @@ -4276,7 +4292,7 @@ return d.itervalues() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify incompatible iterator variants" in @@ -4288,7 +4304,7 @@ a = A() return getattr(a, y) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("variable argument to getattr" in exc.value.msg) @@ -4296,7 +4312,7 @@ def f(x): return x() a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) @@ -4305,7 +4321,7 @@ def f(x): l.append(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as excinfo: + with py.test.raises(UnionError) as excinfo: a.build_types(f, [int]) assert 'Happened at file' in excinfo.value.source assert 'Known variable annotations:' in excinfo.value.source @@ -4314,7 +4330,7 @@ def 
f(s, x): return s.format(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) @@ -4350,7 +4366,7 @@ def f(x): a, b = x a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, + py.test.raises(AnnotatorError, a.build_types, f, [annmodel.s_None]) def test_class___name__(self): @@ -4464,10 +4480,10 @@ o = O2(n) o.x = 20 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f1, [int]) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f2, [int]) def test_property_union_2(self): @@ -4496,7 +4512,7 @@ a = self.RPythonAnnotator() # Ideally, this should translate to something sensible, # but for now, AnnotatorError is better than silently mistranslating. - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_property_union_3(self): @@ -4516,7 +4532,7 @@ obj = B() return obj.x a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_dict_can_be_none_ordering_issue(self): diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -3,6 +3,7 @@ from rpython.annotator.test.test_annrpython import graphof from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent +from rpython.annotator.model import AnnotatorError class TestAnnotateAndSimplifyTestCase(parent): @@ -132,5 +133,5 @@ cls = C return cls().foo a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): 
a.build_types(f, [int]) diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py --- a/rpython/rlib/rstrategies/rstrategies.py +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -41,7 +41,7 @@ attrs['get_storage'] = get_storage attrs['set_storage'] = set_storage return type.__new__(self, name, bases, attrs) - + def strategy(generalize=None, singleton=True): """ Strategy classes must be decorated with this. @@ -71,19 +71,19 @@ class StrategyFactory(object): _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] factory_instance_counter = 0 - + def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = self._collect_subclasses(root_class) self.strategies = [] self.logger = logger.Logger() - + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter StrategyFactory.factory_instance_counter += 1 - + self._create_strategy_instances(root_class, all_strategy_classes) - + def _create_strategy_instances(self, root_class, all_strategy_classes): for strategy_class in all_strategy_classes: if strategy_class._is_strategy: @@ -91,11 +91,11 @@ self.strategies.append(strategy_class) self._patch_strategy_class(strategy_class, root_class) self._order_strategies() - + # ============================= # API methods # ============================= - + def switch_strategy(self, w_self, new_strategy_type, new_element=None): """ Switch the strategy of w_self to the new type. @@ -113,7 +113,7 @@ new_strategy.strategy_switched(w_self) self.log(w_self, new_strategy, old_strategy, new_element) return new_strategy - + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): """ Initialize the strategy and storage fields of w_self. 
@@ -135,7 +135,7 @@ strategy.strategy_switched(w_self) self.log(w_self, strategy, None, element) return strategy - + @jit.unroll_safe def strategy_type_for(self, objects): """ @@ -153,8 +153,8 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type - raise Exception("Could not find strategy to handle: %s" % objects) - + raise ValueError("Could not find strategy to handle: %s" % objects) + def decorate_strategies(self, transitions): """ As an alternative to decorating all strategies with @strategy, @@ -165,11 +165,11 @@ "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): strategy(generalized)(strategy_class) - + # ============================= # The following methods can be overwritten to customize certain aspects of the factory. # ============================= - + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): """ Return a functional instance of strategy_type. @@ -177,7 +177,7 @@ The two additional parameters should be ignored for singleton-strategies. """ return strategy_type() - + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): """ This can be overwritten into a more appropriate call to self.logger.log @@ -190,7 +190,7 @@ typename = "" cause = "Switched" if old_strategy else "Created" self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) - + @specialize.call_location() def log_string_for_object(self, obj): """ @@ -198,8 +198,8 @@ Keep the specialize-annotation in order to handle different kinds of objects here. """ return obj.__class__.__name__ if obj else "" - - # These storage accessors are specialized because the storage field is + + # These storage accessors are specialized because the storage field is # populated by erased-objects which seem to be incompatible sometimes. 
@specialize.call_location() def get_storage(self, obj): @@ -207,16 +207,16 @@ @specialize.call_location() def set_storage(self, obj, val): return obj._set_storage(val) - + def get_strategy(self, obj): return obj._get_strategy() def set_strategy(self, obj, val): return obj._set_strategy(val) - + # ============================= # Internal methods # ============================= - + def _patch_strategy_class(self, strategy_class, root_class): "NOT_RPYTHON" # Patch root class: Add default handler for visitor @@ -225,12 +225,12 @@ funcname = "_convert_storage_from_" + strategy_class.__name__ _convert_storage_from_OTHER.func_name = funcname setattr(root_class, funcname, _convert_storage_from_OTHER) - + # Patch strategy class: Add polymorphic visitor function def _convert_storage_to(self, w_self, new_strategy): getattr(new_strategy, funcname)(w_self, self) strategy_class._convert_storage_to = _convert_storage_to - + def _collect_subclasses(self, cls): "NOT_RPYTHON" subclasses = [] @@ -238,7 +238,7 @@ subclasses.append(subcls) subclasses.extend(self._collect_subclasses(subcls)) return subclasses - + def _order_strategies(self): "NOT_RPYTHON" def get_generalization_depth(strategy, visited=None): @@ -256,11 +256,11 @@ else: return 0 self.strategies.sort(key=get_generalization_depth, reverse=True) - + @jit.elidable def strategy_singleton_instance(self, strategy_class): return getattr(strategy_class, self.strategy_singleton_field) - + def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. # The constructor does meta stuff which is not possible after translation. @@ -271,65 +271,65 @@ == Required: strategy_factory(self) - Access to StorageFactory """ - + def strategy_switched(self, w_self): # Overwrite this method for a hook whenever the strategy # of w_self was switched to self. 
pass - + # Main Fixedsize API - + def store(self, w_self, index0, value): raise NotImplementedError("Abstract method") - + def fetch(self, w_self, index0): raise NotImplementedError("Abstract method") - + def size(self, w_self): raise NotImplementedError("Abstract method") - + # Fixedsize utility methods - + def slice(self, w_self, start, end): return [ self.fetch(w_self, i) for i in range(start, end)] - + def fetch_all(self, w_self): return self.slice(w_self, 0, self.size(w_self)) - + def store_all(self, w_self, elements): for i, e in enumerate(elements): self.store(w_self, i, e) - + # Main Varsize API - + def insert(self, w_self, index0, list_w): raise NotImplementedError("Abstract method") - + def delete(self, w_self, start, end): raise NotImplementedError("Abstract method") - + # Varsize utility methods - + def append(self, w_self, list_w): - self.insert(w_self, self.size(w_self), list_w) - + self.insert(w_self, self.size(w_self), list_w) + def pop(self, w_self, index0): e = self.fetch(w_self, index0) self.delete(w_self, index0, index0+1) return e # Internal methods - + def _initialize_storage(self, w_self, initial_size): raise NotImplementedError("Abstract method") - + def _check_can_handle(self, value): raise NotImplementedError("Abstract method") - + def _convert_storage_to(self, w_self, new_strategy): # This will be overwritten in _patch_strategy_class new_strategy._convert_storage_from(w_self, self) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): # This is a very unefficient (but most generic) way to do this. 
@@ -338,16 +338,16 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) for i, field in enumerate(storage): self.store(w_self, i, field) - + def _generalize_for_value(self, w_self, value): strategy_type = self.generalized_strategy_for(value) new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) return new_instance - + def _cannot_handle_store(self, w_self, index0, value): new_instance = self._generalize_for_value(w_self, value) new_instance.store(w_self, index0, value) - + def _cannot_handle_insert(self, w_self, index0, list_w): # TODO - optimize. Prevent multiple generalizations and slicing done by callers. new_strategy = self._generalize_for_value(w_self, list_w[0]) @@ -358,7 +358,7 @@ class EmptyStrategy(AbstractStrategy): # == Required: # See AbstractStrategy - + def _initialize_storage(self, w_self, initial_size): assert initial_size == 0 self.set_storage(w_self, None) @@ -366,7 +366,7 @@ self.set_storage(w_self, None) def _check_can_handle(self, value): return False - + def fetch(self, w_self, index0): raise IndexError def store(self, w_self, index0, value): @@ -389,7 +389,7 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # value(self) - the single value contained in this strategy. Should be constant. 
- + def _initialize_storage(self, w_self, initial_size): storage_obj = SingleValueStrategyStorage(initial_size) self.set_storage(w_self, storage_obj) @@ -397,7 +397,7 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) def _check_can_handle(self, value): return value is self.value() - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) return self.value() @@ -411,7 +411,7 @@ self.get_storage(w_self).size -= (end - start) def size(self, w_self): return self.get_storage(w_self).size - + @jit.unroll_safe def insert(self, w_self, index0, list_w): storage_obj = self.get_storage(w_self) @@ -429,18 +429,18 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # default_value(self) - The value to be initially contained in this strategy - + def _initialize_storage(self, w_self, initial_size): default = self._unwrap(self.default_value()) self.set_storage(w_self, [default] * initial_size) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): size = previous_strategy.size(w_self) new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) for i in range(size) ] self.set_storage(w_self, new_storage) - + def store(self, w_self, index0, wrapped_value): self.check_index_store(w_self, index0) if self._check_can_handle(wrapped_value): @@ -448,21 +448,21 @@ self.get_storage(w_self)[index0] = unwrapped else: self._cannot_handle_store(w_self, index0, wrapped_value) - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) unwrapped = self.get_storage(w_self)[index0] return self._wrap(unwrapped) - + def _wrap(self, value): raise NotImplementedError("Abstract method") - + def _unwrap(self, value): raise NotImplementedError("Abstract method") - + def size(self, w_self): return len(self.get_storage(w_self)) - + @jit.unroll_safe def insert(self, w_self, start, list_w): # This is following Python's behaviour - insert automatically @@ -475,27 +475,27 @@ else: 
self._cannot_handle_insert(w_self, start + i, list_w[i:]) return - + def delete(self, w_self, start, end): self.check_index_range(w_self, start, end) assert start >= 0 and end >= 0 del self.get_storage(w_self)[start : end] - + class GenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value def _unwrap(self, value): return value def _check_can_handle(self, wrapped_value): return True - + class WeakGenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value() or self.default_value() def _unwrap(self, value): @@ -503,7 +503,7 @@ return weakref.ref(value) def _check_can_handle(self, wrapped_value): return True - + # ============== Mixins for index checking operations ============== class SafeIndexingMixin(object): @@ -535,37 +535,37 @@ # See StrategyWithStorage # wrap(self, value) - Return a boxed object for the primitive value # unwrap(self, value) - Return the unboxed primitive value of value - + def _unwrap(self, value): return self.unwrap(value) def _wrap(self, value): return self.wrap(value) - + class SingleTypeStrategy(SpecializedStrategy): # == Required Functions: # See SpecializedStrategy # contained_type - The wrapped type that can be stored in this strategy - + def _check_can_handle(self, value): return isinstance(value, self.contained_type) - + class TaggingStrategy(SingleTypeStrategy): """This strategy uses a special tag value to represent a single additional object.""" # == Required: # See SingleTypeStrategy # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - + def _check_can_handle(self, value): return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ self.unwrap(value) != self.unwrapped_tagged_value()) - + def _unwrap(self, value): if value is self.wrapped_tagged_value(): return self.unwrapped_tagged_value() 
return self.unwrap(value) - + def _wrap(self, value): if value == self.unwrapped_tagged_value(): return self.wrapped_tagged_value() diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py --- a/rpython/rlib/rstrategies/test/test_rstrategies.py +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -69,7 +69,7 @@ class Factory(rs.StrategyFactory): switching_log = [] - + def __init__(self, root_class): self.decorate_strategies({ EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], @@ -79,15 +79,15 @@ IntegerOrNilStrategy: [GenericStrategy], }) rs.StrategyFactory.__init__(self, root_class) - + def instantiate_strategy(self, strategy_type, w_self=None, size=0): return strategy_type(self, w_self, size) - - def set_strategy(self, w_list, strategy): + + def set_strategy(self, w_list, strategy): old_strategy = self.get_strategy(w_list) self.switching_log.append((old_strategy, strategy)) super(Factory, self).set_strategy(w_list, strategy) - + def clear_log(self): del self.switching_log[:] @@ -107,7 +107,7 @@ class WeakGenericStrategy(AbstractStrategy): import_from_mixin(rs.WeakGenericStrategy) def default_value(self): return w_nil - + class IntegerStrategy(AbstractStrategy): import_from_mixin(rs.SingleTypeStrategy) contained_type = W_Integer @@ -123,7 +123,7 @@ def default_value(self): return w_nil def wrapped_tagged_value(self): return w_nil def unwrapped_tagged_value(self): import sys; return sys.maxint - + @rs.strategy(generalize=[], singleton=False) class NonSingletonStrategy(GenericStrategy): def __init__(self, factory, w_list=None, size=0): @@ -214,22 +214,22 @@ py.test.raises(IndexError, s.fetch, l, 10) py.test.raises(IndexError, s.delete, l, 0, 1) py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
- + def test_init_Nil(): do_test_initialization(NilStrategy) def test_init_Generic(): do_test_initialization(GenericStrategy, is_safe=False) - + def test_init_WeakGeneric(): do_test_initialization(WeakGenericStrategy) - + def test_init_Integer(): do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) - + def test_init_IntegerOrNil(): do_test_initialization(IntegerOrNilStrategy) - + # === Test Simple store def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): @@ -256,13 +256,13 @@ def test_store_Generic(): do_test_store(GenericStrategy, is_safe=False) - + def test_store_WeakGeneric(): do_test_store(WeakGenericStrategy, stored_value=w_nil) - + def test_store_Integer(): do_test_store(IntegerStrategy, stored_value=W_Integer(100)) - + def test_store_IntegerOrNil(): do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) do_test_store(IntegerOrNilStrategy, stored_value=w_nil) @@ -289,17 +289,17 @@ def test_insert_Generic(): do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_WeakGeneric(): do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_Integer(): do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_insert_IntegerOrNil(): do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_insert(IntegerOrNilStrategy, [w_nil]*6) - + # === Test Delete def do_test_delete(cls, values, indexing_unsafe=False): @@ -319,13 +319,13 @@ def test_delete_Generic(): do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) - + def test_delete_WeakGeneric(): do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_delete_Integer(): do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_delete_IntegerOrNil(): do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_delete(IntegerOrNilStrategy, [w_nil]*6) 
@@ -342,7 +342,7 @@ obj = W_Object() i = W_Integer(0) nil = w_nil - + assert_handles(EmptyStrategy, [], [nil, obj, i]) assert_handles(NilStrategy, [nil], [obj, i]) assert_handles(GenericStrategy, [nil, obj, i], []) @@ -392,7 +392,7 @@ o = W_Object() l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) assert isinstance(l.strategy, GenericStrategy) - + def test_transition_to_nonSingleton(): l = W_List(NilStrategy, 5) factory.switch_strategy(l, NonSingletonStrategy) @@ -467,12 +467,12 @@ v3 = [W_Object() for _ in range(l.size()) ] assert v2 != v assert v3 != v - + l.store_all(v2) assert l.fetch_all() == v2+v[4:] l.store_all(v3) assert l.fetch_all() == v3 - + py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ]) # === Test Weak Strategy @@ -488,7 +488,7 @@ assert False, "The default convert_storage_from() should not be called!" def convert_storage_from_special(self, w_self, other): s.copied += 1 - + monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special) monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default) try: @@ -507,7 +507,8 @@ assert factory.strategy_type_for([]) == EmptyStrategy monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False) try: - py.test.raises(Exception, factory.strategy_type_for, [W_Object(), W_Object()]) + with py.test.raises(ValueError): + factory.strategy_type_for([W_Object(), W_Object()]) finally: monkeypatch.undo() @@ -549,4 +550,3 @@ 'Created (EmptyStrategy) size 0 objects 1', 'Created (IntegerStrategy) size 3 objects 1', 'Switched (IntegerStrategy -> IntegerOrNilStrategy) size 3 objects 1 elements: W_Object'] - \ No newline at end of file diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -2,10 +2,10 @@ Weakref support in RPython. Basic regular weakrefs without callbacks are supported. 
This file contains the following additions: a form of WeakKeyDictionary, and a limited version of WeakValueDictionary. -LLType only for now! """ import weakref +from rpython.annotator.model import UnionError ref = weakref.ref # basic regular weakrefs are supported in RPython @@ -191,9 +191,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same key class!") + raise UnionError(s_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same value class!") + raise UnionError(s_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakKeyDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -120,25 +121,34 @@ f(1) interpret(f, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: d = RWeakKeyDictionary(KX, VX) else: d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - py.test.raises(Exception, interpret, g, [1]) + with py.test.raises(UnionError): + interpret(g, [1]) + at 
py.test.mark.xfail(reason="not implemented, messy") def test_rpython_free_values(): - import py; py.test.skip("XXX not implemented, messy") class VXDel: def __del__(self): state.freed.append(1) diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -1,4 +1,5 @@ import py +from rpython.annotator.model import UnionError from rpython.rlib import rgc from rpython.rlib.rweakref import RWeakValueDictionary from rpython.rtyper.test.test_llinterp import interpret @@ -143,7 +144,9 @@ else: d = RWeakValueDictionary(str, Y) d.set("x", X()) - py.test.raises(Exception, interpret, g, [1]) + + with py.test.raises(UnionError): + interpret(g, [1]) def test_rpython_RWeakValueDictionary_or_None(): diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -1,8 +1,8 @@ from rpython.rtyper import extregistry from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem.lltype import typeOf, FuncType, functionptr -from rpython.annotator import model as annmodel -from rpython.annotator.signature import annotation +from rpython.annotator.model import unionof +from rpython.annotator.signature import annotation, SignatureError import py, sys @@ -130,7 +130,7 @@ "Argument number mismatch" for i, expected in enumerate(signature_args): - arg = annmodel.unionof(args_s[i], expected) + arg = unionof(args_s[i], expected) if not expected.contains(arg): name = getattr(self, 'name', None) if not name: @@ -138,7 +138,7 @@ name = self.instance.__name__ except AttributeError: name = '?' 
- raise Exception("In call to external function %r:\n" + raise SignatureError("In call to external function %r:\n" "arg %d must be %s,\n" " got %s" % ( name, i+1, expected, args_s[i])) diff --git a/rpython/rtyper/test/test_extfunc.py b/rpython/rtyper/test/test_extfunc.py --- a/rpython/rtyper/test/test_extfunc.py +++ b/rpython/rtyper/test/test_extfunc.py @@ -2,9 +2,10 @@ from rpython.rtyper.extfunc import ExtFuncEntry, register_external,\ is_external, lazy_register -from rpython.annotator import model as annmodel +from rpython.annotator.model import SomeInteger, SomeString, AnnotatorError from rpython.annotator.annrpython import RPythonAnnotator from rpython.annotator.policy import AnnotatorPolicy +from rpython.annotator.signature import SignatureError from rpython.rtyper.test.test_llinterp import interpret class TestExtFuncEntry: @@ -21,8 +22,8 @@ class BTestFuncEntry(ExtFuncEntry): _about_ = b name = 'b' - signature_args = [annmodel.SomeInteger()] - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] + signature_result = SomeInteger() def f(): return b(2) @@ -30,7 +31,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) res = interpret(f, []) assert res == 42 @@ -45,8 +46,8 @@ class CTestFuncEntry(ExtFuncEntry): _about_ = c name = 'ccc' - signature_args = [annmodel.SomeInteger()] * 2 - signature_result = annmodel.SomeInteger() + signature_args = [SomeInteger()] * 2 + signature_result = SomeInteger() def lltypeimpl(y, x): return y + x @@ -72,7 +73,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_tuple_args(self): """ @@ -96,7 +97,7 @@ s = a.build_types(f, []) # Not a very good assertion, but at least it means _something_ happened. 
- assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_return_goes_back(self): """ @@ -118,7 +119,7 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeInteger) + assert isinstance(s, SomeInteger) def test_register_external_specialcase(self): """ @@ -135,10 +136,10 @@ policy = AnnotatorPolicy() a = RPythonAnnotator(policy=policy) s = a.build_types(f, []) - assert isinstance(s, annmodel.SomeString) + assert isinstance(s, SomeString) def test_str0(self): - str0 = annmodel.SomeString(no_nul=True) + str0 = SomeString(no_nul=True) def os_open(s): pass register_external(os_open, [str0], None) @@ -152,25 +153,32 @@ a.translator.config.translation.check_str_without_nul=True def g(s): return os_open(s) - py.test.raises(Exception, a.build_types, g, [str]) + with py.test.raises(SignatureError): + a.build_types(g, [str]) a.build_types(g, [str0]) # Does not raise - def test_list_of_str0(self): - str0 = annmodel.SomeString(no_nul=True) + def test_list_of_str0_unchecked(self): + str0 = SomeString(no_nul=True) + def os_execve(l): pass + From noreply at buildbot.pypy.org Wed Oct 14 19:01:46 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 14 Oct 2015 19:01:46 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: extending to the different opcode formats, now supporting agr the 64 bit version of signed integer add! Message-ID: <20151014170146.716811C120E@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80214:b21ba89abdb2 Date: 2015-10-14 19:01 +0200 http://bitbucket.org/pypy/pypy/changeset/b21ba89abdb2/ Log: extending to the different opcode formats, now supporting agr the 64 bit version of signed integer add! 
diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -24,13 +24,41 @@ self.BL(addr, c) return f +class Operand(object): + pass -codes = { - 'ADD_rr': 0x1A, +def build_rr(mnemonic, args): + opcode = args[0] + assert isinstance(opcode, str) + def encode_rr(self, reg1, reg2): + self.writechar(opcode) + operands = ((reg2 & 0x0f) << 4) | (reg1 & 0xf) + self.writechar(chr(operands)) + return encode_rr + +def build_rre(mnemonic, args): + opcode1,opcode2 = args[0] + assert isinstance(opcode1, str) + assert isinstance(opcode2, str) + def encode_rr(self, reg1, reg2): + self.writechar(opcode1) + self.writechar(opcode2) + self.writechar('\x00') + #self.writechar('\x00') + operands = ((reg2 & 0x0f) << 4) | (reg1 & 0xf) + self.writechar(chr(operands)) + return encode_rr + +_mnemonic_codes = { + 'AR': (build_rr, ['\x1A']), + 'AGR': (build_rre, ['\xB9\x08']) } -def encode_rr(reg1, reg2): - return chr(((reg2 & 0x0f) << 4) | (reg1 & 0xf)) +def build_instr_codes(clazz): + for mnemonic, (builder, args) in _mnemonic_codes.items(): + func = builder(mnemonic, args) + name = mnemonic + "_" + builder.__name__.split("_")[1] + setattr(clazz, name, func) class AbstractZARCHBuilder(object): def write32(self, word): @@ -43,6 +71,9 @@ self.writechar(chr(0x1A)) self.writechar(encode_rr(reg1, reg2)) +build_instr_codes(AbstractZARCHBuilder) + + class InstrBuilder(BlockBuilderMixin, AbstractZARCHBuilder): def __init__(self): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -4,6 +4,7 @@ from rpython.jit.backend.zarch import codebuilder from rpython.rlib.rarithmetic import intmask from rpython.tool.udir import udir +import itertools INPUTNAME = 'checkfile_%s.s' FILENAME = 
'checkfile_%s.o' @@ -26,8 +27,8 @@ and self.index == self.instrindex): return # ignore the extra character '\x40' print self.op - print "\x09from codebuilder.py:", hexdump(self.expected[self.instrindex:self.index] + char)+"..." - print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." + print "\x09from codebuilder.py: ", hexdump(self.expected[self.instrindex:self.index] + char)+"..." + print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." raise Exception("Differs") self.index += 1 @@ -113,6 +114,7 @@ def get_all_tests(self): return { 'r': self.reg_tests, + 'e': lambda: [], } def assembler_operand_reg(self, regnum): @@ -207,38 +209,20 @@ return oplist, as_code def make_all_tests(self, methname, modes, args=[]): - if modes: - tests = self.get_all_tests() - m = modes[0] - lst = tests[m]() - random.shuffle(lst) - if methname == 'PSRAD_xi' and m == 'i': - lst = [x for x in lst if 0 <= x <= 31] - result = [] - for v in lst: - result += self.make_all_tests(methname, modes[1:], args+[v]) - return result - else: - # special cases - if methname in ('ADD_ri', 'AND_ri', 'CMP_ri', 'OR_ri', - 'SUB_ri', 'XOR_ri', 'SBB_ri'): - if args[0] == rx86.R.eax: - return [] # ADD EAX, constant: there is a special encoding - if methname in ('CMP8_ri',): - if args[0] == rx86.R.al: - return [] # CMP AL, constant: there is a special encoding - if methname == 'XCHG_rr' and rx86.R.eax in args: - return [] # special encoding - if methname == 'MOV_rj' and args[0] == rx86.R.eax: - return [] # MOV EAX, [immediate]: there is a special encoding - if methname == 'MOV_jr' and args[1] == rx86.R.eax: - return [] # MOV [immediate], EAX: there is a special encoding - if methname == 'MOV8_rj' and args[0] == rx86.R.al: - return [] # MOV AL, [immediate]: there is a special encoding - if methname == 'MOV8_jr' and args[1] == rx86.R.al: - return [] # MOV [immediate], AL: there is a special encoding - - return [args] + tests = { + 'r': self.REGS, + 'e': 
None, + } + combinations = [] + for m in modes: + if tests[m] is not None: + elems = tests[m] + random.shuffle(elems) + combinations.append(elems) + results = [] + for args in itertools.product(*combinations): + results.append(args) + return results def should_skip_instruction(self, instrname, argmodes): return False From noreply at buildbot.pypy.org Wed Oct 14 20:05:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 20:05:55 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Starting to rewrite cpyext, add a lot of ZZZ Message-ID: <20151014180555.4C91E1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80215:8e73db7caf49 Date: 2015-10-14 20:06 +0200 http://bitbucket.org/pypy/pypy/changeset/8e73db7caf49/ Log: Starting to rewrite cpyext, add a lot of ZZZ diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -286,75 +286,67 @@ @specialize.ll() def unwrapper(space, *args): from pypy.module.cpyext.pyobject import Py_DecRef - from pypy.module.cpyext.pyobject import make_ref, from_ref + from pypy.module.cpyext.pyobject import as_pyobj, is_pyobj from pypy.module.cpyext.pyobject import Reference newargs = () - to_decref = [] assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: - # build a reference - if input_arg is None: - arg = lltype.nullptr(PyObject.TO) - elif isinstance(input_arg, W_Root): - ref = make_ref(space, input_arg) - to_decref.append(ref) - arg = rffi.cast(ARG, ref) + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + input_arg = as_pyobj(input_arg) + arg = rffi.cast(ARG, input_arg) + elif is_PyObject(ARG) and is_wrapped: + # convert to a wrapped object + if is_pyobj(input_arg): + arg = from_ref(input_arg) else: arg = input_arg - elif is_PyObject(ARG) and is_wrapped: - # convert 
to a wrapped object - if input_arg is None: - arg = input_arg - elif isinstance(input_arg, W_Root): - arg = input_arg - else: - try: - arg = from_ref(space, - rffi.cast(PyObject, input_arg)) - except TypeError, e: - err = OperationError(space.w_TypeError, - space.wrap( - "could not cast arg to PyObject")) - if not catch_exception: - raise err - state = space.fromcache(State) - state.set_exception(err) - if is_PyObject(restype): - return None - else: - return api_function.error_value + + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = OperationError(space.w_TypeError, + ## space.wrap( + ## "could not cast arg to PyObject")) + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # convert to a wrapped object + # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) try: - try: - res = func(space, *newargs) - except OperationError, e: - if not catch_exception: - raise - if not hasattr(api_function, "error_value"): - raise - state = space.fromcache(State) - state.set_exception(e) - if is_PyObject(restype): - return None - else: - return api_function.error_value - if not we_are_translated(): - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer,'got %r not integer' % res - if res is None: + res = func(space, *newargs) + except OperationError, e: + if not catch_exception: + raise + if not hasattr(api_function, "error_value"): + raise + state = space.fromcache(State) + state.set_exception(e) + if is_PyObject(restype): return None - elif isinstance(res, Reference): - return res.get_wrapped(space) else: - return res - finally: - for arg in to_decref: - Py_DecRef(space, arg) + return api_function.error_value + if not we_are_translated(): + got_integer = isinstance(res, (int, 
long, float)) + assert got_integer == expect_integer,'got %r not integer' % res + ZZZ # where is the logic to return PyObject?? + if res is None: + return None + elif isinstance(res, Reference): + return res.get_wrapped(space) + else: + return res unwrapper.func = func unwrapper.api_func = api_function unwrapper._always_inline_ = 'try' @@ -730,9 +722,9 @@ compilation_info=eci, _nowrapper=True) def init_types(space): from pypy.module.cpyext.typeobject import py_type_ready - py_type_ready(space, get_buffer_type()) - py_type_ready(space, get_cobject_type()) - py_type_ready(space, get_capsule_type()) + #py_type_ready(space, get_buffer_type()) ZZZ + #py_type_ready(space, get_cobject_type()) ZZZ + #py_type_ready(space, get_capsule_type()) ZZZ INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, @@ -831,7 +823,8 @@ space.fromcache(State).install_dll(eci) # populate static data - for name, (typ, expr) in GLOBALS.iteritems(): + if 0: # ZZZ + for name, (typ, expr) in GLOBALS.iteritems(): from pypy.module import cpyext w_obj = eval(expr) if name.endswith('#'): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -1,16 +1,16 @@ import sys from pypy.interpreter.baseobjspace import W_Root, SpaceCache -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, - CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr) + CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject from rpython.rlib.objectmodel import specialize, we_are_translated -from 
rpython.rlib.rweakref import RWeakKeyDictionary from rpython.rtyper.annlowlevel import llhelper +from rpython.rlib import rawrefcount #________________________________________________________ # type description @@ -136,6 +136,7 @@ class RefcountState: def __init__(self, space): + ZZZ self.space = space self.py_objects_w2r = {} # { w_obj -> raw PyObject } self.py_objects_r2w = {} # { addr of raw PyObject -> w_obj } @@ -251,6 +252,7 @@ Allocates a PyObject, and fills its fields with info from the given intepreter object. """ + ZZZ state = space.fromcache(RefcountState) w_type = space.type(w_obj) if w_type.is_cpytype(): @@ -270,6 +272,7 @@ """ Ties together a PyObject and an interpreter object. """ + ZZZ # XXX looks like a PyObject_GC_TRACK ptr = rffi.cast(ADDR, py_obj) state = space.fromcache(RefcountState) @@ -282,12 +285,62 @@ if ptr: # init_typeobject() bootstraps with NULL references state.py_objects_r2w[ptr] = w_obj -def make_ref(space, w_obj): + +NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) + +def _create_pyobj_from_w_obj(w_obj): + # XXX temp, needs cases + ob = lltype.malloc(PyObject, flavor='raw', track_allocation=False) + ob.ob_refcnt = 0 + ob.ob_pypy_link = NULL_GCREF + rawrefcount.create_link_pypy(w_obj, ob) + return ob + + +def as_pyobj(w_obj): """ - Returns a new reference to an intepreter object. + Returns a 'PyObject *' representing the given intepreter object. + 'None' is returned as a NULL. This doesn't give a new reference, but + the returned 'PyObject *' is valid at least as long as 'w_obj' is. 
""" + assert is_wrapped(w_obj) if w_obj is None: return lltype.nullptr(PyObject.TO) + #if isinstance(w_obj, W_CPyExtPlaceHolderObject): + # xxx + ob = rawrefcount.from_obj(PyObject.TO, w_obj) + if not ob: + ob = _create_pyobj_from_w_obj(w_obj) + return ob +as_pyobj._always_inline_ = True + + + at specialize.ll() +def from_ref(pyobj): + assert not is_wrapped(pyobj) + if not pyobj: + return None + pyobj = rffi.cast(PyObject, pyobj) + w_obj = rawrefcount.to_obj(W_Root, pyobj) + if w_obj is None: + w_obj = _create_w_obj_from_pyobj(pyobj) + return w_obj +from_ref._always_inline_ = True + + +def is_pyobj(x): + "NOT_RPYTHON" + if x is None or isinstance(x, W_Root): + return False + else: + assert is_PyObject(lltype.typeOf(x)) + return True + +# ZZZ: use an ExtRegistryEntry to constant-fold is_pyobj() + + +def make_ref(space, w_obj): + ZZZ assert isinstance(w_obj, W_Root) state = space.fromcache(RefcountState) try: @@ -300,7 +353,7 @@ return py_obj -def from_ref(space, ref): +def ZZZ_from_ref(space, ref): """ Finds the interpreter object corresponding to the given reference. If the object is not yet realized (see stringobject.py), creates it. @@ -390,6 +443,7 @@ class PyOLifeline(object): def __init__(self, space, pyo): + ZZZ self.pyo = pyo self.space = space @@ -408,6 +462,7 @@ Create a borrowed reference, which will live as long as the container has a living reference (as a PyObject!) """ + ZZZ if w_borrowed is None: return lltype.nullptr(PyObject.TO) @@ -416,6 +471,7 @@ class Reference: def __init__(self, pyobj): + ZZZ assert not isinstance(pyobj, W_Root) self.pyobj = pyobj @@ -430,6 +486,7 @@ Delays the creation of a borrowed reference. 
""" def __init__(self, w_container, w_borrowed): + ZZZ self.w_container = w_container self.w_borrowed = w_borrowed diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py --- a/pypy/module/cpyext/test/test_api.py +++ b/pypy/module/cpyext/test/test_api.py @@ -49,21 +49,24 @@ return state.clear_exception() def setup_method(self, func): + return # ZZZ freeze_refcnts(self) def teardown_method(self, func): - state = self.space.fromcache(State) - try: - state.check_and_raise_exception() - except OperationError, e: - print e.errorstr(self.space) - raise + if 0: # ZZZ + state = self.space.fromcache(State) + try: + state.check_and_raise_exception() + except OperationError, e: + print e.errorstr(self.space) + raise try: del self.space.getexecutioncontext().cpyext_threadstate except AttributeError: pass + return # ZZZ if self.check_and_print_leaks(): assert False, "Test leaks or loses object(s)." diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,6 +92,7 @@ return str(pydname) def freeze_refcnts(self): + ZZZ state = self.space.fromcache(RefcountState) self.frozen_refcounts = {} for w_obj, obj in state.py_objects_w2r.iteritems(): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -320,6 +320,8 @@ # - object.tp_bases is a tuple # - tuple.tp_bases is a tuple + return # ZZZ + # insert null placeholders to please create_ref() track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) From noreply at buildbot.pypy.org Wed Oct 14 20:21:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 20:21:24 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress Message-ID: 
<20151014182125.098FC1C0726@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80216:35dda6474dfa Date: 2015-10-14 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/35dda6474dfa/ Log: in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -5,7 +5,7 @@ import py from pypy.conftest import pypydir -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper @@ -278,6 +278,10 @@ raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, @@ -340,13 +344,7 @@ if not we_are_translated(): got_integer = isinstance(res, (int, long, float)) assert got_integer == expect_integer,'got %r not integer' % res - ZZZ # where is the logic to return PyObject?? 
- if res is None: - return None - elif isinstance(res, Reference): - return res.get_wrapped(space) - else: - return res + return res unwrapper.func = func unwrapper.api_func = api_function unwrapper._always_inline_ = 'try' @@ -508,7 +506,8 @@ # So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) +PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr), + ("ob_pypy_link", llmemory.GCREF)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -18,7 +18,8 @@ #define PyObject_HEAD \ long ob_refcnt; \ - struct _typeobject *ob_type; + struct _typeobject *ob_type; \ + void *ob_pypy_link; #define PyObject_VAR_HEAD \ PyObject_HEAD \ diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -290,9 +290,10 @@ def _create_pyobj_from_w_obj(w_obj): # XXX temp, needs cases - ob = lltype.malloc(PyObject, flavor='raw', track_allocation=False) - ob.ob_refcnt = 0 - ob.ob_pypy_link = NULL_GCREF + ob = lltype.malloc(PyObject.TO, flavor='raw', track_allocation=False) + ob.c_ob_refcnt = 0 + ob.c_ob_pypy_link = NULL_GCREF + # ob.c_ob_type = ... rawrefcount.create_link_pypy(w_obj, ob) return ob @@ -303,7 +304,7 @@ 'None' is returned as a NULL. This doesn't give a new reference, but the returned 'PyObject *' is valid at least as long as 'w_obj' is. 
""" - assert is_wrapped(w_obj) + assert not is_pyobj(w_obj) if w_obj is None: return lltype.nullptr(PyObject.TO) #if isinstance(w_obj, W_CPyExtPlaceHolderObject): @@ -317,7 +318,7 @@ @specialize.ll() def from_ref(pyobj): - assert not is_wrapped(pyobj) + assert is_pyobj(pyobj) if not pyobj: return None pyobj = rffi.cast(PyObject, pyobj) diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -20,9 +20,9 @@ def create_link_pypy(p, ob): "NOT_RPYTHON: a link where the PyPy object contains all the data" assert not hasattr(p, '__rawrefcount') - assert not ob.ob_pypy_link - ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) - ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = ob _p_list.append(ob) @@ -30,9 +30,9 @@ """NOT_RPYTHON: a link where the PyObject contains all the data. from_obj() will not work on this 'p'.""" assert not hasattr(p, '__rawrefcount') - assert not ob.ob_pypy_link - ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) - ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _o_list.append(ob) @@ -40,9 +40,9 @@ """NOT_RPYTHON: a link where both p and ob contain some data. 
from_obj() will not work on this 'p'.""" assert not hasattr(p, '__rawrefcount') - assert not ob.ob_pypy_link - ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) - ob.ob_refcnt += REFCNT_FROM_PYPY_OBJECT + assert not ob.c_ob_pypy_link + ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _s_list.append(ob) @@ -55,7 +55,7 @@ @specialize.arg(0) def to_obj(Class, ob): - pypy_gcref = ob.ob_pypy_link + pypy_gcref = ob.c_ob_pypy_link if we_are_translated(): return annlowlevel.cast_gcref_to_instance(Class, pypy_gcref) else: @@ -71,18 +71,18 @@ from the O list. """ def detach(ob, wr_list): - assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT - assert ob.ob_pypy_link - p = rgc.try_cast_gcref_to_instance(object, ob.ob_pypy_link) + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + assert ob.c_ob_pypy_link + p = rgc.try_cast_gcref_to_instance(object, ob.c_ob_pypy_link) assert p is not None - ob.ob_pypy_link = lltype.nullptr(llmemory.GCREF.TO) + ob.c_ob_pypy_link = lltype.nullptr(llmemory.GCREF.TO) wr_list.append((ob, weakref.ref(p))) global _p_list, _o_list, _s_list wr_p_list = [] new_p_list = [] for ob in _p_list: - if ob.ob_refcnt > REFCNT_FROM_PYPY_OBJECT: + if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: new_p_list.append(ob) else: wr_p_list.append(weakref.ref(ob)) @@ -92,7 +92,7 @@ wr_s_list = [] new_s_list = [] for ob in _s_list: - if ob.ob_refcnt > REFCNT_FROM_PYPY_OBJECT: + if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: new_s_list.append(ob) else: detach(ob, wr_s_list) @@ -109,14 +109,14 @@ rgc.collect() def attach(ob, wr, final_list): - assert ob.ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT p = wr() if p is not None: - ob.ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) final_list.append(ob) else: - ob.ob_refcnt -= REFCNT_FROM_PYPY_OBJECT - if ob.ob_refcnt == 0: + ob.c_ob_refcnt -= 
REFCNT_FROM_PYPY_OBJECT + if ob.c_ob_refcnt == 0: dealloc.append(ob) _p_list = new_p_list diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -7,8 +7,8 @@ self.intval = intval PyObjectS = lltype.Struct('PyObjectS', - ('ob_refcnt', lltype.Signed), - ('ob_pypy_link', llmemory.GCREF)) + ('c_ob_refcnt', lltype.Signed), + ('c_ob_pypy_link', llmemory.GCREF)) PyObject = lltype.Ptr(PyObjectS) @@ -71,7 +71,7 @@ assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) - ob.ob_refcnt += 1 # <= + ob.c_ob_refcnt += 1 # <= del ob, p rawrefcount._collect() ob = wr_ob() @@ -121,13 +121,13 @@ assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) - ob.ob_refcnt += 1 # <= + ob.c_ob_refcnt += 1 # <= del p dealloc = rawrefcount._collect() assert dealloc == [] p = wr_p() assert p is None # was unlinked - assert ob.ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 + assert ob.c_ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 assert rawrefcount._o_list == [] assert rawrefcount.to_obj(W_Root, ob) == None @@ -173,7 +173,7 @@ assert rawrefcount._s_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) - ob.ob_refcnt += 1 # <= + ob.c_ob_refcnt += 1 # <= del ob, p rawrefcount._collect() ob = wr_ob() From noreply at buildbot.pypy.org Wed Oct 14 21:57:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 21:57:13 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Add some keepalives when converting to 'PyObject *' to make sure the Message-ID: <20151014195713.2B53A1C1186@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80217:12bbded34910 Date: 2015-10-14 21:36 +0200 http://bitbucket.org/pypy/pypy/changeset/12bbded34910/ Log: Add some keepalives when converting to 'PyObject *' to make sure the original W_Root objects stay alive for the whole duration of 
the call diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager @@ -293,16 +293,18 @@ from pypy.module.cpyext.pyobject import as_pyobj, is_pyobj from pypy.module.cpyext.pyobject import Reference newargs = () + keepalives = () assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: # build a 'PyObject *' (not holding a reference) if not is_pyobj(input_arg): + keepalives += (input_arg,) input_arg = as_pyobj(input_arg) arg = rffi.cast(ARG, input_arg) elif is_PyObject(ARG) and is_wrapped: - # convert to a wrapped object + # build a W_Root, possibly from a 'PyObject *' if is_pyobj(input_arg): arg = from_ref(input_arg) else: @@ -329,7 +331,10 @@ arg = input_arg newargs += (arg, ) try: - res = func(space, *newargs) + try: + res = func(space, *newargs) + finally: + keepalive_until_here(*keepalives) except OperationError, e: if not catch_exception: raise From noreply at buildbot.pypy.org Wed Oct 14 21:57:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 21:57:15 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Yay, the first test passes (in test_api) Message-ID: <20151014195715.603461C1186@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80218:b20b38d7dd99 Date: 2015-10-14 21:57 +0200 http://bitbucket.org/pypy/pypy/changeset/b20b38d7dd99/ Log: Yay, the first test passes (in test_api) diff 
--git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -512,7 +512,7 @@ PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr), - ("ob_pypy_link", llmemory.GCREF)) + ("ob_pypy_link", lltype.Signed)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -17,9 +17,9 @@ #define staticforward static #define PyObject_HEAD \ - long ob_refcnt; \ - struct _typeobject *ob_type; \ - void *ob_pypy_link; + Py_ssize_t ob_refcnt; \ + Py_ssize_t ob_pypy_link; \ + struct _typeobject *ob_type; #define PyObject_VAR_HEAD \ PyObject_HEAD \ diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -286,13 +286,11 @@ state.py_objects_r2w[ptr] = w_obj -NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO) - def _create_pyobj_from_w_obj(w_obj): # XXX temp, needs cases ob = lltype.malloc(PyObject.TO, flavor='raw', track_allocation=False) ob.c_ob_refcnt = 0 - ob.c_ob_pypy_link = NULL_GCREF + ob.c_ob_pypy_link = 0 # ob.c_ob_type = ... 
rawrefcount.create_link_pypy(w_obj, ob) return ob @@ -309,7 +307,7 @@ return lltype.nullptr(PyObject.TO) #if isinstance(w_obj, W_CPyExtPlaceHolderObject): # xxx - ob = rawrefcount.from_obj(PyObject.TO, w_obj) + ob = rawrefcount.from_obj(PyObject, w_obj) if not ob: ob = _create_pyobj_from_w_obj(w_obj) return ob diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -12,16 +12,25 @@ REFCNT_FROM_PYPY_OBJECT = 80 # == 0x50 -_p_list = [] # not rpython -_o_list = [] # not rpython -_s_list = [] # not rpython +def _reset_state(): + global _p_list, _o_list, _s_list, _adr2pypy + _p_list = [] # not rpython + _o_list = [] # not rpython + _s_list = [] # not rpython + _adr2pypy = [None] # not rpython +_reset_state() + +def _build_pypy_link(p): + res = len(_adr2pypy) + _adr2pypy.append(p) + return res def create_link_pypy(p, ob): "NOT_RPYTHON: a link where the PyPy object contains all the data" assert not hasattr(p, '__rawrefcount') assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = ob _p_list.append(ob) @@ -31,7 +40,7 @@ from_obj() will not work on this 'p'.""" assert not hasattr(p, '__rawrefcount') assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _o_list.append(ob) @@ -41,28 +50,29 @@ from_obj() will not work on this 'p'.""" assert not hasattr(p, '__rawrefcount') assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _s_list.append(ob) -def from_obj(OBTYPE, p): +def from_obj(OB_PTR_TYPE, p): "NOT_RPYTHON" - null = 
lltype.nullptr(OBTYPE) + null = lltype.nullptr(OB_PTR_TYPE.TO) ob = getattr(p, '__rawrefcount', null) - assert lltype.typeOf(ob) == lltype.Ptr(OBTYPE) + assert lltype.typeOf(ob) == OB_PTR_TYPE return ob @specialize.arg(0) def to_obj(Class, ob): - pypy_gcref = ob.c_ob_pypy_link + link = ob.c_ob_pypy_link if we_are_translated(): + pypy_gcref = lltype.cast_int_to_ptr(llmemory.GCREF, link) return annlowlevel.cast_gcref_to_instance(Class, pypy_gcref) else: - if not pypy_gcref: + if link == 0: return None - p = rgc.try_cast_gcref_to_instance(Class, pypy_gcref) - assert p is not None + p = _adr2pypy[link] + assert isinstance(p, Class) return p def _collect(): @@ -73,9 +83,9 @@ def detach(ob, wr_list): assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT assert ob.c_ob_pypy_link - p = rgc.try_cast_gcref_to_instance(object, ob.c_ob_pypy_link) + p = _adr2pypy[ob.c_ob_pypy_link] assert p is not None - ob.c_ob_pypy_link = lltype.nullptr(llmemory.GCREF.TO) + _adr2pypy[ob.c_ob_pypy_link] = None wr_list.append((ob, weakref.ref(p))) global _p_list, _o_list, _s_list @@ -85,7 +95,7 @@ if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: new_p_list.append(ob) else: - wr_p_list.append(weakref.ref(ob)) + detach(ob, wr_p_list) ob = None _p_list = Ellipsis @@ -112,18 +122,19 @@ assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT p = wr() if p is not None: - ob.c_ob_pypy_link = rgc.cast_instance_to_gcref(p) + assert ob.c_ob_pypy_link + _adr2pypy[ob.c_ob_pypy_link] = p final_list.append(ob) else: ob.c_ob_refcnt -= REFCNT_FROM_PYPY_OBJECT - if ob.c_ob_refcnt == 0: + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0 and dealloc is not None: dealloc.append(ob) _p_list = new_p_list - for wr in wr_p_list: - ob = wr() - if ob is not None: - _p_list.append(ob) + dealloc = None + for ob, wr in wr_p_list: + attach(ob, wr, _p_list) # dealloc = [] _s_list = new_s_list diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ 
b/rpython/rlib/test/test_rawrefcount.py @@ -8,45 +8,43 @@ PyObjectS = lltype.Struct('PyObjectS', ('c_ob_refcnt', lltype.Signed), - ('c_ob_pypy_link', llmemory.GCREF)) + ('c_ob_pypy_link', lltype.Signed)) PyObject = lltype.Ptr(PyObjectS) class TestRawRefCount: def setup_method(self, meth): - del rawrefcount._p_list[:] - del rawrefcount._o_list[:] - del rawrefcount._s_list[:] + rawrefcount._reset_state() def test_create_link_pypy(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pypy(p, ob) - assert rawrefcount.from_obj(PyObjectS, p) == ob + assert rawrefcount.from_obj(PyObject, p) == ob assert rawrefcount.to_obj(W_Root, ob) == p def test_create_link_pyobj(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pyobj(p, ob) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p def test_create_link_shared(self): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_shared(p, ob) - assert rawrefcount.from_obj(PyObjectS, p) == lltype.nullptr(PyObjectS) + assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p def 
test_collect_p_dies(self): @@ -79,7 +77,7 @@ assert ob is not None and p is not None assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p - assert rawrefcount.from_obj(PyObjectS, p) == ob + assert rawrefcount.from_obj(PyObject, p) == ob def test_collect_p_keepalive_w_root(self): p = W_Root(42) @@ -94,7 +92,7 @@ assert ob is not None assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p - assert rawrefcount.from_obj(PyObjectS, p) == ob + assert rawrefcount.from_obj(PyObject, p) == ob def test_collect_o_dies(self): p = W_Root(42) From noreply at buildbot.pypy.org Wed Oct 14 22:15:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 22:15:11 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Emulation: works also with objects with __slots__ Message-ID: <20151014201511.3E0091C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80219:7988b2e5327d Date: 2015-10-14 22:12 +0200 http://bitbucket.org/pypy/pypy/changeset/7988b2e5327d/ Log: Emulation: works also with objects with __slots__ diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -13,11 +13,12 @@ def _reset_state(): - global _p_list, _o_list, _s_list, _adr2pypy + global _p_list, _o_list, _s_list, _adr2pypy, _pypy2ob _p_list = [] # not rpython _o_list = [] # not rpython _s_list = [] # not rpython _adr2pypy = [None] # not rpython + _pypy2ob = {} # not rpython _reset_state() def _build_pypy_link(p): @@ -28,37 +29,36 @@ def create_link_pypy(p, ob): "NOT_RPYTHON: a link where the PyPy object contains all the data" - assert not hasattr(p, '__rawrefcount') + assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT - p.__rawrefcount = ob + _pypy2ob[p] = ob _p_list.append(ob) def create_link_pyobj(p, ob): """NOT_RPYTHON: a link where the 
PyObject contains all the data. from_obj() will not work on this 'p'.""" - assert not hasattr(p, '__rawrefcount') + assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT - p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _o_list.append(ob) def create_link_shared(p, ob): """NOT_RPYTHON: a link where both p and ob contain some data. from_obj() will not work on this 'p'.""" - assert not hasattr(p, '__rawrefcount') + assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT - p.__rawrefcount = lltype.nullptr(lltype.typeOf(ob).TO) _s_list.append(ob) def from_obj(OB_PTR_TYPE, p): "NOT_RPYTHON" - null = lltype.nullptr(OB_PTR_TYPE.TO) - ob = getattr(p, '__rawrefcount', null) + ob = _pypy2ob.get(p) + if ob is None: + return lltype.nullptr(OB_PTR_TYPE.TO) assert lltype.typeOf(ob) == OB_PTR_TYPE return ob @@ -87,6 +87,7 @@ assert p is not None _adr2pypy[ob.c_ob_pypy_link] = None wr_list.append((ob, weakref.ref(p))) + return p global _p_list, _o_list, _s_list wr_p_list = [] @@ -95,7 +96,9 @@ if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: new_p_list.append(ob) else: - detach(ob, wr_p_list) + p = detach(ob, wr_p_list) + del _pypy2ob[p] + del p ob = None _p_list = Ellipsis @@ -125,16 +128,20 @@ assert ob.c_ob_pypy_link _adr2pypy[ob.c_ob_pypy_link] = p final_list.append(ob) + return p else: ob.c_ob_refcnt -= REFCNT_FROM_PYPY_OBJECT ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0 and dealloc is not None: dealloc.append(ob) + return None _p_list = new_p_list dealloc = None for ob, wr in wr_p_list: - attach(ob, wr, _p_list) + p = attach(ob, wr, _p_list) + if p: + _pypy2ob[p] = ob # dealloc = [] _s_list = new_s_list From noreply at buildbot.pypy.org Wed Oct 14 22:15:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 14 Oct 2015 22:15:13 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Pass a few tests 
from test_number. Message-ID: <20151014201513.CE97F1C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80220:41178972cbf5 Date: 2015-10-14 22:15 +0200 http://bitbucket.org/pypy/pypy/changeset/41178972cbf5/ Log: Pass a few tests from test_number. diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -77,17 +77,17 @@ some other error occurs, return -1 (failure) and don't increment the reference counts. The call PyNumber_Coerce(&o1, &o2) is equivalent to the Python statement o1, o2 = coerce(o1, o2).""" - w_obj1 = from_ref(space, pp1[0]) - w_obj2 = from_ref(space, pp2[0]) + w_obj1 = from_ref(pp1[0]) + w_obj2 = from_ref(pp2[0]) try: w_res = space.coerce(w_obj1, w_obj2) - except (TypeError, OperationError): + except OperationError: state = space.fromcache(State) state.clear_exception() return -1 w_res1, w_res2 = space.unpackiterable(w_res, 2) - pp1[0] = make_ref(space, w_res1) - pp2[0] = make_ref(space, w_res2) + pp1[0] = make_ref(w_res1) + pp2[0] = make_ref(w_res2) return 0 def func_rename(newname): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -338,18 +338,10 @@ # ZZZ: use an ExtRegistryEntry to constant-fold is_pyobj() -def make_ref(space, w_obj): - ZZZ - assert isinstance(w_obj, W_Root) - state = space.fromcache(RefcountState) - try: - py_obj = state.py_objects_w2r[w_obj] - except KeyError: - py_obj = create_ref(space, w_obj) - track_reference(space, py_obj, w_obj) - else: - Py_IncRef(space, py_obj) - return py_obj +def make_ref(w_obj): + pyobj = as_pyobj(w_obj) + pyobj.c_ob_refcnt += 1 + return pyobj def ZZZ_from_ref(space, ref): diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -43,23 +43,23 @@ 
w_obj1 = space.wrap(123) w_obj2 = space.wrap(456.789) pp1 = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - pp1[0] = make_ref(space, w_obj1) + pp1[0] = make_ref(w_obj1) pp2 = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - pp2[0] = make_ref(space, w_obj2) + pp2[0] = make_ref(w_obj2) assert api.PyNumber_Coerce(pp1, pp2) == 0 - assert space.str_w(space.repr(from_ref(space, pp1[0]))) == '123.0' - assert space.str_w(space.repr(from_ref(space, pp2[0]))) == '456.789' - Py_DecRef(space, pp1[0]) + assert space.str_w(space.repr(from_ref(pp1[0]))) == '123.0' + assert space.str_w(space.repr(from_ref(pp2[0]))) == '456.789' + Py_DecRef(space, pp1[0]) # for the refs returned by PyNumber_Coerce Py_DecRef(space, pp2[0]) lltype.free(pp1, flavor='raw') # Yes, decrement twice since we decoupled between w_obj* and pp*[0]. - Py_DecRef(space, w_obj1) + Py_DecRef(space, w_obj1) # for the make_ref() above Py_DecRef(space, w_obj2) lltype.free(pp2, flavor='raw') def test_number_coerce_ex(self, space, api): - pl = make_ref(space, space.wrap(123)) - pf = make_ref(space, space.wrap(42.)) + pl = make_ref(space.wrap(123)) + pf = make_ref(space.wrap(42.)) ppl = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ppf = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ppl[0] = pl @@ -68,7 +68,7 @@ ret = api.PyNumber_CoerceEx(ppl, ppf) assert ret == 0 - w_res = from_ref(space, ppl[0]) + w_res = from_ref(ppl[0]) assert api.PyFloat_Check(w_res) assert space.unwrap(w_res) == 123. 
From noreply at buildbot.pypy.org Wed Oct 14 22:39:58 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 14 Oct 2015 22:39:58 +0200 (CEST) Subject: [pypy-commit] pypy default: test and fix (via a fastpath for scalars) SliceArrays with scalar assignment Message-ID: <20151014203958.23E4E1C0726@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80221:de3e116ddd1c Date: 2015-10-14 23:40 +0300 http://bitbucket.org/pypy/pypy/changeset/de3e116ddd1c/ Log: test and fix (via a fastpath for scalars) SliceArrays with scalar assignment diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -70,7 +70,10 @@ @jit.unroll_safe def setslice(self, space, arr): - if len(arr.get_shape()) > len(self.get_shape()): + if arr.get_size() == 1: + # we can always set self[:] = scalar + pass + elif len(arr.get_shape()) > len(self.get_shape()): # record arrays get one extra dimension if not self.dtype.is_record() or \ len(arr.get_shape()) > len(self.get_shape()) + 1: diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -319,6 +319,28 @@ assert out0.dtype in (int, complex) assert (out0 == in0 * 2).all() + def test_frompyfunc_scalar(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def summer(in0): + out = np.empty(1, in0.dtype) + out[0] = in0.sum() + return out + + pysummer = np.frompyfunc([summer, summer], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, signature='(m,m)->()', + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d).reshape(1, 2, 2) + out0 = pysummer(in0) + assert out0 == in0.sum() + assert out0.dtype in 
(int, complex) + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): From noreply at buildbot.pypy.org Thu Oct 15 03:40:35 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 Oct 2015 03:40:35 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Always compute the classdef when creating a ClassDesc Message-ID: <20151015014035.CD84A1C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80222:526259e6794f Date: 2015-10-14 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/526259e6794f/ Log: Always compute the classdef when creating a ClassDesc diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -359,7 +359,7 @@ if pyobj.__module__ == '__builtin__': # avoid making classdefs for builtin types result = self.getfrozen(pyobj) else: - result = ClassDesc(self, pyobj) + return self._new_classdesc(pyobj) elif isinstance(pyobj, types.MethodType): if pyobj.im_self is None: # unbound return self.getdesc(pyobj.im_func) @@ -441,6 +441,12 @@ def valueoftype(self, t): return annotationoftype(t, self) + def _new_classdesc(self, pycls): + result = ClassDesc(self, pycls) + self.descs[pycls] = result + result._init_classdef() + return result + def get_classpbc_attr_families(self, attrname): """Return the UnionFind for the ClassAttrFamilies corresponding to attributes of the given name. 
From noreply at buildbot.pypy.org Thu Oct 15 03:40:37 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 Oct 2015 03:40:37 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Remove 'bookkeeper' attribute from class 'Attribute' Message-ID: <20151015014037.E29121C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80223:38bdaa3bcb57 Date: 2015-10-14 20:07 +0100 http://bitbucket.org/pypy/pypy/changeset/38bdaa3bcb57/ Log: Remove 'bookkeeper' attribute from class 'Attribute' diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -498,6 +498,11 @@ return s_result + def update_attr(self, clsdef, attrdef): + for position in attrdef.read_locations: + self.annotator.reflowfromposition(position) + attrdef.validate(homedef=clsdef) + def pbc_call(self, pbc, args, emulated=None): """Analyse a call to a SomePBC() with the given args (list of annotations). diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ b/rpython/annotator/classdesc.py @@ -73,10 +73,9 @@ # Both writing to the instance attribute and discovering prebuilt # instances that have the attribute set will turn off readonly-ness. 
- def __init__(self, name, bookkeeper): + def __init__(self, name): assert name != '__class__' self.name = name - self.bookkeeper = bookkeeper self.s_value = s_ImpossibleValue self.readonly = True self.attr_allowed = True @@ -87,7 +86,7 @@ if source.instance_level: # a prebuilt instance source forces readonly=False, see above self.modified(classdef) - s_new_value = unionof(self.s_value, s_value) # XXX "source %r attr %s" % (source, self.name), + s_new_value = unionof(self.s_value, s_value) self.s_value = s_new_value def getvalue(self): @@ -96,18 +95,15 @@ def merge(self, other, classdef='?'): assert self.name == other.name - s_new_value = unionof(self.s_value, other.s_value) # XXX "%s attr %s" % (classdef, self.name) + s_new_value = unionof(self.s_value, other.s_value) self.s_value = s_new_value if not other.readonly: self.modified(classdef) self.read_locations.update(other.read_locations) - def mutated(self, homedef): # reflow from attr read positions - s_newvalue = self.getvalue() - - for position in self.read_locations: - self.bookkeeper.annotator.reflowfromposition(position) - + def validate(self, homedef): + s_newvalue = self.s_value + homedesc = homedef.classdesc # check for method demotion and after-the-fact method additions if isinstance(s_newvalue, SomePBC): attr = self.name @@ -187,7 +183,7 @@ # but as an optimization we try to see if the attribute # has really been generalized if attrdef.s_value != s_prev_value: - attrdef.mutated(cdef) # reflow from all read positions + self.bookkeeper.update_attr(cdef, attrdef) return else: # remember the source in self.attr_sources @@ -205,7 +201,7 @@ s_prev_value = attrdef.s_value attrdef.add_constant_source(self, source) if attrdef.s_value != s_prev_value: - attrdef.mutated(subdef) # reflow from all read positions + self.bookkeeper.update_attr(subdef, attrdef) def locate_attribute(self, attr): while True: @@ -276,7 +272,7 @@ constant_sources.append((superdef, source)) # create the Attribute and do the generalization 
asked for - newattr = Attribute(attr, self.bookkeeper) + newattr = Attribute(attr) if s_value: #if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): # import pdb; pdb.set_trace() @@ -296,7 +292,7 @@ newattr.add_constant_source(origin_classdef, source) # reflow from all read positions - newattr.mutated(self) + self.bookkeeper.update_attr(self, newattr) def generalize_attr(self, attr, s_value=None): # if the attribute exists in a superclass, generalize there, From noreply at buildbot.pypy.org Thu Oct 15 03:40:40 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 Oct 2015 03:40:40 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: fix PyPy translation Message-ID: <20151015014040.009F81C0726@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80224:d734e4d41935 Date: 2015-10-14 21:20 +0100 http://bitbucket.org/pypy/pypy/changeset/d734e4d41935/ Log: fix PyPy translation diff --git a/pypy/module/_minimal_curses/interp_curses.py b/pypy/module/_minimal_curses/interp_curses.py --- a/pypy/module/_minimal_curses/interp_curses.py +++ b/pypy/module/_minimal_curses/interp_curses.py @@ -13,7 +13,7 @@ def __init__(self, msg): self.msg = msg -from rpython.annotator.description import FORCE_ATTRIBUTES_INTO_CLASSES +from rpython.annotator.classdesc import FORCE_ATTRIBUTES_INTO_CLASSES from rpython.annotator.model import SomeString # this is necessary due to annmixlevel diff --git a/pypy/tool/ann_override.py b/pypy/tool/ann_override.py --- a/pypy/tool/ann_override.py +++ b/pypy/tool/ann_override.py @@ -2,6 +2,7 @@ from rpython.annotator.policy import AnnotatorPolicy from rpython.flowspace.model import Constant from rpython.annotator import specialize +from rpython.annotator.classdesc import InstanceSource, ClassDef @@ -20,7 +21,6 @@ def specialize__wrap(self, funcdesc, args_s): from pypy.interpreter.baseobjspace import W_Root - from rpython.annotator.classdef import ClassDef W_Root_def = 
funcdesc.bookkeeper.getuniqueclassdef(W_Root) typ = args_s[1].knowntype if isinstance(typ, ClassDef): @@ -73,7 +73,6 @@ return False def consider_lookup(self, bookkeeper, attr): - from rpython.annotator.classdef import InstanceSource assert attr not in self.lookups from pypy.objspace.std import typeobject cached = "cached_%s" % attr @@ -88,7 +87,6 @@ self.lookups[attr] = True def consider_lookup_in_type_where(self, bookkeeper, attr): - from rpython.annotator.classdef import InstanceSource assert attr not in self.lookups_where from pypy.objspace.std import typeobject cached = "cached_where_%s" % attr @@ -135,7 +133,6 @@ def event(self, bookkeeper, what, x): from pypy.objspace.std import typeobject if isinstance(x, typeobject.W_TypeObject): - from rpython.annotator.classdef import InstanceSource clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject) self.pypytypes[x] = True #print "TYPE", x diff --git a/rpython/translator/tool/graphpage.py b/rpython/translator/tool/graphpage.py --- a/rpython/translator/tool/graphpage.py +++ b/rpython/translator/tool/graphpage.py @@ -3,7 +3,7 @@ from rpython.translator.tool.make_dot import DotGen, make_dot_graphs from rpython.annotator.model import SomePBC from rpython.annotator.description import MethodDesc -from rpython.annotator.classdef import ClassDef +from rpython.annotator.classdesc import ClassDef from rpython.tool.uid import uid from rpython.tool.udir import udir From noreply at buildbot.pypy.org Thu Oct 15 05:40:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Oct 2015 05:40:17 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20151015034017.1F7C61C120E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r641:8a731200993b Date: 2015-10-15 05:41 +0200 http://bitbucket.org/pypy/pypy.org/changeset/8a731200993b/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $60580 of $105000 (57.7%) 
+ $60690 of $105000 (57.8%)
@@ -23,7 +23,7 @@
  • diff --git a/don3.html b/don3.html --- a/don3.html +++ b/don3.html @@ -15,7 +15,7 @@ - $52608 of $60000 (87.7%) + $52617 of $60000 (87.7%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $29481 of $80000 (36.9%) + $29527 of $80000 (36.9%)
    @@ -25,7 +25,7 @@
  • From noreply at buildbot.pypy.org Thu Oct 15 09:33:28 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 09:33:28 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: merged default Message-ID: <20151015073328.114881C00E2@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80229:04ab6b2dc6ff Date: 2015-10-15 09:33 +0200 http://bitbucket.org/pypy/pypy/changeset/04ab6b2dc6ff/ Log: merged default diff too long, truncating to 2000 out of 2228 lines diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -75,7 +75,13 @@ remain valid as long as the target exists (unlike the previous version, where handles become invalid *before* the __del__ is called). +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions + .. branch: vecopt .. 
branch: vecopt-merge -A new optimization pass to use SIMD instructions for trace loop that allow this +A new optimization pass to use emit vectorized loops diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, generic_cpy_call, CONST_STRING, 
CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git 
a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + py_traceback = 
rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 3 + assert (a + 
b) == 42 + raises(NotImplementedError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -70,7 +70,10 @@ @jit.unroll_safe def setslice(self, space, arr): - if len(arr.get_shape()) > len(self.get_shape()): + if arr.get_size() == 1: + # we can always set self[:] = scalar + pass + elif len(arr.get_shape()) > len(self.get_shape()): # record arrays get one extra dimension if not self.dtype.is_record() or \ len(arr.get_shape()) > len(self.get_shape()) + 1: diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -161,10 +161,10 @@ call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -182,9 +182,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -195,10 +195,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out must hav been built. 
func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -221,24 +221,29 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -159,8 +159,7 @@ af2 = ufunc(af) assert all(af2 == af * 2) ac = arange(10, dtype=complex) - skip('casting not implemented yet') - ac1 = ufunc(ac) + 
raises(TypeError, ufunc, ac) def test_frompyfunc_2d_sig(self): import sys @@ -199,6 +198,10 @@ ai2 = ufunc(aiV) assert (ai2 == aiV * 2).all() + ai = arange(0).reshape(0, 1, 1) + ao = ufunc(ai) + assert ao.shape == (0, 1, 1) + def test_frompyfunc_needs_nditer(self): import sys from numpy import frompyfunc, dtype, arange @@ -268,6 +271,76 @@ assert out0.shape == in0.shape assert (out0 == in0 * 2).all() + def test_frompyfunc_casting(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def times2_int(in0, out0): + assert in0.dtype == int + assert out0.dtype == int + # hack to assing to a 0-dim array + out0.real = in0 * 2 + + def times2_complex(in0, out0): + assert in0.dtype == complex + assert out0.dtype == complex + out0.real = in0.real * 2 + out0.imag = in0.imag + + def times2_complex0(in0): + assert in0.dtype == complex + return in0 * 2 + + def times2_int0(in0): + assert in0.dtype == int + return in0 * 2 + + times2stacked = np.frompyfunc([times2_int, times2_complex], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=True, signature='()->()', + ) + times2 = np.frompyfunc([times2_int0, times2_complex0], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d) + out0 = times2stacked(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + out0 = times2(in0) + assert out0.shape == in0.shape + assert out0.dtype in (int, complex) + assert (out0 == in0 * 2).all() + + def test_frompyfunc_scalar(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def summer(in0): + out = np.empty(1, in0.dtype) + out[0] = in0.sum() + return out + + pysummer = 
np.frompyfunc([summer, summer], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, signature='(m,m)->()', + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d).reshape(1, 2, 2) + out0 = pysummer(in0) + assert out0 == in0.sum() + assert out0.dtype in (int, complex) + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): @@ -1393,7 +1466,7 @@ def test_add_doc(self): import sys if '__pypy__' not in sys.builtin_module_names: - skip('') + skip('cpython sets docstrings differently') try: from numpy import set_docstring except ImportError: diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -709,6 +709,32 @@ raise oefmt(space.w_TypeError, "ufunc '%s' not supported for the input types", self.name) +def _match_dtypes(space, indtypes, targetdtypes, i_target, casting): + allok = True + for i in range(len(indtypes)): + origin = indtypes[i] + target = targetdtypes[i + i_target] + if origin is None: + continue + if target is None: + continue + if not can_cast_type(space, origin, target, casting): + allok = False + break + return allok + +def _raise_err_msg(self, space, dtypes0, dtypes1): + dtypesstr = '' + for d in dtypes0: + if d is None: + dtypesstr += 'None,' + else: + dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) + _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ + for d in dtypes1]) + raise oefmt(space.w_TypeError, + "input dtype [%s] did not match any known dtypes [%s] ", + dtypesstr,_dtypesstr) class W_UfuncGeneric(W_Ufunc): @@ -799,29 +825,36 @@ outargs0 = outargs[0] assert isinstance(inargs0, W_NDimArray) assert isinstance(outargs0, W_NDimArray) + nin = self.nin + assert nin >= 0 res_dtype = outargs0.get_dtype() new_shape = inargs0.get_shape() # XXX use _find_array_wrap and wrap 
outargs using __array_wrap__ + if self.stack_inputs: + loop.call_many_to_many(space, new_shape, func, + dtypes, [], inargs + outargs, []) + if len(outargs) < 2: + return outargs[0] + return space.newtuple(outargs) if len(outargs) < 2: return loop.call_many_to_one(space, new_shape, func, - res_dtype, inargs, outargs[0]) + dtypes[:nin], dtypes[-1], inargs, outargs[0]) return loop.call_many_to_many(space, new_shape, func, - res_dtype, inargs, outargs) + dtypes[:nin], dtypes[nin:], inargs, outargs) + w_casting = space.w_None + w_op_dtypes = space.w_None for tf in need_to_cast: if tf: - raise oefmt(space.w_NotImplementedError, "casting not supported yet") + w_casting = space.wrap('safe') + w_op_dtypes = space.newtuple([space.wrap(d) for d in dtypes]) + w_flags = space.w_None # NOT 'external_loop', we do coalescing by core_num_dims - w_op_flags = space.newtuple([space.wrap(r) for r in ['readonly'] * len(inargs)] + \ - [space.wrap(r) for r in ['readwrite'] * len(outargs)]) - w_op_dtypes = space.w_None - w_casting = space.w_None + w_ro = space.newtuple([space.wrap('readonly'), space.wrap('copy')]) + w_rw = space.newtuple([space.wrap('readwrite'), space.wrap('updateifcopy')]) + + w_op_flags = space.newtuple([w_ro] * len(inargs) + [w_rw] * len(outargs)) w_op_axes = space.w_None - #print '\nsignature', sig - #print [(d, getattr(self,d)) for d in dir(self) if 'core' in d or 'broad' in d] - #print [(d, locals()[d]) for d in locals() if 'core' in d or 'broad' in d] - #print 'shapes',[d.get_shape() for d in inargs + outargs] - #print 'steps',[d.implementation.strides for d in inargs + outargs] if isinstance(func, W_GenericUFuncCaller): # Use GeneralizeUfunc interface with signature # Unlike numpy, we will not broadcast dims before @@ -934,19 +967,32 @@ # linear_search_type_resolver in numpy ufunc_type_resolutions.c # type_tup can be '', a tuple of dtypes, or a string # of the form d,t -> D where the letters are dtype specs - nop = len(inargs) + len(outargs) + + # XXX why does the 
next line not pass translation? + # dtypes = [i.get_dtype() for i in inargs] dtypes = [] + for i in inargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) + for i in outargs: + if isinstance(i, W_NDimArray): + dtypes.append(i.get_dtype()) + else: + dtypes.append(None) if isinstance(type_tup, str) and len(type_tup) > 0: try: if len(type_tup) == 1: - dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs + s_dtypes = [get_dtype_cache(space).dtypes_by_name[type_tup]] * self.nargs elif len(type_tup) == self.nargs + 2: + s_dtypes = [] for i in range(self.nin): - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[i]]) #skip the '->' in the signature for i in range(self.nout): j = i + self.nin + 2 - dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) + s_dtypes.append(get_dtype_cache(space).dtypes_by_name[type_tup[j]]) else: raise oefmt(space.w_TypeError, "a type-string for %s " \ "requires 1 typecode or %d typecode(s) before and %d" \ @@ -955,42 +1001,29 @@ except KeyError: raise oefmt(space.w_ValueError, "unknown typecode in" \ " call to %s with type-string '%s'", self.name, type_tup) - else: - # XXX why does the next line not pass translation? 
- # dtypes = [i.get_dtype() for i in inargs] - for i in inargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) - for i in outargs: - if isinstance(i, W_NDimArray): - dtypes.append(i.get_dtype()) - else: - dtypes.append(None) + # Make sure args can be cast to dtypes + if not _match_dtypes(space, dtypes, s_dtypes, 0, "safe"): + _raise_err_msg(self, space, dtypes, s_dtypes) + dtypes = s_dtypes #Find the first matchup of dtypes with _dtypes for i in range(0, len(_dtypes), self.nargs): - allok = True - for j in range(self.nargs): - if dtypes[j] is not None and dtypes[j] != _dtypes[i+j]: - allok = False + allok = _match_dtypes(space, dtypes, _dtypes, i, "no") if allok: break else: - if len(self.funcs) > 1: - - dtypesstr = '' - for d in dtypes: - if d is None: - dtypesstr += 'None,' - else: - dtypesstr += '%s%s%s,' % (d.byteorder, d.kind, d.elsize) - _dtypesstr = ','.join(['%s%s%s' % (d.byteorder, d.kind, d.elsize) \ - for d in _dtypes]) - raise oefmt(space.w_TypeError, - "input dtype [%s] did not match any known dtypes [%s] ", - dtypesstr,_dtypesstr) - i = 0 + # No exact matches, can we cast? + for i in range(0, len(_dtypes), self.nargs): + allok = _match_dtypes(space, dtypes, _dtypes, i, "safe") + if allok: + end = i + self.nargs + assert i >= 0 + assert end >=0 + dtypes = _dtypes[i:end] + break + else: + if len(self.funcs) > 1: + _raise_err_msg(self, space, dtypes, _dtypes) + i = 0 # Fill in empty dtypes for j in range(self.nargs): if dtypes[j] is None: @@ -1086,7 +1119,7 @@ for j in range(offset, len(iter_shape)): x = iter_shape[j + offset] y = dims_to_broadcast[j] - if (x > y and x % y) or y %x: + if y != 0 and x != 0 and ((x > y and x % y) or y %x): raise oefmt(space.w_ValueError, "%s: %s operand %d has a " "mismatch in its broadcast dimension %d " "(size %d is different from %d)", @@ -1123,7 +1156,7 @@ # the current op (signalling it can handle ndarray's). 
# TODO parse and handle subok - # TODO handle flags, op_flags + # TODO handle more flags, op_flags #print 'iter_shape',iter_shape,'arg_shapes',arg_shapes,'matched_dims',matched_dims return iter_shape, arg_shapes, matched_dims diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,12 +8,12 @@ from contextlib import contextmanager from rpython.flowspace.model import Constant -from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, +from rpython.annotator.model import ( + SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance, + SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty) + SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty, AnnotatorError) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef @@ -225,7 +225,8 @@ x = int(x) result = SomeInteger(nonneg = x>=0) else: - raise Exception("seeing a prebuilt long (value %s)" % hex(x)) + # XXX: better error reporting? 
+ raise ValueError("seeing a prebuilt long (value %s)" % hex(x)) elif issubclass(tp, str): # py.lib uses annotated str subclasses no_nul = not '\x00' in x if len(x) == 1: diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py --- a/rpython/annotator/classdef.py +++ b/rpython/annotator/classdef.py @@ -112,14 +112,10 @@ for desc in s_newvalue.descriptions: if desc.selfclassdef is None: if homedef.classdesc.settled: - raise Exception("demoting method %s " - "to settled class %s not " - "allowed" % - (self.name, homedef) - ) - #self.bookkeeper.warning("demoting method %s " - # "to base class %s" % - # (self.name, homedef)) + raise AnnotatorError( + "demoting method %s to settled class " + "%s not allowed" % (self.name, homedef) + ) break # check for attributes forbidden by slots or _attrs_ diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -801,8 +801,9 @@ s_init = basedesc.s_read_attribute('__init__') parent_has_init = isinstance(s_init, SomePBC) if has_init and not parent_has_init: - raise Exception("some subclasses among %r declare __init__()," - " but not the common parent class" % (descs,)) + raise AnnotatorError( + "some subclasses among %r declare __init__()," + " but not the common parent class" % (descs,)) # make a PBC of MethodDescs, one for the __init__ of each class initdescs = [] for desc, classdef in zip(descs, classdefs): diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4,10 +4,12 @@ from rpython.conftest import option from rpython.annotator import model as annmodel +from rpython.annotator.model import AnnotatorError, UnionError from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator +from rpython.annotator.classdef import NoSuchAttrError from 
rpython.translator.translator import graphof as tgraphof from rpython.annotator.policy import AnnotatorPolicy -from rpython.annotator.signature import Sig +from rpython.annotator.signature import Sig, SignatureError from rpython.annotator.listdef import ListDef, ListChangeUnallowed from rpython.annotator.dictdef import DictDef from rpython.flowspace.model import * @@ -213,7 +215,7 @@ def f(): return X().meth() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_methodcall1(self): a = self.RPythonAnnotator() @@ -360,7 +362,7 @@ def f(l): return g(*l) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [[int]]) def test_star_unpack_and_keywords(self): @@ -769,7 +771,8 @@ def f(): return x a = self.RPythonAnnotator(policy=AnnotatorPolicy()) - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(Exception): + a.build_types(f, []) def test_exception_deduction_with_raise1(self): a = self.RPythonAnnotator() @@ -959,14 +962,16 @@ def f(): return large_constant a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(ValueError): + a.build_types(f, []) # if you want to get a r_uint, you have to be explicit about it def test_add_different_ints(self): def f(a, b): return a + b a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_different_ints(self): def f(a, b): @@ -976,7 +981,8 @@ c = b return c a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [r_uint, int]) + with py.test.raises(UnionError): + a.build_types(f, [r_uint, int]) def test_merge_ruint_zero(self): def f(a): @@ -2612,14 +2618,14 @@ def f(): return A() a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, 
a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) # class B(object): pass x = B() def g(): return isinstance(x, A) - py.test.raises(annmodel.AnnotatorError, a.build_types, g, []) + py.test.raises(AnnotatorError, a.build_types, g, []) def test_import_from_mixin(self): class M(object): @@ -2694,7 +2700,8 @@ return a.x # should explode here a = self.RPythonAnnotator() - e = py.test.raises(Exception, a.build_types, f, [int]) + with py.test.raises(NoSuchAttrError) as excinfo: + a.build_types(f, [int]) # this should explode on reading the attribute 'a.x', but it can # sometimes explode on 'self.x = x', which does not make much sense. # But it looks hard to fix in general: we don't know yet during 'a.x' @@ -2928,7 +2935,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_simpler(self): def fun(x, y): @@ -2940,7 +2948,8 @@ s = a.build_types(fun, [s_nonneg, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [int, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [int, int]) def test_sig_lambda(self): def fun(x, y): @@ -2954,7 +2963,8 @@ s = a.build_types(fun, [int, s_nonneg]) assert isinstance(s, annmodel.SomeInteger) assert not s.nonneg - py.test.raises(Exception, a.build_types, fun, [s_nonneg, int]) + with py.test.raises(SignatureError): + a.build_types(fun, [s_nonneg, int]) def test_sig_bug(self): def g(x, y=5): @@ -3004,8 +3014,8 @@ if works: a.build_types(fun, [int]) else: - from rpython.annotator.classdef import NoSuchAttrError - py.test.raises(NoSuchAttrError, a.build_types, fun, [int]) + with py.test.raises(NoSuchAttrError): + a.build_types(fun, [int]) def test_slots_enforce_attrs(self): class Superbase(object): @@ -3138,7 +3148,8 @@ return a.n() a = 
self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fun, [bool]) + with py.test.raises(AnnotatorError): + a.build_types(fun, [bool]) def test_float_cmp(self): def fun(x, y): @@ -3227,6 +3238,7 @@ assert isinstance(s.items[2], annmodel.SomeInstance) assert s.items[2].flags == {} + @py.test.mark.xfail def test_no_access_directly_on_heap(self): from rpython.rlib.jit import hint @@ -3243,7 +3255,8 @@ i.x = x a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, []) + with py.test.raises(AnnotatorError): + a.build_types(f, []) class M: @@ -3267,7 +3280,7 @@ c.m.l.append(x) a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3275,7 +3288,7 @@ c.m.d[None] = x a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def f(): x = A() @@ -3283,7 +3296,7 @@ c.m.d[x] = None a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + py.test.raises(AnnotatorError, a.build_types, f, []) def test_ctr_location(self): class A: @@ -3342,7 +3355,8 @@ if g(x, y): g(x, r_uint(y)) a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, f, [int, int]) + with py.test.raises(UnionError): + a.build_types(f, [int, int]) def test_compare_with_zero(self): def g(): @@ -3464,22 +3478,22 @@ return '%s' % unichr(x) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s' % (unichr(x) * 3) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def f(x): return '%s%s' % (1, unichr(x)) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, 
[int]) def f(x): return '%s%s' % (1, unichr(x) * 15) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, a.build_types, f, [int]) + py.test.raises(AnnotatorError, a.build_types, f, [int]) def test_strformatting_tuple(self): @@ -3517,7 +3531,7 @@ return [1, 2, 3][s:e] a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(x): @@ -3530,20 +3544,20 @@ return "xyz".find("x", s, e) a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".rfind("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) def f(s, e): return "xyz".count("x", s, e) - py.test.raises(annmodel.AnnotatorError, "a.build_types(f, [int, int])") + py.test.raises(AnnotatorError, "a.build_types(f, [int, int])") a.build_types(f, [annmodel.SomeInteger(nonneg=True), annmodel.SomeInteger(nonneg=True)]) @@ -3717,7 +3731,8 @@ raise Exception(lle) # ^^^ instead, must cast back from a base ptr to an instance a = self.RPythonAnnotator() - py.test.raises(AssertionError, a.build_types, f, []) + with py.test.raises(AssertionError): + a.build_types(f, []) def test_enumerate(self): def f(): @@ -4102,7 +4117,8 @@ e = cls() e.foo = "bar" a = self.RPythonAnnotator() - py.test.raises(Exception, a.build_types, fn, []) + with py.test.raises(NoSuchAttrError): + a.build_types(fn, []) def test_lower_char(self): def fn(c): @@ -4214,7 +4230,7 @@ return "bbb" a = self.RPythonAnnotator() - with 
py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) the_exc = exc.value @@ -4230,7 +4246,7 @@ return (1, 2) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg @@ -4243,7 +4259,7 @@ return -1 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot prove that these integers are of the " @@ -4260,7 +4276,7 @@ return B() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify instances with no common base class" @@ -4276,7 +4292,7 @@ return d.itervalues() a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f, [int]) assert ("RPython cannot unify incompatible iterator variants" in @@ -4288,7 +4304,7 @@ a = A() return getattr(a, y) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("variable argument to getattr" in exc.value.msg) @@ -4296,7 +4312,7 @@ def f(x): return x() a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) @@ -4305,7 +4321,7 @@ def f(x): l.append(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as excinfo: + with py.test.raises(UnionError) as excinfo: a.build_types(f, [int]) assert 'Happened at file' in excinfo.value.source assert 'Known variable annotations:' in excinfo.value.source @@ -4314,7 +4330,7 @@ def 
f(s, x): return s.format(x) a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError) as exc: + with py.test.raises(AnnotatorError) as exc: a.build_types(f, [str, str]) assert ("format() is not RPython" in exc.value.msg) @@ -4350,7 +4366,7 @@ def f(x): a, b = x a = self.RPythonAnnotator() - py.test.raises(annmodel.AnnotatorError, + py.test.raises(AnnotatorError, a.build_types, f, [annmodel.s_None]) def test_class___name__(self): @@ -4464,10 +4480,10 @@ o = O2(n) o.x = 20 a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f1, [int]) a = self.RPythonAnnotator() - with py.test.raises(annmodel.UnionError) as exc: + with py.test.raises(UnionError) as exc: a.build_types(f2, [int]) def test_property_union_2(self): @@ -4496,7 +4512,7 @@ a = self.RPythonAnnotator() # Ideally, this should translate to something sensible, # but for now, AnnotatorError is better than silently mistranslating. - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_property_union_3(self): @@ -4516,7 +4532,7 @@ obj = B() return obj.x a = self.RPythonAnnotator() - with py.test.raises(annmodel.AnnotatorError): + with py.test.raises(AnnotatorError): a.build_types(f, [int]) def test_dict_can_be_none_ordering_issue(self): diff --git a/rpython/annotator/test/test_annsimplifyrpython.py b/rpython/annotator/test/test_annsimplifyrpython.py --- a/rpython/annotator/test/test_annsimplifyrpython.py +++ b/rpython/annotator/test/test_annsimplifyrpython.py @@ -3,6 +3,7 @@ from rpython.annotator.test.test_annrpython import graphof from rpython.annotator.test.test_annrpython import TestAnnotateTestCase as parent +from rpython.annotator.model import AnnotatorError class TestAnnotateAndSimplifyTestCase(parent): @@ -132,5 +133,5 @@ cls = C return cls().foo a = self.RPythonAnnotator() - with py.test.raises(Exception): + with py.test.raises(AnnotatorError): 
a.build_types(f, [int]) diff --git a/rpython/rlib/rdynload.py b/rpython/rlib/rdynload.py --- a/rpython/rlib/rdynload.py +++ b/rpython/rlib/rdynload.py @@ -2,13 +2,13 @@ """ from rpython.rtyper.tool import rffi_platform -from rpython.rtyper.lltypesystem import rffi +from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import r_uint from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.platform import platform -import sys +import sys, os, string # maaaybe isinstance here would be better. Think _MSVC = platform.name == "msvc" @@ -103,6 +103,48 @@ return ("opening %r with ctypes.CDLL() works, " "but not with c_dlopen()??" % (name,)) + def _retry_as_ldscript(err, mode): + """ ld scripts are fairly straightforward to parse (the library we want + is in a form like 'GROUP ( '. A simple state machine + can parse that out (avoids regexes).""" + + parts = err.split(":") + if len(parts) != 2: + return lltype.nullptr(rffi.VOIDP.TO) + fullpath = parts[0] + actual = "" + last_five = " " + state = 0 + ldscript = os.open(fullpath, os.O_RDONLY, 0777) + c = os.read(ldscript, 1) + while c != "": + if state == 0: + last_five += c + last_five = last_five[1:6] + if last_five == "GROUP": + state = 1 + elif state == 1: + if c == "(": + state = 2 + elif state == 2: + if c not in string.whitespace: + actual += c + state = 3 + elif state == 3: + if c in string.whitespace or c == ")": + break + else: + actual += c + c = os.read(ldscript, 1) + os.close(ldscript) + if actual != "": + a = rffi.str2charp(actual) + lib = c_dlopen(a, rffi.cast(rffi.INT, mode)) + rffi.free_charp(a) + return lib + else: + return lltype.nullptr(rffi.VOIDP.TO) + def dlopen(name, mode=-1): """ Wrapper around C-level dlopen """ @@ -119,7 +161,17 @@ err = _dlerror_on_dlopen_untranslated(name) else: err = dlerror() - raise DLOpenError(err) + if platform.name == "linux" and 'invalid ELF header' in err: + 
# some linux distros put ld linker scripts in .so files + # to load libraries more dynamically. The error contains the + # full path to something that is probably a script to load + # the library we want. + res = _retry_as_ldscript(err, mode) + if not res: + raise DLOpenError(err) + return res + else: + raise DLOpenError(err) return res dlclose = c_dlclose diff --git a/rpython/rlib/rstrategies/rstrategies.py b/rpython/rlib/rstrategies/rstrategies.py --- a/rpython/rlib/rstrategies/rstrategies.py +++ b/rpython/rlib/rstrategies/rstrategies.py @@ -41,7 +41,7 @@ attrs['get_storage'] = get_storage attrs['set_storage'] = set_storage return type.__new__(self, name, bases, attrs) - + def strategy(generalize=None, singleton=True): """ Strategy classes must be decorated with this. @@ -71,19 +71,19 @@ class StrategyFactory(object): _immutable_fields_ = ["strategies[*]", "logger", "strategy_singleton_field"] factory_instance_counter = 0 - + def __init__(self, root_class, all_strategy_classes=None): if all_strategy_classes is None: all_strategy_classes = self._collect_subclasses(root_class) self.strategies = [] self.logger = logger.Logger() - + # This is to avoid confusion between multiple factories existing simultaneously (e.g. in tests) self.strategy_singleton_field = "__singleton_%i" % StrategyFactory.factory_instance_counter StrategyFactory.factory_instance_counter += 1 - + self._create_strategy_instances(root_class, all_strategy_classes) - + def _create_strategy_instances(self, root_class, all_strategy_classes): for strategy_class in all_strategy_classes: if strategy_class._is_strategy: @@ -91,11 +91,11 @@ self.strategies.append(strategy_class) self._patch_strategy_class(strategy_class, root_class) self._order_strategies() - + # ============================= # API methods # ============================= - + def switch_strategy(self, w_self, new_strategy_type, new_element=None): """ Switch the strategy of w_self to the new type. 
@@ -113,7 +113,7 @@ new_strategy.strategy_switched(w_self) self.log(w_self, new_strategy, old_strategy, new_element) return new_strategy - + def set_initial_strategy(self, w_self, strategy_type, size, elements=None): """ Initialize the strategy and storage fields of w_self. @@ -135,7 +135,7 @@ strategy.strategy_switched(w_self) self.log(w_self, strategy, None, element) return strategy - + @jit.unroll_safe def strategy_type_for(self, objects): """ @@ -153,8 +153,8 @@ for i, strategy_type in enumerate(self.strategies): if can_handle[i]: return strategy_type - raise Exception("Could not find strategy to handle: %s" % objects) - + raise ValueError("Could not find strategy to handle: %s" % objects) + def decorate_strategies(self, transitions): """ As an alternative to decorating all strategies with @strategy, @@ -165,11 +165,11 @@ "NOT_RPYTHON" for strategy_class, generalized in transitions.items(): strategy(generalized)(strategy_class) - + # ============================= # The following methods can be overwritten to customize certain aspects of the factory. # ============================= - + def instantiate_strategy(self, strategy_type, w_self=None, initial_size=0): """ Return a functional instance of strategy_type. @@ -177,7 +177,7 @@ The two additional parameters should be ignored for singleton-strategies. """ return strategy_type() - + def log(self, w_self, new_strategy, old_strategy=None, new_element=None): """ This can be overwritten into a more appropriate call to self.logger.log @@ -190,7 +190,7 @@ typename = "" cause = "Switched" if old_strategy else "Created" self.logger.log(new_strategy_str, size, cause, old_strategy_str, typename, element_typename) - + @specialize.call_location() def log_string_for_object(self, obj): """ @@ -198,8 +198,8 @@ Keep the specialize-annotation in order to handle different kinds of objects here. 
""" return obj.__class__.__name__ if obj else "" - - # These storage accessors are specialized because the storage field is + + # These storage accessors are specialized because the storage field is # populated by erased-objects which seem to be incompatible sometimes. @specialize.call_location() def get_storage(self, obj): @@ -207,16 +207,16 @@ @specialize.call_location() def set_storage(self, obj, val): return obj._set_storage(val) - + def get_strategy(self, obj): return obj._get_strategy() def set_strategy(self, obj, val): return obj._set_strategy(val) - + # ============================= # Internal methods # ============================= - + def _patch_strategy_class(self, strategy_class, root_class): "NOT_RPYTHON" # Patch root class: Add default handler for visitor @@ -225,12 +225,12 @@ funcname = "_convert_storage_from_" + strategy_class.__name__ _convert_storage_from_OTHER.func_name = funcname setattr(root_class, funcname, _convert_storage_from_OTHER) - + # Patch strategy class: Add polymorphic visitor function def _convert_storage_to(self, w_self, new_strategy): getattr(new_strategy, funcname)(w_self, self) strategy_class._convert_storage_to = _convert_storage_to - + def _collect_subclasses(self, cls): "NOT_RPYTHON" subclasses = [] @@ -238,7 +238,7 @@ subclasses.append(subcls) subclasses.extend(self._collect_subclasses(subcls)) return subclasses - + def _order_strategies(self): "NOT_RPYTHON" def get_generalization_depth(strategy, visited=None): @@ -256,11 +256,11 @@ else: return 0 self.strategies.sort(key=get_generalization_depth, reverse=True) - + @jit.elidable def strategy_singleton_instance(self, strategy_class): return getattr(strategy_class, self.strategy_singleton_field) - + def _freeze_(self): # Instance will be frozen at compile time, making accesses constant. # The constructor does meta stuff which is not possible after translation. 
@@ -271,65 +271,65 @@ == Required: strategy_factory(self) - Access to StorageFactory """ - + def strategy_switched(self, w_self): # Overwrite this method for a hook whenever the strategy # of w_self was switched to self. pass - + # Main Fixedsize API - + def store(self, w_self, index0, value): raise NotImplementedError("Abstract method") - + def fetch(self, w_self, index0): raise NotImplementedError("Abstract method") - + def size(self, w_self): raise NotImplementedError("Abstract method") - + # Fixedsize utility methods - + def slice(self, w_self, start, end): return [ self.fetch(w_self, i) for i in range(start, end)] - + def fetch_all(self, w_self): return self.slice(w_self, 0, self.size(w_self)) - + def store_all(self, w_self, elements): for i, e in enumerate(elements): self.store(w_self, i, e) - + # Main Varsize API - + def insert(self, w_self, index0, list_w): raise NotImplementedError("Abstract method") - + def delete(self, w_self, start, end): raise NotImplementedError("Abstract method") - + # Varsize utility methods - + def append(self, w_self, list_w): - self.insert(w_self, self.size(w_self), list_w) - + self.insert(w_self, self.size(w_self), list_w) + def pop(self, w_self, index0): e = self.fetch(w_self, index0) self.delete(w_self, index0, index0+1) return e # Internal methods - + def _initialize_storage(self, w_self, initial_size): raise NotImplementedError("Abstract method") - + def _check_can_handle(self, value): raise NotImplementedError("Abstract method") - + def _convert_storage_to(self, w_self, new_strategy): # This will be overwritten in _patch_strategy_class new_strategy._convert_storage_from(w_self, self) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): # This is a very unefficient (but most generic) way to do this. 
@@ -338,16 +338,16 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) for i, field in enumerate(storage): self.store(w_self, i, field) - + def _generalize_for_value(self, w_self, value): strategy_type = self.generalized_strategy_for(value) new_instance = self.strategy_factory().switch_strategy(w_self, strategy_type, new_element=value) return new_instance - + def _cannot_handle_store(self, w_self, index0, value): new_instance = self._generalize_for_value(w_self, value) new_instance.store(w_self, index0, value) - + def _cannot_handle_insert(self, w_self, index0, list_w): # TODO - optimize. Prevent multiple generalizations and slicing done by callers. new_strategy = self._generalize_for_value(w_self, list_w[0]) @@ -358,7 +358,7 @@ class EmptyStrategy(AbstractStrategy): # == Required: # See AbstractStrategy - + def _initialize_storage(self, w_self, initial_size): assert initial_size == 0 self.set_storage(w_self, None) @@ -366,7 +366,7 @@ self.set_storage(w_self, None) def _check_can_handle(self, value): return False - + def fetch(self, w_self, index0): raise IndexError def store(self, w_self, index0, value): @@ -389,7 +389,7 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # value(self) - the single value contained in this strategy. Should be constant. 
- + def _initialize_storage(self, w_self, initial_size): storage_obj = SingleValueStrategyStorage(initial_size) self.set_storage(w_self, storage_obj) @@ -397,7 +397,7 @@ self._initialize_storage(w_self, previous_strategy.size(w_self)) def _check_can_handle(self, value): return value is self.value() - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) return self.value() @@ -411,7 +411,7 @@ self.get_storage(w_self).size -= (end - start) def size(self, w_self): return self.get_storage(w_self).size - + @jit.unroll_safe def insert(self, w_self, index0, list_w): storage_obj = self.get_storage(w_self) @@ -429,18 +429,18 @@ # See AbstractStrategy # check_index_*(...) - use mixin SafeIndexingMixin or UnsafeIndexingMixin # default_value(self) - The value to be initially contained in this strategy - + def _initialize_storage(self, w_self, initial_size): default = self._unwrap(self.default_value()) self.set_storage(w_self, [default] * initial_size) - + @jit.unroll_safe def _convert_storage_from(self, w_self, previous_strategy): size = previous_strategy.size(w_self) new_storage = [ self._unwrap(previous_strategy.fetch(w_self, i)) for i in range(size) ] self.set_storage(w_self, new_storage) - + def store(self, w_self, index0, wrapped_value): self.check_index_store(w_self, index0) if self._check_can_handle(wrapped_value): @@ -448,21 +448,21 @@ self.get_storage(w_self)[index0] = unwrapped else: self._cannot_handle_store(w_self, index0, wrapped_value) - + def fetch(self, w_self, index0): self.check_index_fetch(w_self, index0) unwrapped = self.get_storage(w_self)[index0] return self._wrap(unwrapped) - + def _wrap(self, value): raise NotImplementedError("Abstract method") - + def _unwrap(self, value): raise NotImplementedError("Abstract method") - + def size(self, w_self): return len(self.get_storage(w_self)) - + @jit.unroll_safe def insert(self, w_self, start, list_w): # This is following Python's behaviour - insert automatically @@ -475,27 +475,27 @@ else: 
self._cannot_handle_insert(w_self, start + i, list_w[i:]) return - + def delete(self, w_self, start, end): self.check_index_range(w_self, start, end) assert start >= 0 and end >= 0 del self.get_storage(w_self)[start : end] - + class GenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value def _unwrap(self, value): return value def _check_can_handle(self, wrapped_value): return True - + class WeakGenericStrategy(StrategyWithStorage): # == Required: # See StrategyWithStorage - + def _wrap(self, value): return value() or self.default_value() def _unwrap(self, value): @@ -503,7 +503,7 @@ return weakref.ref(value) def _check_can_handle(self, wrapped_value): return True - + # ============== Mixins for index checking operations ============== class SafeIndexingMixin(object): @@ -535,37 +535,37 @@ # See StrategyWithStorage # wrap(self, value) - Return a boxed object for the primitive value # unwrap(self, value) - Return the unboxed primitive value of value - + def _unwrap(self, value): return self.unwrap(value) def _wrap(self, value): return self.wrap(value) - + class SingleTypeStrategy(SpecializedStrategy): # == Required Functions: # See SpecializedStrategy # contained_type - The wrapped type that can be stored in this strategy - + def _check_can_handle(self, value): return isinstance(value, self.contained_type) - + class TaggingStrategy(SingleTypeStrategy): """This strategy uses a special tag value to represent a single additional object.""" # == Required: # See SingleTypeStrategy # wrapped_tagged_value(self) - The tagged object # unwrapped_tagged_value(self) - The unwrapped tag value representing the tagged object - + def _check_can_handle(self, value): return value is self.wrapped_tagged_value() or \ (isinstance(value, self.contained_type) and \ self.unwrap(value) != self.unwrapped_tagged_value()) - + def _unwrap(self, value): if value is self.wrapped_tagged_value(): return self.unwrapped_tagged_value() 
return self.unwrap(value) - + def _wrap(self, value): if value == self.unwrapped_tagged_value(): return self.wrapped_tagged_value() diff --git a/rpython/rlib/rstrategies/test/test_rstrategies.py b/rpython/rlib/rstrategies/test/test_rstrategies.py --- a/rpython/rlib/rstrategies/test/test_rstrategies.py +++ b/rpython/rlib/rstrategies/test/test_rstrategies.py @@ -69,7 +69,7 @@ class Factory(rs.StrategyFactory): switching_log = [] - + def __init__(self, root_class): self.decorate_strategies({ EmptyStrategy: [NilStrategy, IntegerStrategy, IntegerOrNilStrategy, GenericStrategy], @@ -79,15 +79,15 @@ IntegerOrNilStrategy: [GenericStrategy], }) rs.StrategyFactory.__init__(self, root_class) - + def instantiate_strategy(self, strategy_type, w_self=None, size=0): return strategy_type(self, w_self, size) - - def set_strategy(self, w_list, strategy): + + def set_strategy(self, w_list, strategy): old_strategy = self.get_strategy(w_list) self.switching_log.append((old_strategy, strategy)) super(Factory, self).set_strategy(w_list, strategy) - + def clear_log(self): del self.switching_log[:] @@ -107,7 +107,7 @@ class WeakGenericStrategy(AbstractStrategy): import_from_mixin(rs.WeakGenericStrategy) def default_value(self): return w_nil - + class IntegerStrategy(AbstractStrategy): import_from_mixin(rs.SingleTypeStrategy) contained_type = W_Integer @@ -123,7 +123,7 @@ def default_value(self): return w_nil def wrapped_tagged_value(self): return w_nil def unwrapped_tagged_value(self): import sys; return sys.maxint - + @rs.strategy(generalize=[], singleton=False) class NonSingletonStrategy(GenericStrategy): def __init__(self, factory, w_list=None, size=0): @@ -214,22 +214,22 @@ py.test.raises(IndexError, s.fetch, l, 10) py.test.raises(IndexError, s.delete, l, 0, 1) py.test.raises(AssertionError, W_List, EmptyStrategy, 2) # Only size 0 possible. 
- + def test_init_Nil(): do_test_initialization(NilStrategy) def test_init_Generic(): do_test_initialization(GenericStrategy, is_safe=False) - + def test_init_WeakGeneric(): do_test_initialization(WeakGenericStrategy) - + def test_init_Integer(): do_test_initialization(IntegerStrategy, default_value=W_Integer(0)) - + def test_init_IntegerOrNil(): do_test_initialization(IntegerOrNilStrategy) - + # === Test Simple store def do_test_store(cls, stored_value=W_Object(), is_safe=True, is_varsize=False): @@ -256,13 +256,13 @@ def test_store_Generic(): do_test_store(GenericStrategy, is_safe=False) - + def test_store_WeakGeneric(): do_test_store(WeakGenericStrategy, stored_value=w_nil) - + def test_store_Integer(): do_test_store(IntegerStrategy, stored_value=W_Integer(100)) - + def test_store_IntegerOrNil(): do_test_store(IntegerOrNilStrategy, stored_value=W_Integer(100)) do_test_store(IntegerOrNilStrategy, stored_value=w_nil) @@ -289,17 +289,17 @@ def test_insert_Generic(): do_test_insert(GenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_WeakGeneric(): do_test_insert(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_insert_Integer(): do_test_insert(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_insert_IntegerOrNil(): do_test_insert(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_insert(IntegerOrNilStrategy, [w_nil]*6) - + # === Test Delete def do_test_delete(cls, values, indexing_unsafe=False): @@ -319,13 +319,13 @@ def test_delete_Generic(): do_test_delete(GenericStrategy, [W_Object() for _ in range(6)], indexing_unsafe=True) - + def test_delete_WeakGeneric(): do_test_delete(WeakGenericStrategy, [W_Object() for _ in range(6)]) - + def test_delete_Integer(): do_test_delete(IntegerStrategy, [W_Integer(x) for x in range(6)]) - + def test_delete_IntegerOrNil(): do_test_delete(IntegerOrNilStrategy, [w_nil]+[W_Integer(x) for x in range(4)]+[w_nil]) do_test_delete(IntegerOrNilStrategy, [w_nil]*6) 
@@ -342,7 +342,7 @@ obj = W_Object() i = W_Integer(0) nil = w_nil - + assert_handles(EmptyStrategy, [], [nil, obj, i]) assert_handles(NilStrategy, [nil], [obj, i]) assert_handles(GenericStrategy, [nil, obj, i], []) @@ -392,7 +392,7 @@ o = W_Object() l = do_test_insert(NilStrategy, [w_nil, w_nil, o, o, w_nil, w_nil]) assert isinstance(l.strategy, GenericStrategy) - + def test_transition_to_nonSingleton(): l = W_List(NilStrategy, 5) factory.switch_strategy(l, NonSingletonStrategy) @@ -467,12 +467,12 @@ v3 = [W_Object() for _ in range(l.size()) ] assert v2 != v assert v3 != v - + l.store_all(v2) assert l.fetch_all() == v2+v[4:] l.store_all(v3) assert l.fetch_all() == v3 - + py.test.raises(IndexError, l.store_all, [W_Object() for _ in range(8) ]) # === Test Weak Strategy @@ -488,7 +488,7 @@ assert False, "The default convert_storage_from() should not be called!" def convert_storage_from_special(self, w_self, other): s.copied += 1 - + monkeypatch.setattr(AbstractStrategy, "_convert_storage_from_NilStrategy", convert_storage_from_special) monkeypatch.setattr(AbstractStrategy, "_convert_storage_from", convert_storage_from_default) try: @@ -507,7 +507,8 @@ assert factory.strategy_type_for([]) == EmptyStrategy monkeypatch.setattr(GenericStrategy, '_check_can_handle', lambda self, o: False) try: - py.test.raises(Exception, factory.strategy_type_for, [W_Object(), W_Object()]) + with py.test.raises(ValueError): + factory.strategy_type_for([W_Object(), W_Object()]) finally: monkeypatch.undo() @@ -549,4 +550,3 @@ 'Created (EmptyStrategy) size 0 objects 1', 'Created (IntegerStrategy) size 3 objects 1', 'Switched (IntegerStrategy -> IntegerOrNilStrategy) size 3 objects 1 elements: W_Object'] - \ No newline at end of file diff --git a/rpython/rlib/rweakref.py b/rpython/rlib/rweakref.py --- a/rpython/rlib/rweakref.py +++ b/rpython/rlib/rweakref.py @@ -2,10 +2,10 @@ Weakref support in RPython. Basic regular weakrefs without callbacks are supported. 
This file contains the following additions: a form of WeakKeyDictionary, and a limited version of WeakValueDictionary. -LLType only for now! """ import weakref +from rpython.annotator.model import UnionError ref = weakref.ref # basic regular weakrefs are supported in RPython @@ -191,9 +191,9 @@ class __extend__(pairtype(SomeWeakKeyDict, SomeWeakKeyDict)): def union((s_wkd1, s_wkd2)): if s_wkd1.keyclassdef is not s_wkd2.keyclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same key class!") + raise UnionError(s_wkd1, s_wkd2, "not the same key class!") if s_wkd1.valueclassdef is not s_wkd2.valueclassdef: - raise UnionError(w_wkd1, s_wkd2, "not the same value class!") + raise UnionError(s_wkd1, s_wkd2, "not the same value class!") return SomeWeakKeyDict(s_wkd1.keyclassdef, s_wkd1.valueclassdef) class Entry(extregistry.ExtRegistryEntry): diff --git a/rpython/rlib/test/ldscript_broken1.so b/rpython/rlib/test/ldscript_broken1.so new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/ldscript_broken1.so @@ -0,0 +1,4 @@ +/* GNU ld script +*/ +OUTPUT_FORMAT(elf64-x86-64) +GROUP libc.so.6 ) diff --git a/rpython/rlib/test/ldscript_broken2.so b/rpython/rlib/test/ldscript_broken2.so new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/ldscript_broken2.so @@ -0,0 +1,4 @@ +/* GNU ld script +*/ +OUTPUT_FORMAT(elf64-x86-64) +libc.so.6 diff --git a/rpython/rlib/test/ldscript_working1.so b/rpython/rlib/test/ldscript_working1.so new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/ldscript_working1.so @@ -0,0 +1,4 @@ +/* GNU ld script +*/ +OUTPUT_FORMAT(elf64-x86-64) +GROUP ( libc.so.6 ) diff --git a/rpython/rlib/test/ldscript_working2.so b/rpython/rlib/test/ldscript_working2.so new file mode 100644 --- /dev/null +++ b/rpython/rlib/test/ldscript_working2.so @@ -0,0 +1,4 @@ +/* GNU ld script +*/ +OUTPUT_FORMAT(elf64-x86-64) +GROUP(libc.so.6) diff --git a/rpython/rlib/test/test_rdynload.py b/rpython/rlib/test/test_rdynload.py --- a/rpython/rlib/test/test_rdynload.py 
+++ b/rpython/rlib/test/test_rdynload.py @@ -1,6 +1,7 @@ from rpython.rlib.rdynload import * from rpython.rlib.clibffi import get_libc_name from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.translator.platform import platform import py class TestDLOperations: @@ -21,3 +22,28 @@ lltype.Signed)), dlsym(lib, 'abs')) assert 1 == handle(1) assert 1 == handle(-1) + + def test_ldscripts(self): + # this test only makes sense on linux + if platform.name != "linux": + return + + fname = os.path.join(os.path.dirname(__file__), "ldscript_working1.so") + s = rffi.str2charp(fname) + assert "C object" in str(dlopen(s)) + rffi.free_charp(s) + + fname = os.path.join(os.path.dirname(__file__), "ldscript_working2.so") + s = rffi.str2charp(fname) + assert "C object" in str(dlopen(s)) + rffi.free_charp(s) + + fname = os.path.join(os.path.dirname(__file__), "ldscript_broken1.so") + s = rffi.str2charp(fname) + py.test.raises(DLOpenError, 'dlopen(s)') + rffi.free_charp(s) + + fname = os.path.join(os.path.dirname(__file__), "ldscript_broken2.so") From noreply at buildbot.pypy.org Thu Oct 15 11:31:17 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 11:31:17 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: index base displace parameter implemented Message-ID: <20151015093117.488221C0165@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80230:99988dacdae0 Date: 2015-10-15 11:31 +0200 http://bitbucket.org/pypy/pypy/changeset/99988dacdae0/ Log: index base displace parameter implemented diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -13,7 +13,6 @@ _nowrapper=True, sandboxsafe=True) - def binary_helper_call(name): function = getattr(support, 'arm_%s' % name) @@ -29,32 +28,47 @@ def build_rr(mnemonic, args): opcode = args[0] - assert isinstance(opcode, str) def 
encode_rr(self, reg1, reg2): self.writechar(opcode) - operands = ((reg2 & 0x0f) << 4) | (reg1 & 0xf) + operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) self.writechar(chr(operands)) return encode_rr def build_rre(mnemonic, args): opcode1,opcode2 = args[0] - assert isinstance(opcode1, str) - assert isinstance(opcode2, str) def encode_rr(self, reg1, reg2): self.writechar(opcode1) self.writechar(opcode2) self.writechar('\x00') #self.writechar('\x00') - operands = ((reg2 & 0x0f) << 4) | (reg1 & 0xf) + operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) self.writechar(chr(operands)) return encode_rr -_mnemonic_codes = { - 'AR': (build_rr, ['\x1A']), - 'AGR': (build_rre, ['\xB9\x08']) -} +def build_rx(mnemonic, args): + opcode = args[0] + def encode_rx(self, reg_or_mask, idxbasedisp): + self.writechar(opcode) + index = idxbasedisp.index + byte = (reg_or_mask & 0x0f) << 4 | index & 0xf + self.writechar(chr(byte)) + displace = idxbasedisp.displace & 0x3ff + base = idxbasedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + self.writechar(chr(byte)) + self.writechar(chr(displace & 0xff)) + + return encode_rx + def build_instr_codes(clazz): + _mnemonic_codes = { + 'AR': (build_rr, ['\x1A']), + 'AGR': (build_rre, ['\xB9\x08']), + 'AGFR': (build_rre, ['\xB9\x18']), + 'A': (build_rx, ['\x5A']), + } + for mnemonic, (builder, args) in _mnemonic_codes.items(): func = builder(mnemonic, args) name = mnemonic + "_" + builder.__name__.split("_")[1] diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -27,9 +27,11 @@ and self.index == self.instrindex): return # ignore the extra character '\x40' print self.op - print "\x09from codebuilder.py: ", hexdump(self.expected[self.instrindex:self.index] + char)+"..." - print "\x09from 'as': ", hexdump(self.expected[self.instrindex:self.index+15])+"..." 
- raise Exception("Differs") + generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + char)+"..." + print generated + expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." + print expected + raise Exception("Differs:\n" + generated + "\n" + expected) self.index += 1 def done(self): @@ -60,6 +62,26 @@ COUNT1 = 15 suffixes = {0:'', 1:'b', 2:'w', 4:'l', 8:'q'} +class FakeIndexBaseDisplace(object): + def __init__(self, index, base, disp): + self.index = index + self.base = base + self.displace = disp + + def __str__(self): + disp = self.displace + index = self.index + base = self.base + return "{disp}(%r{index},%r{base})".format(**locals()) + +def build_idx_base_disp(index_bits, base_bits, displace_bits): + + possibilities = itertools.product(range(index_bits), range(base_bits), + range(displace_bits)) + results = [] + for (index,base,disp) in possibilities: + results.append(FakeIndexBaseDisplace(index,base,disp)) + return results class TestZARCH(object): WORD = 8 @@ -68,6 +90,7 @@ REGNAMES = ['%%r%d' % i for i in REGS] accept_unnecessary_prefix = None methname = '?' 
+ INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) def reg_tests(self): return self.REGS @@ -111,47 +134,14 @@ def relative_tests(self): py.test.skip("explicit test required for %r" % (self.methname,)) - def get_all_tests(self): - return { - 'r': self.reg_tests, - 'e': lambda: [], - } - def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] - def assembler_operand_reg8(self, regnum): - assert regnum & rx86.BYTE_REG_FLAG - return self.REGNAMES8[regnum &~ rx86.BYTE_REG_FLAG] - - def assembler_operand_xmm_reg(self, regnum): - return self.XMMREGNAMES[regnum] - - def assembler_operand_stack_bp(self, position): - return '%d(%s)' % (position, self.REGNAMES[5]) - - def assembler_operand_stack_sp(self, position): - return '%d(%s)' % (position, self.REGNAMES[4]) - - def assembler_operand_memory(self, (reg1, offset)): - if not offset: offset = '' - return '%s(%s)' % (offset, self.REGNAMES[reg1]) - - def assembler_operand_array(self, (reg1, reg2, scaleshift, offset)): - if not offset: offset = '' - return '%s(%s,%s,%d)' % (offset, self.REGNAMES[reg1], - self.REGNAMES[reg2], 1< Author: Richard Plangger Branch: s390x-backend Changeset: r80231:c919b5688d2b Date: 2015-10-15 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/c919b5688d2b/ Log: index base displace with a long displacement (20 bits instead of 12) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -26,16 +26,15 @@ class Operand(object): pass -def build_rr(mnemonic, args): - opcode = args[0] +def build_rr(mnemonic, (opcode,)): def encode_rr(self, reg1, reg2): self.writechar(opcode) operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) self.writechar(chr(operands)) return encode_rr -def build_rre(mnemonic, args): - opcode1,opcode2 = args[0] +def build_rre(mnemonic, (opcode,)): + opcode1,opcode2 = opcode def encode_rr(self, reg1, reg2): self.writechar(opcode1) 
self.writechar(opcode2) @@ -45,8 +44,7 @@ self.writechar(chr(operands)) return encode_rr -def build_rx(mnemonic, args): - opcode = args[0] +def build_rx(mnemonic, (opcode,)): def encode_rx(self, reg_or_mask, idxbasedisp): self.writechar(opcode) index = idxbasedisp.index @@ -60,6 +58,21 @@ return encode_rx +def build_rxy(mnemonic, (opcode1,opcode2)): + def encode_rxy(self, reg_or_mask, idxbasedisp): + self.writechar(opcode1) + index = idxbasedisp.index + byte = (reg_or_mask & 0x0f) << 4 | index & 0xf + self.writechar(chr(byte)) + displace = idxbasedisp.displace & 0x3ff + base = idxbasedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + self.writechar(chr(byte)) + self.writechar(chr(displace & 0xff)) + self.writechar(chr(displace >> 12 & 0xff)) + self.writechar(opcode2) + + return encode_rxy def build_instr_codes(clazz): _mnemonic_codes = { @@ -67,6 +80,8 @@ 'AGR': (build_rre, ['\xB9\x08']), 'AGFR': (build_rre, ['\xB9\x18']), 'A': (build_rx, ['\x5A']), + 'AY': (build_rxy, ['\xE3','\x5A']), + 'AG': (build_rxy, ['\xE3','\x08']), } for mnemonic, (builder, args) in _mnemonic_codes.items(): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -91,6 +91,7 @@ accept_unnecessary_prefix = None methname = '?' 
INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) + INDEX_BASE_DISPLACE_LONG = build_idx_base_disp(8,8,20) def reg_tests(self): return self.REGS @@ -137,12 +138,18 @@ def assembler_operand_reg(self, regnum): return self.REGNAMES[regnum] - def get_all_assembler_operands(self): + def get_mapping_asm_to_str(self): return { 'r': self.assembler_operand_reg, 'x': lambda x: str(x), + 'y': lambda x: str(x), } + def operand_combinations(self, modes, arguments): + mapping = self.get_mapping_asm_to_str() + for mode, args in zip(modes, arguments): + yield mapping[mode](args) + def run_test(self, methname, instrname, argmodes, args_lists, instr_suffix=None): global labelcount @@ -158,12 +165,8 @@ suffix = "" if instr_suffix is not None: suffix = instr_suffix # overwrite - - assembler_operand = self.get_all_assembler_operands() - ops = [] - for mode, v in zip(argmodes, args): - ops.append(assembler_operand[mode](v)) # + ops = self.operand_combinations(argmodes, args) op = '\t%s%s %s' % (instrname.lower(), suffix, ', '.join(ops)) g.write('%s\n' % op) @@ -197,11 +200,17 @@ raise Exception("Assembler did not produce output?") return oplist, as_code + def modes(self, mode): + if mode == "rxy": + return "ry" + return mode + def make_all_tests(self, methname, modes, args=[]): tests = { 'r': self.REGS, 'e': None, 'x': self.INDEX_BASE_DISPLACE, + 'y': self.INDEX_BASE_DISPLACE_LONG, } combinations = [] for m in modes: @@ -222,6 +231,7 @@ instrname, argmodes = methname.split('_') else: instrname, argmodes = methname, '' + argmodes = self.modes(argmodes) if self.should_skip_instruction(instrname, argmodes): print "Skipping %s" % methname From noreply at buildbot.pypy.org Thu Oct 15 12:16:24 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 12:16:24 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: register immediate encoding Message-ID: <20151015101624.DC5031C0104@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: 
r80232:066bb29ef362 Date: 2015-10-15 12:16 +0200 http://bitbucket.org/pypy/pypy/changeset/066bb29ef362/ Log: register immediate encoding diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -39,7 +39,6 @@ self.writechar(opcode1) self.writechar(opcode2) self.writechar('\x00') - #self.writechar('\x00') operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) self.writechar(chr(operands)) return encode_rr @@ -55,7 +54,6 @@ byte = displace >> 8 & 0xf | base << 4 self.writechar(chr(byte)) self.writechar(chr(displace & 0xff)) - return encode_rx def build_rxy(mnemonic, (opcode1,opcode2)): @@ -71,8 +69,16 @@ self.writechar(chr(displace & 0xff)) self.writechar(chr(displace >> 12 & 0xff)) self.writechar(opcode2) + return encode_rxy - return encode_rxy +def build_ri(mnemonic, (opcode,halfopcode)): + def encode_ri(self, reg_or_mask, imm): + self.writechar(opcode) + byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) + self.writechar(chr(byte)) + self.writechar(chr(imm >> 8 & 0xff)) + self.writechar(chr(imm & 0xff)) + return encode_ri def build_instr_codes(clazz): _mnemonic_codes = { @@ -82,6 +88,8 @@ 'A': (build_rx, ['\x5A']), 'AY': (build_rxy, ['\xE3','\x5A']), 'AG': (build_rxy, ['\xE3','\x08']), + 'AGF': (build_rxy, ['\xE3','\x18']), + 'AHI': (build_ri, ['\xA7','\x0A']), } for mnemonic, (builder, args) in _mnemonic_codes.items(): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -27,9 +27,10 @@ and self.index == self.instrindex): return # ignore the extra character '\x40' print self.op - generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + char)+"..." 
+ post = self.expected[self.index+1:self.index+1+15] + generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + "!" + char + "!" + post)+"..." print generated - expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." + expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." print expected raise Exception("Differs:\n" + generated + "\n" + expected) self.index += 1 @@ -120,6 +121,11 @@ for ofs in self.stack_bp_tests(1) ] + def imm16_tests(self): + v = ([-128,-1,0,1,127] + + [random.randrange(-32768, 32767) for i in range(COUNT1)]) + return v + def imm8_tests(self): v = ([-128,-1,0,1,127] + [random.randrange(-127, 127) for i in range(COUNT1)]) @@ -143,6 +149,7 @@ 'r': self.assembler_operand_reg, 'x': lambda x: str(x), 'y': lambda x: str(x), + 'i': lambda x: str(x) } def operand_combinations(self, modes, arguments): @@ -203,14 +210,16 @@ def modes(self, mode): if mode == "rxy": return "ry" + if mode == "rre": + return "rr" return mode def make_all_tests(self, methname, modes, args=[]): tests = { 'r': self.REGS, - 'e': None, 'x': self.INDEX_BASE_DISPLACE, 'y': self.INDEX_BASE_DISPLACE_LONG, + 'i': self.imm16_tests(), } combinations = [] for m in modes: From noreply at buildbot.pypy.org Thu Oct 15 13:57:12 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 15 Oct 2015 13:57:12 +0200 (CEST) Subject: [pypy-commit] pypy osx-vmprof-support: in-progress work on osx vmprof support Message-ID: <20151015115712.2EA851C21A7@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: osx-vmprof-support Changeset: r80233:a360011efe6d Date: 2015-10-15 13:50 +0200 http://bitbucket.org/pypy/pypy/changeset/a360011efe6d/ Log: in-progress work on osx vmprof support diff --git a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h --- a/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h +++ 
b/rpython/rlib/rvmprof/src/vmprof_get_custom_offset.h @@ -18,7 +18,7 @@ intptr_t ip_l = (intptr_t)ip; return pypy_jit_stack_depth_at_loc(ip_l); - + #elif defined(CPYTHON_GET_CUSTOM_OFFSET) if (ip >= tramp_start && ip <= tramp_end) { diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -190,4 +190,24 @@ #endif +#ifdef __APPLE__ +void *vmprof_ip_from_ucontext(ucontext_t *signal_ucontext) +{ + return (void*)(signal_ucontext->uc_mcontext->__ss.__rip); +} +void *vmprof_sp_from_ucontext(ucontext_t *signal_ucontext) +{ + return (void*)(signal_ucontext->uc_mcontext->__ss.__rsp); +} +#else +void *vmprof_ip_from_ucontext(ucontext_t *signal_ucontext) +{ + return (void*)signal_ucontext->uc_mcontext.gregs[REG_EIP]; +} +void *vmprof_sp_from_ucontext(ucontext_t *signal_ucontext) +{ + return (void*)signal_ucontext->uc_mcontext.gregs[REG_ESP]; +} +#endif + #endif // BASE_GETPC_H_ diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h --- a/rpython/rlib/rvmprof/src/vmprof_main.h +++ b/rpython/rlib/rvmprof/src/vmprof_main.h @@ -32,6 +32,7 @@ #include #include "vmprof_getpc.h" #ifdef __APPLE__ +#define UNW_LOCAL_ONLY #include "libunwind.h" #else #include "vmprof_unwind.h" @@ -188,16 +189,21 @@ if (sp_offset == -1) { // it means that the ip is NOT in JITted code, so we can use the // stardard unw_step - return unw_step(cp); + int res = unw_step(cp); + if (res <= 0) { + printf("unw_step returned %d\n", res); + } + return res; } else { - // this is a horrible hack to manually walk the stack frame, by + // manually walk the stack frame, by // setting the IP and SP in the cursor - vmprof_hacked_unw_cursor_t *cp2 = (vmprof_hacked_unw_cursor_t*)cp; - void* bp = (void*)sp + sp_offset; - cp2->sp = bp; + char* bp = (char*)sp + sp_offset; bp -= sizeof(void*); - cp2->ip = ((void**)bp)[0]; + printf("AAA: %ld\n", 
(unw_word_t)((void**)bp)[0]); + printf("setting_ip: %d\n", unw_set_reg(cp, UNW_REG_IP, (unw_word_t)((void**)bp)[0])); + bp += sizeof(void*); + printf("setting_sp: %d\n", unw_set_reg(cp, UNW_REG_SP, (unw_word_t)bp)); // the ret is on the top of the stack minus WORD return 1; } @@ -209,7 +215,8 @@ * ************************************************************* */ -static int get_stack_trace(void** result, int max_depth, ucontext_t *ucontext) +static int get_stack_trace(void** result, int max_depth, ucontext_t *ucontext, + void *ip_from_ucontext, void* sp_from_ucontext) { void *ip; int n = 0; @@ -222,6 +229,10 @@ #endif int ret = unw_init_local(&cursor, &uc); + unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip); + printf("XXX: %ld %ld\n", ip_from_ucontext, ip); + unw_set_reg(&cursor, UNW_REG_IP, (unw_word_t)ip_from_ucontext); + unw_set_reg(&cursor, UNW_REG_SP, (unw_word_t)sp_from_ucontext); assert(ret >= 0); (void)ret; @@ -248,9 +259,12 @@ int first_run = (n == 0); result[n++] = ip; + printf("IP2: %ld\n", ip); n = vmprof_write_header_for_jit_addr(result, n, ip, max_depth); - if (vmprof_unw_step(&cursor, first_run) <= 0) + if (vmprof_unw_step(&cursor, first_run) <= 0) { + printf("done\n"); break; + } } return n; } @@ -297,7 +311,9 @@ st->marker = MARKER_STACKTRACE; st->count = 1; st->stack[0] = GetPC((ucontext_t*)ucontext); - depth = get_stack_trace(st->stack+1, MAX_STACK_DEPTH-2, ucontext); + depth = get_stack_trace(st->stack+1, MAX_STACK_DEPTH-2, ucontext, + vmprof_ip_from_ucontext(ucontext), + vmprof_sp_from_ucontext(ucontext)); depth++; // To account for pc value in stack[0]; st->depth = depth; st->stack[depth++] = get_current_thread_id(); From noreply at buildbot.pypy.org Thu Oct 15 14:32:45 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 14:32:45 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: immediate encoding with base register displacement (SI) Message-ID: <20151015123245.2B8711C002A@cobra.cs.uni-duesseldorf.de> Author: 
Richard Plangger Branch: s390x-backend Changeset: r80234:235a94b23040 Date: 2015-10-15 14:33 +0200 http://bitbucket.org/pypy/pypy/changeset/235a94b23040/ Log: immediate encoding with base register displacement (SI) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -5,6 +5,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.udir import udir from rpython.jit.backend.detect_cpu import autodetect +from rpython.rtyper.lltypesystem.rbuilder import always_inline clear_cache = rffi.llexternal( "__clear_cache", @@ -26,6 +27,14 @@ class Operand(object): pass + at always_inline +def encode_base_displace(mc, base_displace): + displace = base_displace.displace # & 0x3ff + base = base_displace.base & 0xf + byte = (displace >> 8 & 0xf) | base << 4 + mc.writechar(chr(byte)) + mc.writechar(chr(displace & 0xff)) + def build_rr(mnemonic, (opcode,)): def encode_rr(self, reg1, reg2): self.writechar(opcode) @@ -72,25 +81,34 @@ return encode_rxy def build_ri(mnemonic, (opcode,halfopcode)): - def encode_ri(self, reg_or_mask, imm): + def encode_ri(self, reg_or_mask, imm16): self.writechar(opcode) byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) self.writechar(chr(byte)) - self.writechar(chr(imm >> 8 & 0xff)) - self.writechar(chr(imm & 0xff)) + self.writechar(chr(imm16 >> 8 & 0xff)) + self.writechar(chr(imm16 & 0xff)) return encode_ri +def build_si(mnemonic, (opcode,)): + def encode_si(self, base_displace, uimm8): + self.writechar(opcode) + self.writechar(chr(uimm8)) + encode_base_displace(self, base_displace) + return encode_si + +_mnemonic_codes = { + 'AR': (build_rr, ['\x1A']), + 'AGR': (build_rre, ['\xB9\x08']), + 'AGFR': (build_rre, ['\xB9\x18']), + 'A': (build_rx, ['\x5A']), + 'AY': (build_rxy, ['\xE3','\x5A']), + 'AG': (build_rxy, ['\xE3','\x08']), + 'AGF': (build_rxy, ['\xE3','\x18']), + 'AHI': 
(build_ri, ['\xA7','\x0A']), + 'NI': (build_si, ['\x94']), +} + def build_instr_codes(clazz): - _mnemonic_codes = { - 'AR': (build_rr, ['\x1A']), - 'AGR': (build_rre, ['\xB9\x08']), - 'AGFR': (build_rre, ['\xB9\x18']), - 'A': (build_rx, ['\x5A']), - 'AY': (build_rxy, ['\xE3','\x5A']), - 'AG': (build_rxy, ['\xE3','\x08']), - 'AGF': (build_rxy, ['\xE3','\x18']), - 'AHI': (build_ri, ['\xA7','\x0A']), - } for mnemonic, (builder, args) in _mnemonic_codes.items(): func = builder(mnemonic, args) diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -5,6 +5,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.tool.udir import udir import itertools +import re INPUTNAME = 'checkfile_%s.s' FILENAME = 'checkfile_%s.o' @@ -28,7 +29,7 @@ return # ignore the extra character '\x40' print self.op post = self.expected[self.index+1:self.index+1+15] - generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + "!" + char + "!" + post)+"..." + generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + char )+"..." print generated expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." 
print expected @@ -75,8 +76,24 @@ base = self.base return "{disp}(%r{index},%r{base})".format(**locals()) +class FakeBaseDisplace(object): + def __init__(self, base, disp): + self.base = base + self.displace = disp + + def __str__(self): + disp = self.displace + base = self.base + return "{disp}(%r{base})".format(**locals()) + +def build_base_disp(base_bits, displace_bits): + possibilities = itertools.product(range(base_bits), range(displace_bits)) + results = [] + for (base,disp) in possibilities: + results.append(FakeBaseDisplace(base,disp)) + return results + def build_idx_base_disp(index_bits, base_bits, displace_bits): - possibilities = itertools.product(range(index_bits), range(base_bits), range(displace_bits)) results = [] @@ -91,6 +108,7 @@ REGNAMES = ['%%r%d' % i for i in REGS] accept_unnecessary_prefix = None methname = '?' + BASE_DISPLACE = build_base_disp(8,12) INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) INDEX_BASE_DISPLACE_LONG = build_idx_base_disp(8,8,20) @@ -121,8 +139,24 @@ for ofs in self.stack_bp_tests(1) ] + def imm_tests(self, name, modes, index): + from rpython.jit.backend.zarch.codebuilder import AbstractZARCHBuilder + import inspect + mode = modes[index] + assert mode == 'i' + func = getattr(AbstractZARCHBuilder, name) + args = inspect.getargspec(func).args + # 1 off, self is first arg + match = re.compile("(u?imm\d+)").match(args[index+1]) + assert match + return getattr(self, match.group(1) + "_tests")() + + def uimm16_tests(self): + v = ([0,1,65535] + + [random.randrange(0,65535) for i in range(COUNT1)]) + return v def imm16_tests(self): - v = ([-128,-1,0,1,127] + + v = ([-32768,-1,0,1,32767] + [random.randrange(-32768, 32767) for i in range(COUNT1)]) return v @@ -130,6 +164,10 @@ v = ([-128,-1,0,1,127] + [random.randrange(-127, 127) for i in range(COUNT1)]) return v + def uimm8_tests(self): + v = ([0,1,255] + + [random.randrange(0,255) for i in range(COUNT1)]) + return v def imm32_tests(self): v = ([-0x80000000, 0x7FFFFFFF, 128, 
256, -129, -255] + @@ -147,6 +185,7 @@ def get_mapping_asm_to_str(self): return { 'r': self.assembler_operand_reg, + 's': lambda x: str(x), 'x': lambda x: str(x), 'y': lambda x: str(x), 'i': lambda x: str(x) @@ -216,17 +255,17 @@ def make_all_tests(self, methname, modes, args=[]): tests = { - 'r': self.REGS, - 'x': self.INDEX_BASE_DISPLACE, - 'y': self.INDEX_BASE_DISPLACE_LONG, - 'i': self.imm16_tests(), + 'r': lambda i: self.REGS, + 'x': lambda i: self.INDEX_BASE_DISPLACE, + 'y': lambda i: self.INDEX_BASE_DISPLACE_LONG, + 'i': lambda i: self.imm_tests(methname, modes, i), + 's': lambda i: self.BASE_DISPLACE, } combinations = [] - for m in modes: - if tests[m] is not None: - elems = tests[m] - random.shuffle(elems) - combinations.append(elems) + for i,m in enumerate(modes): + elems = tests[m](i) + random.shuffle(elems) + combinations.append(elems) results = [] for args in itertools.product(*combinations): results.append(args) From noreply at buildbot.pypy.org Thu Oct 15 14:35:05 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 14:35:05 +0200 (CEST) Subject: [pypy-commit] pypy default: merged vecopt Message-ID: <20151015123505.08F051C002A@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: Changeset: r80235:4a3dc93c017c Date: 2015-10-15 14:35 +0200 http://bitbucket.org/pypy/pypy/changeset/4a3dc93c017c/ Log: merged vecopt diff too long, truncating to 2000 out of 14247 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -80,3 +80,8 @@ allow automatic casting in ufuncs (and frompypyfunc) to cast the arguments to the allowed function type 
declarations, fixes various failures in linalg cffi functions + +.. branch: vecopt +.. branch: vecopt-merge + +A new optimization pass to use emit vectorized loops diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -2,6 +2,7 @@ It should not be imported by the module itself """ import re +import py from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError @@ -12,6 +13,10 @@ from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache +from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary +from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, + UserDelAction) +from pypy.interpreter.pyframe import PyFrame class BogusBytecode(Exception): @@ -32,12 +37,11 @@ class BadToken(Exception): pass - SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring", "count_nonzero", "argsort", "cumsum", "logical_xor_reduce"] -TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted', 'multiply'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype', 'reshape'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -57,6 +61,10 @@ w_OverflowError = W_TypeObject("OverflowError") w_NotImplementedError = W_TypeObject("NotImplementedError") w_AttributeError = W_TypeObject("AttributeError") + w_StopIteration = W_TypeObject("StopIteration") + w_KeyError = W_TypeObject("KeyError") + w_SystemExit = W_TypeObject("SystemExit") + w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_None = None w_bool = W_TypeObject("bool") @@ -73,12 +81,24 @@ w_object = W_TypeObject("object") w_buffer = 
W_TypeObject("buffer") - def __init__(self): + def __init__(self, config=None): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild self.w_Ellipsis = special.Ellipsis() self.w_NotImplemented = special.NotImplemented() + if config is None: + from pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(translating=False) + self.config = config + + self.interned_strings = make_weak_value_dictionary(self, str, W_Root) + self.builtin = DictObject({}) + self.FrameClass = PyFrame + self.threadlocals = ThreadLocals() + self.actionflag = ActionFlag() # changed by the signal module + self.check_signal_action = None # changed by the signal module + def _freeze_(self): return True @@ -89,12 +109,17 @@ return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def len(self, w_obj): - assert isinstance(w_obj, ListObject) - return self.wrap(len(w_obj.items)) + if isinstance(w_obj, ListObject): + return self.wrap(len(w_obj.items)) + elif isinstance(w_obj, DictObject): + return self.wrap(len(w_obj.items)) + raise NotImplementedError def getattr(self, w_obj, w_attr): assert isinstance(w_attr, StringObject) - return w_obj.getdictvalue(self, w_attr.v) + if isinstance(w_obj, DictObject): + return w_obj.getdictvalue(self, w_attr) + return None def isinstance_w(self, w_obj, w_tp): try: @@ -102,6 +127,22 @@ except AttributeError: return False + def iter(self, w_iter): + if isinstance(w_iter, ListObject): + raise NotImplementedError + #return IterObject(space, w_iter.items) + elif isinstance(w_iter, DictObject): + return IterDictObject(self, w_iter) + + def next(self, w_iter): + return w_iter.next() + + def contains(self, w_iter, w_key): + if isinstance(w_iter, DictObject): + return self.wrap(w_key in w_iter.items) + + raise NotImplementedError + def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): return (self.int_w(w_idx), 0, 0, 1) @@ -123,6 +164,10 @@ lgt = (stop - start - 1) / step + 1 return (start, stop, step, lgt) + 
def unicode_from_object(self, w_item): + # XXX + return StringObject("") + @specialize.argtype(1) def wrap(self, obj): if isinstance(obj, float): @@ -145,7 +190,55 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def newfloat(self, f): + return self.float(f) + + def le(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_le(self, w_obj2) + + def lt(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_lt(self, w_obj2) + + def ge(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_ge(self, w_obj2) + + def add(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_add(self, w_obj2) + + def sub(self, w_obj1, w_obj2): + return self.wrap(1) + + def mul(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_mul(self, w_obj2) + + def pow(self, w_obj1, w_obj2, _): + return self.wrap(1) + + def neg(self, w_obj1): + return self.wrap(0) + + def repr(self, w_obj1): + return self.wrap('fake') + def getitem(self, obj, index): + if isinstance(obj, DictObject): + w_dict = obj.getdict(self) + if w_dict is not None: + try: + return w_dict[index] + except KeyError, e: + raise OperationError(self.w_KeyError, self.wrap("key error")) + assert isinstance(obj, ListObject) assert isinstance(index, IntObject) return obj.items[index.intval] @@ -191,12 +284,24 @@ return w_obj.v raise NotImplementedError + def unicode_w(self, w_obj): + # XXX + if isinstance(w_obj, StringObject): + return unicode(w_obj.v) + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj assert isinstance(w_obj, boxes.W_GenericBox) return 
self.int(w_obj.descr_int(self)) + def long(self, w_obj): + if isinstance(w_obj, LongObject): + return w_obj + assert isinstance(w_obj, boxes.W_GenericBox) + return self.int(w_obj.descr_long(self)) + def str(self, w_obj): if isinstance(w_obj, StringObject): return w_obj @@ -240,9 +345,29 @@ def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) - def call_function(self, tp, w_dtype): + def call_function(self, tp, w_dtype, *args): + if tp is self.w_float: + if isinstance(w_dtype, boxes.W_Float64Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Float32Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Int64Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int32Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int16Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int8Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, IntObject): + return FloatObject(float(w_dtype.intval)) + if tp is self.w_int: + if isinstance(w_dtype, FloatObject): + return IntObject(int(w_dtype.floatval)) + return w_dtype + @specialize.arg(2) def call_method(self, w_obj, s, *args): # XXX even the hacks have hacks return getattr(w_obj, 'descr_' + s)(self, *args) @@ -258,21 +383,21 @@ def newtuple(self, list_w): return ListObject(list_w) - def newdict(self): - return {} + def newdict(self, module=True): + return DictObject({}) - def setitem(self, dict, item, value): - dict[item] = value + def newint(self, i): + if isinstance(i, IntObject): + return i + return IntObject(i) - def len_w(self, w_obj): - if isinstance(w_obj, ListObject): - return len(w_obj.items) - # XXX array probably - assert False + def setitem(self, obj, index, value): + obj.items[index] = value def exception_match(self, w_exc_type, w_check_class): - # Good enough for now - raise NotImplementedError + assert 
isinstance(w_exc_type, W_TypeObject) + assert isinstance(w_check_class, W_TypeObject) + return w_exc_type.name == w_check_class.name class FloatObject(W_Root): tp = FakeSpace.w_float @@ -283,6 +408,9 @@ tp = FakeSpace.w_bool def __init__(self, boolval): self.intval = boolval +FakeSpace.w_True = BoolObject(True) +FakeSpace.w_False = BoolObject(False) + class IntObject(W_Root): tp = FakeSpace.w_int @@ -299,6 +427,33 @@ def __init__(self, items): self.items = items +class DictObject(W_Root): + tp = FakeSpace.w_dict + def __init__(self, items): + self.items = items + + def getdict(self, space): + return self.items + + def getdictvalue(self, space, key): + return self.items[key] + +class IterDictObject(W_Root): + def __init__(self, space, w_dict): + self.space = space + self.items = w_dict.items.items() + self.i = 0 + + def __iter__(self): + return self + + def next(self): + space = self.space + if self.i >= len(self.items): + raise OperationError(space.w_StopIteration, space.wrap("stop iteration")) + self.i += 1 + return self.items[self.i-1][0] + class SliceObject(W_Root): tp = FakeSpace.w_slice def __init__(self, start, stop, step): @@ -414,6 +569,15 @@ w_rhs = IntObject(int(w_rhs.floatval)) assert isinstance(w_lhs, W_NDimArray) w_res = w_lhs.descr_getitem(interp.space, w_rhs) + if isinstance(w_rhs, IntObject): + if isinstance(w_res, boxes.W_Float64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Float32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Int64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) + if isinstance(w_res, boxes.W_Int32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and @@ -425,9 +589,22 @@ def __repr__(self): return '(%r %s %r)' % (self.lhs, self.name, self.rhs) -class FloatConstant(Node): +class 
NumberConstant(Node): def __init__(self, v): - self.v = float(v) + if isinstance(v, int): + self.v = v + elif isinstance(v, float): + self.v = v + else: + assert isinstance(v, str) + assert len(v) > 0 + c = v[-1] + if c == 'f': + self.v = float(v[:-1]) + elif c == 'i': + self.v = int(v[:-1]) + else: + self.v = float(v) def __repr__(self): return "Const(%s)" % self.v @@ -519,8 +696,24 @@ def execute(self, interp): if self.v == 'int': dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'int8': + dtype = get_dtype_cache(interp.space).w_int8dtype + elif self.v == 'int16': + dtype = get_dtype_cache(interp.space).w_int16dtype + elif self.v == 'int32': + dtype = get_dtype_cache(interp.space).w_int32dtype + elif self.v == 'uint': + dtype = get_dtype_cache(interp.space).w_uint64dtype + elif self.v == 'uint8': + dtype = get_dtype_cache(interp.space).w_uint8dtype + elif self.v == 'uint16': + dtype = get_dtype_cache(interp.space).w_uint16dtype + elif self.v == 'uint32': + dtype = get_dtype_cache(interp.space).w_uint32dtype elif self.v == 'float': dtype = get_dtype_cache(interp.space).w_float64dtype + elif self.v == 'float32': + dtype = get_dtype_cache(interp.space).w_float32dtype else: raise BadToken('unknown v to dtype "%s"' % self.v) return dtype @@ -556,8 +749,13 @@ raise ArgumentMismatch if self.name == "sum": if len(self.args)>1: - w_res = arr.descr_sum(interp.space, + var = self.args[1] + if isinstance(var, DtypeClass): + w_res = arr.descr_sum(interp.space, None, var.execute(interp)) + else: + w_res = arr.descr_sum(interp.space, self.args[1].execute(interp)) + else: w_res = arr.descr_sum(interp.space) elif self.name == "prod": @@ -577,10 +775,10 @@ w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative - w_res = neg.call(interp.space, [arr], None, None, None) + w_res = neg.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "cos": cos = ufuncs.get(interp.space).cos - w_res = 
cos.call(interp.space, [arr], None, None, None) + w_res = cos.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) elif self.name == "argsort": @@ -598,6 +796,8 @@ raise ArgumentNotAnArray if self.name == "dot": w_res = arr.descr_dot(interp.space, arg) + elif self.name == 'multiply': + w_res = arr.descr_mul(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) elif self.name == "searchsorted": @@ -617,7 +817,7 @@ if self.name == "where": w_res = where(interp.space, arr, arg1, arg2) else: - assert False + assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: if len(self.args) != 2: raise ArgumentMismatch @@ -626,6 +826,11 @@ w_res = arr.descr_view(interp.space, arg) elif self.name == 'astype': w_res = arr.descr_astype(interp.space, arg) + elif self.name == 'reshape': + w_arg = self.args[1] + assert isinstance(w_arg, ArrayConstant) + order = -1 + w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False else: @@ -645,7 +850,7 @@ return W_NDimArray.new_scalar(interp.space, dtype, w_res) _REGEXES = [ - ('-?[\d\.]+', 'number'), + ('-?[\d\.]+(i|f)?', 'number'), ('\[', 'array_left'), (':', 'colon'), ('\w+', 'identifier'), @@ -719,7 +924,7 @@ start = 0 else: if tokens.get(0).name != 'colon': - return FloatConstant(start_tok.v) + return NumberConstant(start_tok.v) start = int(start_tok.v) tokens.pop() if not tokens.get(0).name in ['colon', 'number']: @@ -751,8 +956,30 @@ stack.append(ArrayClass()) elif token.v.strip(' ') == 'int': stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'int8': + stack.append(DtypeClass('int8')) + elif token.v.strip(' ') == 'int16': + stack.append(DtypeClass('int16')) + elif token.v.strip(' ') == 'int32': + stack.append(DtypeClass('int32')) + elif token.v.strip(' ') == 'int64': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'uint': + stack.append(DtypeClass('uint')) + elif 
token.v.strip(' ') == 'uint8': + stack.append(DtypeClass('uint8')) + elif token.v.strip(' ') == 'uint16': + stack.append(DtypeClass('uint16')) + elif token.v.strip(' ') == 'uint32': + stack.append(DtypeClass('uint32')) + elif token.v.strip(' ') == 'uint64': + stack.append(DtypeClass('uint')) elif token.v.strip(' ') == 'float': stack.append(DtypeClass('float')) + elif token.v.strip(' ') == 'float32': + stack.append(DtypeClass('float32')) + elif token.v.strip(' ') == 'float64': + stack.append(DtypeClass('float')) else: stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': @@ -805,7 +1032,7 @@ while True: token = tokens.pop() if token.name == 'number': - elems.append(FloatConstant(token.v)) + elems.append(NumberConstant(token.v)) elif token.name == 'array_left': elems.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'paren_left': diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -97,7 +97,7 @@ finally: self.iter.reset(self.state, mutate=True) - def descr___array_wrap__(self, space, obj): + def descr___array_wrap__(self, space, obj, w_context=None): return obj W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -83,6 +83,12 @@ self._indices = indices self.offset = offset + def same(self, other): + if self.offset == other.offset and \ + self.index == other.index and \ + self._indices == other._indices: + return self.iterator.same_shape(other.iterator) + return False class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', @@ -100,6 +106,7 @@ self.array = array self.size = size self.ndim_m1 = len(shape) - 1 + # self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = 
backstrides @@ -113,6 +120,17 @@ factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors + def same_shape(self, other): + """ Iterating over the same element """ + if not self.contiguous or not other.contiguous: + return False + return (self.contiguous == other.contiguous and + self.array.dtype is self.array.dtype and + self.shape_m1 == other.shape_m1 and + self.strides == other.strides and + self.backstrides == other.backstrides and + self.factors == other.factors) + @jit.unroll_safe def reset(self, state=None, mutate=False): index = 0 @@ -138,9 +156,13 @@ indices = state._indices offset = state.offset if self.contiguous: - offset += self.array.dtype.elsize + elsize = self.array.dtype.elsize + jit.promote(elsize) + offset += elsize elif self.ndim_m1 == 0: - offset += self.strides[0] + stride = self.strides[0] + jit.promote(stride) + offset += stride else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -192,7 +214,7 @@ return state.index >= self.size def getitem(self, state): - assert state.iterator is self + # assert state.iterator is self return self.array.getitem(state.offset) def getitem_bool(self, state): @@ -203,7 +225,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def AxisIter(array, shape, axis): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -2,6 +2,7 @@ operations. This is the place to look for all the computations that iterate over all the array elements. 
""" +import py from pypy.interpreter.error import OperationError from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder @@ -13,11 +14,6 @@ from pypy.interpreter.argument import Arguments -call2_driver = jit.JitDriver( - name='numpy_call2', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') - def call2(space, shape, func, calc_dtype, w_lhs, w_rhs, out): if w_lhs.get_size() == 1: w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) @@ -38,24 +34,104 @@ out_iter, out_state = out.create_iter(shape) shapelen = len(shape) res_dtype = out.get_dtype() - while not out_iter.done(out_state): - call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype) - if left_iter: - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - left_state = left_iter.next(left_state) - if right_iter: - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) - right_state = right_iter.next(right_state) - out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( - space, res_dtype)) - out_state = out_iter.next(out_state) - return out + call2_func = try_to_share_iterators_call2(left_iter, right_iter, + left_state, right_state, out_state) + params = (space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state) + return call2_func(*params) + +def try_to_share_iterators_call2(left_iter, right_iter, left_state, right_state, out_state): + # these are all possible iterator sharing combinations + # left == right == out + # left == right + # left == out + # right == out + right_out_equal = False + if right_iter: + # rhs is not a scalar + if out_state.same(right_state): + right_out_equal = True + # + if not left_iter: + # lhs is a scalar + if right_out_equal: + return call2_advance_out_left + else: + # worst case, nothing can be shared and lhs is a scalar + return 
call2_advance_out_left_right + else: + # lhs is NOT a scalar + if out_state.same(left_state): + # (2) out and left are the same -> remove left + if right_out_equal: + # the best case + return call2_advance_out + else: + return call2_advance_out_right + else: + if right_out_equal: + # right and out are equal, only advance left and out + return call2_advance_out_left + else: + if right_iter and right_state.same(left_state): + # left and right are equal, but still need to advance out + return call2_advance_out_left_eq_right + else: + # worst case, nothing can be shared + return call2_advance_out_left_right + + assert 0, "logical problem with the selection of the call2 case" + +def generate_call2_cases(name, left_state, right_state): + call2_driver = jit.JitDriver(name='numpy_call2_' + name, + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) + # + advance_left_state = left_state == "left_state" + advance_right_state = right_state == "right_state" + code = """ + def method(space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state): + while not out_iter.done(out_state): + call2_driver.jit_merge_point(shapelen=shapelen, func=func, + calc_dtype=calc_dtype, res_dtype=res_dtype) + if left_iter: + w_left = left_iter.getitem({left_state}).convert_to(space, calc_dtype) + if right_iter: + w_right = right_iter.getitem({right_state}).convert_to(space, calc_dtype) + w_out = func(calc_dtype, w_left, w_right) + out_iter.setitem(out_state, w_out.convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + if advance_left_state and left_iter: + left_state = left_iter.next(left_state) + if advance_right_state and right_iter: + right_state = right_iter.next(right_state) + # + # if not set to None, the values will be loop carried + # (for the var,var case), forcing the vectorization to unpack + # the vector registers at the end of the loop + if left_iter: + 
w_left = None + if right_iter: + w_right = None + return out + """ + exec(py.code.Source(code.format(left_state=left_state,right_state=right_state)).compile(), locals()) + method.__name__ = "call2_" + name + return method + +call2_advance_out = generate_call2_cases("inc_out", "out_state", "out_state") +call2_advance_out_left = generate_call2_cases("inc_out_left", "left_state", "out_state") +call2_advance_out_right = generate_call2_cases("inc_out_right", "out_state", "right_state") +call2_advance_out_left_eq_right = generate_call2_cases("inc_out_left_eq_right", "left_state", "left_state") +call2_advance_out_left_right = generate_call2_cases("inc_out_left_right", "left_state", "right_state") call1_driver = jit.JitDriver( name='numpy_call1', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') + greens=['shapelen', 'share_iterator', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) def call1(space, shape, func, calc_dtype, w_obj, w_ret): obj_iter, obj_state = w_obj.create_iter(shape) @@ -63,13 +139,24 @@ out_iter, out_state = w_ret.create_iter(shape) shapelen = len(shape) res_dtype = w_ret.get_dtype() + share_iterator = out_state.same(obj_state) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, + share_iterator=share_iterator, calc_dtype=calc_dtype, res_dtype=res_dtype) - elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + if share_iterator: + # use out state as param to getitem + elem = obj_iter.getitem(out_state).convert_to(space, calc_dtype) + else: + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) - out_state = out_iter.next(out_state) - obj_state = obj_iter.next(obj_state) + if share_iterator: + # only advance out, they share the same iteration space + out_state = out_iter.next(out_state) + else: + out_state = out_iter.next(out_state) + obj_state = 
obj_iter.next(obj_state) + elem = None return w_ret call_many_to_one_driver = jit.JitDriver( @@ -145,7 +232,7 @@ vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) - # w_outvals should be a tuple, but func can return a single value as well + # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): @@ -161,7 +248,7 @@ setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', vectorize=True) def setslice(space, shape, target, source): if not shape: @@ -239,7 +326,8 @@ reduce_flat_driver = jit.JitDriver( name='numpy_reduce_flat', - greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto') + greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto', + vectorize = True) def reduce_flat(space, func, w_arr, calc_dtype, done_func, identity): obj_iter, obj_state = w_arr.create_iter() @@ -260,10 +348,10 @@ obj_state = obj_iter.next(obj_state) return cur_value - reduce_driver = jit.JitDriver( name='numpy_reduce', - greens=['shapelen', 'func', 'dtype'], reds='auto') + greens=['shapelen', 'func', 'dtype'], reds='auto', + vectorize=True) def reduce(space, func, w_arr, axis_flags, dtype, out, identity): out_iter, out_state = out.create_iter() @@ -298,7 +386,7 @@ accumulate_flat_driver = jit.JitDriver( name='numpy_accumulate_flat', greens=['shapelen', 'func', 'dtype', 'out_dtype'], - reds='auto') + reds='auto', vectorize=True) def accumulate_flat(space, func, w_arr, calc_dtype, w_out, identity): arr_iter, arr_state = w_arr.create_iter() @@ -325,7 +413,9 @@ accumulate_driver = jit.JitDriver( name='numpy_accumulate', - greens=['shapelen', 'func', 'calc_dtype'], reds='auto') + greens=['shapelen', 'func', 'calc_dtype'], + reds='auto', + 
vectorize=True) def accumulate(space, func, w_arr, axis, calc_dtype, w_out, identity): @@ -375,7 +465,8 @@ where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def where(space, out, shape, arr, x, y, dtype): out_iter, out_state = out.create_iter(shape) @@ -416,7 +507,6 @@ state = x_state return out - def _new_argmin_argmax(op_name): arg_driver = jit.JitDriver(name='numpy_' + op_name, greens = ['shapelen', 'dtype'], @@ -481,7 +571,8 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -524,8 +615,8 @@ lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += s1 - i2 += s2 + i1 += jit.promote(s1) + i2 += jit.promote(s2) outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) @@ -535,7 +626,8 @@ count_all_true_driver = jit.JitDriver(name = 'numpy_count', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def count_all_true_concrete(impl): s = 0 @@ -556,7 +648,8 @@ nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', greens = ['shapelen', 'dims', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def nonzero(res, arr, box): res_iter, res_state = res.create_iter() @@ -578,7 +671,8 @@ getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def getitem_filter(res, arr, index): res_iter, res_state = res.create_iter() @@ -606,7 +700,8 @@ setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + 
vectorize=True) def setitem_filter(space, arr, index, value): arr_iter, arr_state = arr.create_iter() @@ -635,7 +730,8 @@ flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_getitem(res, base_iter, base_state, step): ri, rs = res.create_iter() @@ -649,7 +745,8 @@ flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() @@ -758,7 +855,8 @@ byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def byteswap(from_, to): dtype = from_.dtype @@ -773,7 +871,8 @@ choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -807,7 +906,8 @@ clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def clip(space, arr, shape, min, max, out): assert min or max @@ -842,7 +942,8 @@ round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def round(space, arr, dtype, shape, decimals, out): arr_iter, arr_state = arr.create_iter(shape) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -7,6 +7,7 @@ # structures to describe slicing class BaseChunk(object): + _attrs_ = ['step','out_dim'] pass diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ 
b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,6 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, - ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + ArrayConstant, NumberConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace, W_NDimArray) @@ -25,30 +25,30 @@ interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [FloatConstant(1), FloatConstant(2), - FloatConstant(3)] + assert st.expr.items == [NumberConstant(1), NumberConstant(2), + NumberConstant(3)] def test_array_literal2(self): code = "a = [[1],[2],[3]]" interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [ArrayConstant([FloatConstant(1)]), - ArrayConstant([FloatConstant(2)]), - ArrayConstant([FloatConstant(3)])] + assert st.expr.items == [ArrayConstant([NumberConstant(1)]), + ArrayConstant([NumberConstant(2)]), + ArrayConstant([NumberConstant(3)])] def test_expr_1(self): code = "b = a + 1" interp = self.compile(code) assert (interp.code.statements[0].expr == - Operator(Variable("a"), "+", FloatConstant(1))) + Operator(Variable("a"), "+", NumberConstant(1))) def test_expr_2(self): code = "b = a + b - 3" interp = self.compile(code) assert (interp.code.statements[0].expr == Operator(Operator(Variable("a"), "+", Variable("b")), "-", - FloatConstant(3))) + NumberConstant(3))) def test_expr_3(self): # an equivalent of range @@ -60,13 +60,13 @@ code = "3 + a" interp = self.compile(code) assert interp.code.statements[0] == Execute( - Operator(FloatConstant(3), "+", Variable("a"))) + Operator(NumberConstant(3), "+", Variable("a"))) def test_array_access(self): code = "a -> 3" interp = self.compile(code) assert interp.code.statements[0] == Execute( - Operator(Variable("a"), "->", FloatConstant(3))) + Operator(Variable("a"), "->", 
NumberConstant(3))) def test_function_call(self): code = "sum(a)" @@ -81,7 +81,7 @@ """ interp = self.compile(code) assert interp.code.statements[0] == Assignment( - 'a', Operator(Variable('b'), "+", FloatConstant(3))) + 'a', Operator(Variable('b'), "+", NumberConstant(3))) class TestRunner(object): @@ -272,6 +272,14 @@ """) assert interp.results[0].value == 3 + def test_any(self): + interp = self.run(""" + a = [0,0,0,0,0.1,0,0,0,0] + b = any(a) + b -> 0 + """) + assert interp.results[0].value == 1 + def test_where(self): interp = self.run(''' a = [1, 0, 3, 0] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -4,17 +4,37 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats +from rpython.jit.metainterp.jitprof import Profiler +from rpython.jit.metainterp import counter +from rpython.rlib.jit import Counters +from rpython.rlib.rarithmetic import intmask from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +from rpython.jit.backend.detect_cpu import getcpuclass -py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) + +def get_profiler(): + from rpython.jit.metainterp import pyjitpl + return pyjitpl._warmrunnerdesc.metainterp_sd.profiler class TestNumpyJit(LLJitMixin): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" graph = None interp = None + def setup_method(self, method): + if not self.CPUClass.vector_extension: + py.test.skip("needs vector extension to run (for now)") + + def 
assert_float_equal(self, f1, f2, delta=0.0001): + assert abs(f1-f2) < delta + def setup_class(cls): default = """ a = [1,2,3,4] @@ -52,12 +72,29 @@ w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value + if isinstance(w_res, boxes.W_Float32Box): + return float(w_res.value) elif isinstance(w_res, boxes.W_Int64Box): return float(w_res.value) + elif isinstance(w_res, boxes.W_Int32Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int16Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int8Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_UInt64Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt32Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt16Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt8Box): + return float(intmask(w_res.value)) elif isinstance(w_res, boxes.W_LongBox): return float(w_res.value) elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) + print "ERROR: did not implement return type for interpreter" raise TypeError(w_res) if self.graph is None: @@ -65,122 +102,354 @@ listops=True, listcomp=True, backendopt=True, - graph_and_interp_only=True) + graph_and_interp_only=True, + ProfilerClass=Profiler, + vec=True) self.__class__.interp = interp self.__class__.graph = graph + def check_vectorized(self, expected_tried, expected_success): + profiler = get_profiler() + tried = profiler.get_counter(Counters.OPT_VECTORIZE_TRY) + success = profiler.get_counter(Counters.OPT_VECTORIZED) + assert tried >= success + assert tried == expected_tried + assert success == expected_success + def run(self, name): self.compile_graph() + profiler = get_profiler() + profiler.start() reset_jit() i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) return retval - def define_add(): + def define_float32_copy(): + return """ + a = astype(|30|, float32) + x1 = a -> 7 
+ x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + r = x1 + x2 + x3 + x4 + r + """ + def test_float32_copy(self): + result = self.run("float32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_int32_copy(): + return """ + a = astype(|30|, int32) + x1 = a -> 7 + x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + x1 + x2 + x3 + x4 + """ + def test_int32_copy(self): + result = self.run("int32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_float32_add(): + return """ + a = astype(|30|, float32) + b = a + a + b -> 15 + """ + def test_float32_add(self): + result = self.run("float32_add") + self.assert_float_equal(result, 15.0 + 15.0) + self.check_vectorized(2, 2) + + def define_float_add(): return """ a = |30| b = a + a - b -> 3 + b -> 17 """ - - def test_add(self): - result = self.run("add") - py.test.skip("don't run for now") - self.check_simple_loop({'raw_load': 2, 'float_add': 1, - 'raw_store': 1, 'int_add': 1, - 'int_ge': 1, 'guard_false': 1, 'jump': 1, - 'arraylen_gc': 1}) - assert result == 3 + 3 - - def define_float_add(): - return """ - a = |30| + 3 - a -> 3 - """ - def test_float_add(self): result = self.run("float_add") - assert result == 3 + 3 - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + self.assert_float_equal(result, 17.0 + 17.0) + self.check_vectorized(1, 1) - def define_pow(): + def define_uint_add(): return """ - a = |30| ** 2 - a -> 3 + a = astype(|30|, uint64) + b = a + a + b -> 17 """ + def test_uint_add(self): + result = self.run("uint_add") + assert int(result) == 17+17 + self.check_vectorized(2, 1) - def test_pow(self): - result = self.run("pow") - assert result == 3 ** 2 - self.check_trace_count(1) - self.check_simple_loop({ - 'call': 2, # ccall_pow / _ll_1_threadlocalref_get(rpy_errno) - 'float_eq': 2, - 'float_mul': 2, - 
'guard_false': 2, - 'guard_not_invalidated': 1, - 'guard_true': 2, - 'int_add': 3, - 'int_ge': 1, - 'int_is_true': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + def define_float32_add_const(): + return """ + a = astype(|30|, float32) + b = a + 77.345 + b -> 29 + """ + def test_float32_add_const(self): + result = self.run("float32_add_const") + self.assert_float_equal(result, 29.0 + 77.345) + self.check_vectorized(2, 2) - def define_pow_int(): + def define_float_add_const(): + return """ + a = |30| + 25.5 + a -> 29 + """ + def test_float_add_const(self): + result = self.run("float_add_const") + self.assert_float_equal(result, 29.0 + 25.5) + self.check_vectorized(1, 1) + + def define_int_add_const(): return """ a = astype(|30|, int) - b = astype([2], int) - c = a ** b - c -> 3 + b = a + 1i + d = astype(|30|, int) + c = d + 2.0 + x1 = b -> 7 + x2 = b -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 """ + def test_int_add_const(self): + result = self.run("int_add_const") + assert int(result) == 7+1+8+1+11+2+12+2 + self.check_vectorized(2, 2) - def test_pow_int(self): - result = self.run("pow_int") - assert result == 3 ** 2 - self.check_trace_count(2) # extra one for the astype - del get_stats().loops[0] # we don't care about it - self.check_simple_loop({ - 'call': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + def define_int_expand(): + return """ + a = astype(|30|, int) + c = astype(|1|, int) + c[0] = 16 + b = a + c + x1 = b -> 7 + x2 = b -> 8 + x1 + x2 + """ + def test_int_expand(self): + result = self.run("int_expand") + assert int(result) == 7+16+8+16 + self.check_vectorized(2, 2) + + def define_int32_expand(): + return """ + a = astype(|30|, int32) + c = astype(|1|, int32) + c[0] = 16i + b = a + c + x1 = b -> 7 + x2 = b -> 8 + x1 + x2 + """ + def test_int32_expand(self): + result = self.run("int32_expand") + assert int(result) == 7+16+8+16 + 
self.check_vectorized(2, 1) + + def define_int16_expand(): + return """ + a = astype(|30|, int16) + c = astype(|1|, int16) + c[0] = 16i + b = a + c + d = b -> 7:15 + sum(d) + """ + def test_int16_expand(self): + result = self.run("int16_expand") + i = 8 + assert int(result) == i*16 + sum(range(7,7+i)) + # currently is is not possible to accum for types with < 8 bytes + self.check_vectorized(3, 0) + + def define_int8_expand(): + return """ + a = astype(|30|, int8) + c = astype(|1|, int8) + c[0] = 8i + b = a + c + d = b -> 0:17 + sum(d) + """ + def test_int8_expand(self): + result = self.run("int8_expand") + assert int(result) == 17*8 + sum(range(0,17)) + # does not pay off to cast float64 -> int8 + # neither does sum + # a + c should work, but it is given as a parameter + # thus the accum must handle this! + self.check_vectorized(3, 0) + + def define_int32_add_const(): + return """ + a = astype(|30|, int32) + b = a + 1i + d = astype(|30|, int32) + c = d + 2.0 + x1 = b -> 7 + x2 = b -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_int32_add_const(self): + result = self.run("int32_add_const") + assert int(result) == 7+1+8+1+11+2+12+2 + self.check_vectorized(2, 2) + + def define_float_mul_array(): + return """ + a = astype(|30|, float) + b = astype(|30|, float) + c = a * b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_float_mul_array(self): + result = self.run("float_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_int32_mul_array(): + return """ + a = astype(|30|, int32) + b = astype(|30|, int32) + c = a * b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_int32_mul_array(self): + result = self.run("int32_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_float32_mul_array(): + return """ + a = astype(|30|, float32) + b = astype(|30|, float32) + c = a 
* b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_float32_mul_array(self): + result = self.run("float32_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_conversion(): + return """ + a = astype(|30|, int8) + b = astype(|30|, int) + c = a + b + sum(c) + """ + def test_conversion(self): + result = self.run("conversion") + assert result == sum(range(30)) + sum(range(30)) + self.check_vectorized(4, 2) # only sum and astype(int) succeed def define_sum(): return """ a = |30| sum(a) """ - def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 2, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) + self.check_vectorized(1, 1) + + def define_sum(): + return """ + a = |30| + sum(a) + """ + def test_sum(self): + result = self.run("sum") + assert result == sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_int(): + return """ + a = astype(|65|,int) + sum(a) + """ + def test_sum_int(self): + result = self.run("sum_int") + assert result == sum(range(65)) + self.check_vectorized(2, 2) + + def define_sum_multi(): + return """ + a = |30| + b = sum(a) + c = |60| + d = sum(c) + b + d + """ + + def test_sum_multi(self): + result = self.run("sum_multi") + assert result == sum(range(30)) + sum(range(60)) + self.check_vectorized(1, 1) + + def define_sum_float_to_int16(): + return """ + a = |30| + sum(a,int16) + """ + def test_sum_float_to_int16(self): + result = self.run("sum_float_to_int16") + assert result == sum(range(30)) + # one can argue that this is not desired, + # but unpacking exactly hits savings = 0 + self.check_vectorized(1, 1) + def define_sum_float_to_int32(): + return """ + a = |30| + sum(a,int32) + """ + def test_sum_float_to_int32(self): + result = self.run("sum_float_to_int32") + assert result == 
sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_float_to_float32(): + return """ + a = |30| + sum(a,float32) + """ + def test_sum_float_to_float32(self): + result = self.run("sum_float_to_float32") + assert result == sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_float_to_uint64(): + return """ + a = |30| + sum(a,uint64) + """ + def test_sum_float_to_uint64(self): + result = self.run("sum_float_to_uint64") + assert result == sum(range(30)) + self.check_vectorized(1, 0) # unsigned def define_cumsum(): return """ @@ -192,17 +461,6 @@ def test_cumsum(self): result = self.run("cumsum") assert result == 15 - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) def define_axissum(): return """ @@ -216,47 +474,7 @@ assert result == 30 # XXX note - the bridge here is fairly crucial and yet it's pretty # bogus. We need to improve the situation somehow. 
- self.check_trace_count(2) - self.check_simple_loop({ - 'float_add': 1, - 'getarrayitem_gc': 2, - 'guard_false': 2, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 4, - 'int_ge': 1, - 'int_is_zero': 1, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2, - 'raw_store': 1, - 'setarrayitem_gc': 1, - }) - self.check_resops({ - 'float_add': 2, - 'getarrayitem_gc': 5, - 'getarrayitem_gc_pure': 7, - 'getfield_gc': 5, - 'getfield_gc_pure': 51, - 'guard_class': 3, - 'guard_false': 12, - 'guard_nonnull': 11, - 'guard_nonnull_class': 3, - 'guard_not_invalidated': 2, - 'guard_true': 10, - 'guard_value': 6, - 'int_add': 13, - 'int_ge': 4, - 'int_is_true': 3, - 'int_is_zero': 4, - 'int_le': 2, - 'int_lt': 3, - 'int_sub': 1, - 'jump': 2, - 'raw_load': 4, - 'raw_store': 2, - 'setarrayitem_gc': 4, - }) + self.check_vectorized(1, 0) def define_reduce(): return """ @@ -270,9 +488,12 @@ i = self.code_mapping['reduce'] # run it twice retval = self.interp.eval_graph(self.graph, [i]) + assert retval == sum(range(1,11)) retval = self.interp.eval_graph(self.graph, [i]) + assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 + self.check_vectorized(2, 1) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -283,69 +504,42 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 + self.check_vectorized(3, 1) def define_prod(): return """ - a = |30| + a = [1,2,3,4,1,2,3,4] + prod(a) + """ + + def define_prod_zero(): + return """ + a = [1,2,3,4,1,2,3,0] prod(a) """ def test_prod(self): result = self.run("prod") - expected = 1 - for i in range(30): - expected *= i * 2 - assert result == expected - self.check_trace_count(1) - self.check_simple_loop({ - 'float_mul': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 2, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) + assert int(result) == 576 + self.check_vectorized(1, 1) + + def 
test_prod_zero(self): + result = self.run("prod_zero") + assert int(result) == 0 + self.check_vectorized(1, 1) + def define_max(): return """ a = |30| - a[13] = 128 + a[13] = 128.0 max(a) """ def test_max(self): result = self.run("max") assert result == 128 - self.check_trace_count(3) - self.check_simple_loop({ - 'float_ge': 1, - 'float_ne': 1, - 'guard_false': 3, - 'guard_not_invalidated': 1, - 'int_add': 2, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) - self.check_resops({ - 'float_ge': 2, - 'float_ne': 2, - 'getfield_gc': 4, - 'getfield_gc_pure': 30, - 'guard_class': 1, - 'guard_false': 8, - 'guard_nonnull': 2, - 'guard_nonnull_class': 2, - 'guard_not_invalidated': 2, - 'guard_true': 7, - 'guard_value': 2, - 'int_add': 8, - 'int_ge': 4, - 'int_is_true': 3, - 'jump': 3, - 'raw_load': 2, - }) + self.check_vectorized(1, 0) def define_min(): return """ @@ -357,60 +551,113 @@ def test_min(self): result = self.run("min") assert result == -128 - self.check_trace_count(1) - self.check_simple_loop({ - 'float_le': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 2, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) + self.check_vectorized(1, 0) def define_any(): return """ - a = [0,0,0,0,0,0,0,1,0,0,0] + a = astype([0,0,0,0,0,0,0,1,0,0,0],int8) any(a) """ + def define_any_int(): + return """ + a = astype([0,0,0,0,256,0,0,0,0,0,0],int16) + any(a) + """ + + def define_any_ret_0(): + return """ + a = astype([0,0,0,0,0,0,0,0,0,0,0],int64) + any(a) + """ + + def define_float_any(): + return """ + a = [0,0,0,0,0,0,0,0.1,0,0,0] + any(a) + """ + + def define_float32_any(): + return """ + a = astype([0,0,0,0,0,0,0,0.1,0,0,0], float32) + any(a) + """ + + def test_any_float(self): + result = self.run("float_any") + assert int(result) == 1 + self.check_vectorized(1, 1) + + def test_any_float32(self): + result = self.run("float32_any") + assert int(result) == 1 + self.check_vectorized(2, 2) + def test_any(self): result = self.run("any") - assert 
result == 1 - self.check_trace_count(1) - self.check_simple_loop({ - 'cast_float_to_int': 1, - 'guard_false': 2, - 'guard_not_invalidated': 1, - 'int_add': 2, - 'int_and': 1, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) + assert int(result) == 1 + self.check_vectorized(2, 1) + + def test_any_int(self): + result = self.run("any_int") + assert int(result) == 1 + self.check_vectorized(2, 1) + + def test_any_ret_0(self): + result = self.run("any_ret_0") + assert int(result) == 0 + self.check_vectorized(2, 2) def define_all(): return """ + a = astype([1,1,1,1,1,1,1,1],int32) + all(a) + """ + def define_all_int(): + return """ + a = astype([1,100,255,1,3,1,1,1],int32) + all(a) + """ + def define_all_ret_0(): + return """ + a = astype([1,1,1,1,1,0,1,1],int32) + all(a) + """ + def define_float_all(): + return """ a = [1,1,1,1,1,1,1,1] all(a) """ + def define_float32_all(): + return """ + a = astype([1,1,1,1,1,1,1,1],float32) + all(a) + """ + + def test_all_float(self): + result = self.run("float_all") + assert int(result) == 1 + self.check_vectorized(1, 1) + + def test_all_float32(self): + result = self.run("float32_all") + assert int(result) == 1 + self.check_vectorized(2, 2) def test_all(self): result = self.run("all") - assert result == 1 - self.check_trace_count(1) - self.check_simple_loop({ - 'cast_float_to_int': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 2, - 'int_and': 1, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - }) + assert int(result) == 1 + self.check_vectorized(2, 2) + + def test_all_int(self): + result = self.run("all_int") + assert int(result) == 1 + self.check_vectorized(2, 2) + + def test_all_ret_0(self): + result = self.run("all_ret_0") + assert int(result) == 0 + self.check_vectorized(2, 2) def define_logical_xor_reduce(): return """ @@ -421,25 +668,7 @@ def test_logical_xor_reduce(self): result = self.run("logical_xor_reduce") assert result == 0 - self.check_trace_count(2) - # XXX fix this - 
self.check_simple_loop({ - 'cast_float_to_int': 1, - 'getfield_gc': 2, - 'getfield_gc_pure': 11, - 'guard_class': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'guard_true': 5, - 'int_add': 2, - 'int_and': 1, - 'int_ge': 1, - 'int_is_true': 2, - 'jump': 1, - 'new_with_vtable': 1, - 'raw_load': 1, - 'setfield_gc': 4, - }) + self.check_vectorized(0, 0) # TODO reduce def define_already_forced(): return """ @@ -453,17 +682,7 @@ def test_already_forced(self): result = self.run("already_forced") assert result == (5 + 4.5) * 8 - # This is the sum of the ops for both loops, however if you remove the - # optimization then you end up with 2 float_adds, so we can still be - # sure it was optimized correctly. - py.test.skip("too fragile") - self.check_resops({'raw_store': 4, 'getfield_gc': 22, - 'getarrayitem_gc': 4, 'getarrayitem_gc_pure': 2, - 'getfield_gc_pure': 8, - 'guard_class': 8, 'int_add': 8, 'float_mul': 2, - 'jump': 2, 'int_ge': 4, - 'raw_load': 4, 'float_add': 2, - 'guard_false': 4, 'arraylen_gc': 2, 'same_as': 2}) + self.check_vectorized(2, 2) def define_ufunc(): return """ @@ -475,16 +694,7 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -3 - self.check_simple_loop({ - 'float_neg': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'guard_false': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + self.check_vectorized(1, 1) def define_specialization(): return """ @@ -507,56 +717,9 @@ """ def test_specialization(self): - self.run("specialization") - py.test.skip("don't run for now") - # This is 3, not 2 because there is a bridge for the exit. 
- self.check_trace_count(3) - - def define_slice(): - return """ - a = |30| - b = a -> ::3 From noreply at buildbot.pypy.org Thu Oct 15 14:35:59 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 14:35:59 +0200 (CEST) Subject: [pypy-commit] pypy vecopt-merge: close branch Message-ID: <20151015123559.CAF8C1C0413@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt-merge Changeset: r80236:e040414ce026 Date: 2015-10-15 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/e040414ce026/ Log: close branch From noreply at buildbot.pypy.org Thu Oct 15 14:36:02 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 14:36:02 +0200 (CEST) Subject: [pypy-commit] pypy vecopt: close branch Message-ID: <20151015123602.015831C0413@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: vecopt Changeset: r80237:5548644da690 Date: 2015-10-15 14:36 +0200 http://bitbucket.org/pypy/pypy/changeset/5548644da690/ Log: close branch From noreply at buildbot.pypy.org Thu Oct 15 15:13:36 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 15:13:36 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: SIY extended 20 bit base displacement encoding Message-ID: <20151015131336.632B91C0041@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80238:96207b27efbe Date: 2015-10-15 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/96207b27efbe/ Log: SIY extended 20 bit base displacement encoding diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -96,6 +96,16 @@ encode_base_displace(self, base_displace) return encode_si +def build_siy(mnemonic, (opcode1,opcode2)): + def encode_siy(self, base_displace, uimm8): + self.writechar(opcode1) + self.writechar(chr(uimm8)) + encode_base_displace(self, base_displace) + displace = 
base_displace.displace + self.writechar(chr(displace >> 12 & 0xff)) + self.writechar(opcode2) + return encode_siy + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -106,6 +116,7 @@ 'AGF': (build_rxy, ['\xE3','\x18']), 'AHI': (build_ri, ['\xA7','\x0A']), 'NI': (build_si, ['\x94']), + 'NIY': (build_siy, ['\xEB','\x54']), } def build_instr_codes(clazz): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -109,6 +109,7 @@ accept_unnecessary_prefix = None methname = '?' BASE_DISPLACE = build_base_disp(8,12) + BASE_DISPLACE_LONG = build_base_disp(8,20) INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) INDEX_BASE_DISPLACE_LONG = build_idx_base_disp(8,8,20) @@ -247,10 +248,6 @@ return oplist, as_code def modes(self, mode): - if mode == "rxy": - return "ry" - if mode == "rre": - return "rr" return mode def make_all_tests(self, methname, modes, args=[]): @@ -261,11 +258,19 @@ 'i': lambda i: self.imm_tests(methname, modes, i), 's': lambda i: self.BASE_DISPLACE, } - combinations = [] - for i,m in enumerate(modes): - elems = tests[m](i) - random.shuffle(elems) - combinations.append(elems) + tests_all = { + 'rxy': (tests['r'], tests['y']), + 'siy': (lambda i: self.BASE_DISPLACE_LONG, tests['i']), + 'rre': (tests['r'], tests['r']) + } + if modes in tests_all: + combinations = [f(i) for i,f in enumerate(tests_all[modes])] + else: + combinations = [] + for i,m in enumerate(modes): + elems = tests[m](i) + random.shuffle(elems) + combinations.append(elems) results = [] for args in itertools.product(*combinations): results.append(args) From noreply at buildbot.pypy.org Thu Oct 15 16:55:02 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 16:55:02 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: added base displacement parameters 
with length in various flavours (instr category named ss_a, ss_b, ss_c) Message-ID: <20151015145502.7261E1C01DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80239:d28ca84c9d6a Date: 2015-10-15 16:55 +0200 http://bitbucket.org/pypy/pypy/changeset/d28ca84c9d6a/ Log: added base displacement parameters with length in various flavours (instr category named ss_a, ss_b, ss_c) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -27,6 +27,12 @@ class Operand(object): pass +def arguments(args_str): + def impl(func): + func._arguments_ = args_str.split(',') + return func + return impl + @always_inline def encode_base_displace(mc, base_displace): displace = base_displace.displace # & 0x3ff @@ -106,6 +112,33 @@ self.writechar(opcode2) return encode_siy +def build_ssa(mnemonic, (opcode1,)): + def encode_ssa(self, len_base_disp, base_displace): + self.writechar(opcode1) + self.writechar(chr(len_base_disp.length & 0xff)) + encode_base_displace(self, len_base_disp) + encode_base_displace(self, base_displace) + return encode_ssa + +def build_ssb(mnemonic, (opcode1,)): + def encode_ssb(self, len_base_disp1, len_base_disp2): + self.writechar(opcode1) + byte = (len_base_disp1.length & 0xf) << 4 | len_base_disp2.length & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, len_base_disp1) + encode_base_displace(self, len_base_disp2) + return encode_ssb + +def build_ssc(mnemonic, (opcode1,)): + @arguments('l,l,u4') + def encode_ssc(self, len_base_disp1, len_base_disp2, uimm4): + self.writechar(opcode1) + byte = (len_base_disp1.length & 0xf) << 4 | uimm4 & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, len_base_disp1) + encode_base_displace(self, len_base_disp2) + return encode_ssc + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -117,6 +150,9 @@ 
'AHI': (build_ri, ['\xA7','\x0A']), 'NI': (build_si, ['\x94']), 'NIY': (build_siy, ['\xEB','\x54']), + 'NC': (build_ssa, ['\xD4']), + 'AP': (build_ssb, ['\xFA']), + 'SRP': (build_ssc, ['\xF0']), } def build_instr_codes(clazz): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -86,6 +86,18 @@ base = self.base return "{disp}(%r{base})".format(**locals()) +class FakeLengthBaseDisplace(object): + def __init__(self, len, base, disp): + self.length = len + self.base = base + self.displace = disp + + def __str__(self): + disp = self.displace + base = self.base + length = self.length + 1 + return "{disp}({length},%r{base})".format(**locals()) + def build_base_disp(base_bits, displace_bits): possibilities = itertools.product(range(base_bits), range(displace_bits)) results = [] @@ -101,6 +113,14 @@ results.append(FakeIndexBaseDisplace(index,base,disp)) return results +def build_len_base_disp(len_bits, base_bits, displace_bits): + possibilities = itertools.product(range(len_bits), range(base_bits), + range(displace_bits)) + results = [] + for (length,base,disp) in possibilities: + results.append(FakeLengthBaseDisplace(length,base,disp)) + return results + class TestZARCH(object): WORD = 8 TESTDIR = 'zarch' @@ -112,6 +132,8 @@ BASE_DISPLACE_LONG = build_base_disp(8,20) INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) INDEX_BASE_DISPLACE_LONG = build_idx_base_disp(8,8,20) + LENGTH4_BASE_DISPLACE = build_len_base_disp(4,8,12) + LENGTH8_BASE_DISPLACE = build_len_base_disp(8,8,12) def reg_tests(self): return self.REGS @@ -143,8 +165,6 @@ def imm_tests(self, name, modes, index): from rpython.jit.backend.zarch.codebuilder import AbstractZARCHBuilder import inspect - mode = modes[index] - assert mode == 'i' func = getattr(AbstractZARCHBuilder, name) args = inspect.getargspec(func).args # 1 off, 
self is first arg @@ -169,6 +189,8 @@ v = ([0,1,255] + [random.randrange(0,255) for i in range(COUNT1)]) return v + def uimm4_tests(self): + return list(range(0,16)) def imm32_tests(self): v = ([-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255] + @@ -189,11 +211,24 @@ 's': lambda x: str(x), 'x': lambda x: str(x), 'y': lambda x: str(x), - 'i': lambda x: str(x) + 'i': lambda x: str(x), + 'l': lambda x: str(x), + 'L': lambda x: str(x), } def operand_combinations(self, modes, arguments): + remap = { + 'rxy': 'rx', + 'siy': 'si', + 'rre': 'rr', + 'ssa': 'Ls', + 'ssb': 'll', + 'ssc': 'lsi', + 'ssd': 'xsr', + 'sse': 'rrss', + } mapping = self.get_mapping_asm_to_str() + modes = remap.get(modes, modes) for mode, args in zip(modes, arguments): yield mapping[mode](args) @@ -257,11 +292,16 @@ 'y': lambda i: self.INDEX_BASE_DISPLACE_LONG, 'i': lambda i: self.imm_tests(methname, modes, i), 's': lambda i: self.BASE_DISPLACE, + 'L': lambda i: self.LENGTH8_BASE_DISPLACE, + 'l': lambda i: self.LENGTH4_BASE_DISPLACE, } tests_all = { 'rxy': (tests['r'], tests['y']), 'siy': (lambda i: self.BASE_DISPLACE_LONG, tests['i']), - 'rre': (tests['r'], tests['r']) + 'rre': (tests['r'], tests['r']), + 'ssa': (tests['L'], tests['s']), + 'ssb': (tests['l'], tests['l']), + 'ssc': (tests['l'], tests['s'], tests['i']), } if modes in tests_all: combinations = [f(i) for i,f in enumerate(tests_all[modes])] From noreply at buildbot.pypy.org Thu Oct 15 17:14:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 15 Oct 2015 17:14:37 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: More thoughts... Message-ID: <20151015151437.7073F1C002A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80240:f94e515eb625 Date: 2015-10-15 17:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f94e515eb625/ Log: More thoughts... 
diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -10,103 +10,139 @@ ob_pypy_link. The ob_refcnt is the reference counter as used on CPython. If the PyObject structure is linked to a live PyPy object, its current address is stored in ob_pypy_link and ob_refcnt is bumped -by the constant REFCNT_FROM_PYPY_OBJECT. +by either the constant REFCNT_FROM_PYPY, or the constant +REFCNT_FROM_PYPY_DIRECT (== REFCNT_FROM_PYPY + SOME_HUGE_VALUE). -rawrefcount_create_link_from_pypy(p, ob) +Most PyPy objects exist outside cpyext, and conversely in cpyext it is +possible that a lot of PyObjects exist without being seen by the rest +of PyPy. At the interface, however, we can "link" a PyPy object and a +PyObject. There are two kinds of link: + +rawrefcount.create_link_pypy(p, ob) Makes a link between an exising object gcref 'p' and a newly - allocated PyObject structure 'ob'. Both must not be linked so far. - This adds REFCNT_FROM_PYPY_OBJECT to ob->ob_refcnt. + allocated PyObject structure 'ob'. ob->ob_refcnt must be + initialized to either REFCNT_FROM_PYPY, or + REFCNT_FROM_PYPY_DIRECT. (The second case is an optimization: + when the GC finds the PyPy object and PyObject no longer + referenced, it can just free() the PyObject.) -rawrefcount_create_link_to_pypy(p, ob) +rawrefcount.create_link_pyobj(p, ob) Makes a link from an existing PyObject structure 'ob' to a newly - allocated W_CPyExtPlaceHolderObject 'p'. The 'p' should have a - back-reference field pointing to 'ob'. This also adds - REFCNT_FROM_PYPY_OBJECT to ob->ob_refcnt. + allocated W_CPyExtPlaceHolderObject 'p'. You must also add + REFCNT_FROM_PYPY to ob->ob_refcnt. For cases where the PyObject + contains all the data, and the PyPy object is just a proxy. The + W_CPyExtPlaceHolderObject should have only a field that contains + the address of the PyObject, but that's outside the scope of the + GC. 
-rawrefcount_from_obj(p) +rawrefcount.from_obj(p) If there is a link from object 'p', and 'p' is not a W_CPyExtPlaceHolderObject, returns the corresponding 'ob'. Otherwise, returns NULL. -rawrefcount_to_obj(ob) +rawrefcount.to_obj(Class, ob) - Returns ob->ob_pypy_link, cast to a GCREF. + Returns ob->ob_pypy_link, cast to an instance of 'Class'. Collection logic ---------------- -Objects existing purely on the C side have ob->ob_from_pypy == NULL; +Objects existing purely on the C side have ob->ob_pypy_link == 0; these are purely reference counted. On the other hand, if -ob->ob_from_pypy != NULL, then ob->ob_refcnt is at least -REFCNT_FROM_PYPY_OBJECT and the object is part of a "link". +ob->ob_pypy_link != 0, then ob->ob_refcnt is at least REFCNT_FROM_PYPY +and the object is part of a "link". The idea is that links whose 'p' is not reachable from other PyPy -objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY_OBJECT are the -ones who die. But it is more messy because links created with -rawrefcount_create_link_to_pypy() need to have a deallocator called, +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY or +REFCNT_FROM_PYPY_DIRECT are the ones who die. But it is more messy +because PyObjects still (usually) need to have a tp_dealloc called, and this cannot occur immediately (and can do random things like accessing other references this object points to, or resurrecting the object). -Let P = list of links created with rawrefcount_create_link_from_pypy() -and O = list of links created with rawrefcount_create_link_to_pypy(). +Let P = list of links created with rawrefcount.create_link_pypy() +and O = list of links created with rawrefcount.create_link_pyobj(). The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all -the data is in the PyObjects, and all references are regular -CPython-like reference counts. It is the opposite with the P links: -all references are regular PyPy references from the 'p' object, and -the 'ob' is trivial. 
+the data is in the PyObjects, and all references (if any) are regular +CPython-like reference counts. -So, after the collection we do this about P links: +So, during the collection we do this about P links: for (p, ob) in P: - if ob->ob_refcnt != REFCNT_FROM_PYPY_OBJECT: + if ob->ob_refcnt != REFCNT_FROM_PYPY + and ob->ob_refcnt != REFCNT_FROM_PYPY_DIRECT: mark 'p' as surviving, as well as all its dependencies - for (p, ob) in P: - if p is not surviving: - unlink p and ob, free ob +At the end of the collection, the P and O links are both handled like +this: -Afterwards, the O links are handled like this: - - for (p, ob) in O: - # p is trivial: it cannot point to other PyPy objects + for (p, ob) in P + O: if p is not surviving: unlink p and ob - ob->ob_refcnt -= REFCNT_FROM_PYPY_OBJECT - if ob->ob_refcnt == 0: - invoke _Py_Dealloc(ob) later, outside the GC + if ob->ob_refcnt == REFCNT_FROM_PYPY_DIRECT: + free(ob) + else: + ob->ob_refcnt -= REFCNT_FROM_PYPY + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC GC Implementation ----------------- -We need two P lists and two O lists, for young or old objects. All -four lists can actually be linked lists of 'ob', using yet another -field 'ob_pypy_next'; or they can be regular AddressLists (unsure -about the overhead of this extra field for all PyObjects -- even ones -not linked to PyPy objects). +We need two copies of both the P list and O list, for young or old +objects. All four lists can be regular AddressLists of 'ob' objects. We also need an AddressDict mapping 'p' to 'ob' for all links in the P -list. This dict contains both young and old 'p'; we simply write a -new entry when the object moves. As a result it can contain some -extra garbage entries after some minor collections. It is cleaned up -by being rebuilt at the next major collection. We never walk all -items of that dict; we only walk the two explicit P lists. +list, and update it when PyPy objects move. 
Further notes ------------- -For small immutable types like and , we can actually -create a PyIntObject as a complete copy of the W_IntObject whenever -asked, and not record any link. Is it cheaper? Unclear. +For objects that are opaque in CPython, like , we always create +a PyPy object, and then when needed we make an empty PyObject and +attach it with create_link_pypy()/REFCNT_FROM_PYPY_DIRECT. -A few special types need to be reflected both as PyPy objects and -PyObjects. For now we assume that these are large and mostly -immutable, like objects. They should be linked in some mixture -of the P list and the O list. Likely, the P list with an extra flag -that says "_Py_Dealloc must be invoked". +For and objects, the corresponding PyObjects contain a +"long" or "double" field too. We link them with create_link_pypy() +and we can use REFCNT_FROM_PYPY_DIRECT too: 'tp_dealloc' doesn't +need to be called, and instead just calling free() is fine. + +For objects, we need both a PyPy and a PyObject side. These +are made with create_link_pypy()/REFCNT_FROM_PYPY. + +For custom PyXxxObjects allocated from the C extension module, we +need create_link_pyobj(). + +For or objects coming from PyPy, we use +create_link_pypy()/REFCNT_FROM_PYPY_DIRECT with a PyObject +preallocated with the size of the string. We copy the string +lazily into that area if PyString_AS_STRING() is called. + +For , , or objects in the C extension +module, we first allocate it as only a PyObject, which supports +mutation of the data from C, like CPython. When it is exported to +PyPy we could make a W_CPyExtPlaceHolderObject with +create_link_pyobj(). + +For objects coming from PyPy, if they are not specialized, +then the PyPy side holds a regular reference to the items. Then we +can allocate a PyTupleObject and store in it borrowed PyObject +pointers to the items. Such a case is created with +create_link_pypy()/REFCNT_FROM_PYPY_DIRECT. 
If it is specialized, +then it doesn't work because the items are created just-in-time on the +PyPy side. In this case, the PyTupleObject needs to hold real +references to the PyObject items, and we use create_link_pypy()/ +REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects +that we can return from PySequence_Fast_ITEMS. + +For objects coming from PyPy, we can use a cpyext list +strategy. The list turns into a PyListObject, as if it had been +allocated from C in the first place. The special strategy can hold +(only) a direct reference to the PyListObject, and we can use +create_link_pyobj(). PySequence_Fast_ITEMS then works for lists too. From noreply at buildbot.pypy.org Thu Oct 15 17:42:15 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 Oct 2015 17:42:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Remove OrderedDict compatibility hacks for Python 2.6 Message-ID: <20151015154215.6FD2C1C00E2@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80242:81593cb4a496 Date: 2015-10-15 16:41 +0100 http://bitbucket.org/pypy/pypy/changeset/81593cb4a496/ Log: Remove OrderedDict compatibility hacks for Python 2.6 diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -6,6 +6,7 @@ import sys, types, inspect, weakref from contextlib import contextmanager +from collections import OrderedDict from rpython.flowspace.model import Constant from rpython.annotator.model import ( @@ -257,12 +258,12 @@ result.const_box = key return result elif (tp is dict or tp is r_dict or - tp is SomeOrderedDict.knowntype or tp is r_ordereddict): + tp is OrderedDict or tp is r_ordereddict): key = Constant(x) try: return self.immutable_cache[key] except KeyError: - if tp is SomeOrderedDict.knowntype or tp is r_ordereddict: + if tp is OrderedDict or tp is r_ordereddict: cls = SomeOrderedDict else: cls = SomeDict diff --git a/rpython/annotator/builtin.py 
b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -2,6 +2,7 @@ Built-in functions. """ import sys +from collections import OrderedDict from rpython.annotator.model import ( SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, @@ -356,7 +357,7 @@ def unicodedata_decimal(s_uchr): raise TypeError("unicodedate.decimal() calls should not happen at interp-level") - at analyzer_for(SomeOrderedDict.knowntype) + at analyzer_for(OrderedDict) def analyze(): return SomeOrderedDict(getbookkeeper().getdictdef()) diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -32,6 +32,7 @@ import inspect import weakref from types import BuiltinFunctionType, MethodType +from collections import OrderedDict import rpython from rpython.tool import descriptor @@ -377,11 +378,7 @@ return type(self)(self.dictdef) class SomeOrderedDict(SomeDict): - try: - from collections import OrderedDict as knowntype - except ImportError: # Python 2.6 - class PseudoOrderedDict(dict): pass - knowntype = PseudoOrderedDict + knowntype = OrderedDict def method_copy(dct): return SomeOrderedDict(dct.dictdef) diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py --- a/rpython/rtyper/rbuiltin.py +++ b/rpython/rtyper/rbuiltin.py @@ -1,3 +1,5 @@ +from collections import OrderedDict + from rpython.annotator import model as annmodel from rpython.flowspace.model import Constant from rpython.rlib import rarithmetic, objectmodel @@ -721,7 +723,7 @@ raise TyperError("hasattr is only suported on a constant") - at typer_for(annmodel.SomeOrderedDict.knowntype) + at typer_for(OrderedDict) @typer_for(objectmodel.r_dict) @typer_for(objectmodel.r_ordereddict) def rtype_dict_constructor(hop, i_force_non_null=None): From noreply at buildbot.pypy.org Thu Oct 15 17:49:16 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 15 Oct 2015 17:49:16 +0200 (CEST) Subject: 
[pypy-commit] pypy no-class-specialize: hg merge default Message-ID: <20151015154916.0FD681C00E2@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80243:7553d3865133 Date: 2015-10-15 16:48 +0100 http://bitbucket.org/pypy/pypy/changeset/7553d3865133/ Log: hg merge default diff too long, truncating to 2000 out of 14785 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -80,3 +80,8 @@ allow automatic casting in ufuncs (and frompypyfunc) to cast the arguments to the allowed function type declarations, fixes various failures in linalg cffi functions + +.. branch: vecopt +.. 
branch: vecopt-merge + +A new optimization pass to use emit vectorized loops diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, 
build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- 
a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, 
rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 3 + assert (a + b) == 42 + raises(NotImplementedError, "b + c") + def 
test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -2,6 +2,7 @@ It should not be imported by the module itself """ import re +import py from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError @@ -12,6 +13,10 @@ from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache +from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary +from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, + UserDelAction) +from pypy.interpreter.pyframe import PyFrame class BogusBytecode(Exception): @@ -32,12 +37,11 @@ class BadToken(Exception): pass - SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring", "count_nonzero", "argsort", "cumsum", "logical_xor_reduce"] -TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted', 'multiply'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype', 'reshape'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -57,6 +61,10 @@ w_OverflowError = W_TypeObject("OverflowError") w_NotImplementedError = W_TypeObject("NotImplementedError") w_AttributeError = W_TypeObject("AttributeError") + w_StopIteration = W_TypeObject("StopIteration") + w_KeyError = W_TypeObject("KeyError") + w_SystemExit = W_TypeObject("SystemExit") + w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_None = None w_bool = W_TypeObject("bool") @@ -73,12 +81,24 @@ w_object = W_TypeObject("object") w_buffer = W_TypeObject("buffer") - def __init__(self): + def __init__(self, 
config=None): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild self.w_Ellipsis = special.Ellipsis() self.w_NotImplemented = special.NotImplemented() + if config is None: + from pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(translating=False) + self.config = config + + self.interned_strings = make_weak_value_dictionary(self, str, W_Root) + self.builtin = DictObject({}) + self.FrameClass = PyFrame + self.threadlocals = ThreadLocals() + self.actionflag = ActionFlag() # changed by the signal module + self.check_signal_action = None # changed by the signal module + def _freeze_(self): return True @@ -89,12 +109,17 @@ return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def len(self, w_obj): - assert isinstance(w_obj, ListObject) - return self.wrap(len(w_obj.items)) + if isinstance(w_obj, ListObject): + return self.wrap(len(w_obj.items)) + elif isinstance(w_obj, DictObject): + return self.wrap(len(w_obj.items)) + raise NotImplementedError def getattr(self, w_obj, w_attr): assert isinstance(w_attr, StringObject) - return w_obj.getdictvalue(self, w_attr.v) + if isinstance(w_obj, DictObject): + return w_obj.getdictvalue(self, w_attr) + return None def isinstance_w(self, w_obj, w_tp): try: @@ -102,6 +127,22 @@ except AttributeError: return False + def iter(self, w_iter): + if isinstance(w_iter, ListObject): + raise NotImplementedError + #return IterObject(space, w_iter.items) + elif isinstance(w_iter, DictObject): + return IterDictObject(self, w_iter) + + def next(self, w_iter): + return w_iter.next() + + def contains(self, w_iter, w_key): + if isinstance(w_iter, DictObject): + return self.wrap(w_key in w_iter.items) + + raise NotImplementedError + def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): return (self.int_w(w_idx), 0, 0, 1) @@ -123,6 +164,10 @@ lgt = (stop - start - 1) / step + 1 return (start, stop, step, lgt) + def unicode_from_object(self, w_item): + # XXX + return 
StringObject("") + @specialize.argtype(1) def wrap(self, obj): if isinstance(obj, float): @@ -145,7 +190,55 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def newfloat(self, f): + return self.float(f) + + def le(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_le(self, w_obj2) + + def lt(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_lt(self, w_obj2) + + def ge(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_ge(self, w_obj2) + + def add(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_add(self, w_obj2) + + def sub(self, w_obj1, w_obj2): + return self.wrap(1) + + def mul(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_mul(self, w_obj2) + + def pow(self, w_obj1, w_obj2, _): + return self.wrap(1) + + def neg(self, w_obj1): + return self.wrap(0) + + def repr(self, w_obj1): + return self.wrap('fake') + def getitem(self, obj, index): + if isinstance(obj, DictObject): + w_dict = obj.getdict(self) + if w_dict is not None: + try: + return w_dict[index] + except KeyError, e: + raise OperationError(self.w_KeyError, self.wrap("key error")) + assert isinstance(obj, ListObject) assert isinstance(index, IntObject) return obj.items[index.intval] @@ -191,12 +284,24 @@ return w_obj.v raise NotImplementedError + def unicode_w(self, w_obj): + # XXX + if isinstance(w_obj, StringObject): + return unicode(w_obj.v) + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj assert isinstance(w_obj, boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) + def long(self, w_obj): + 
if isinstance(w_obj, LongObject): + return w_obj + assert isinstance(w_obj, boxes.W_GenericBox) + return self.int(w_obj.descr_long(self)) + def str(self, w_obj): if isinstance(w_obj, StringObject): return w_obj @@ -240,9 +345,29 @@ def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) - def call_function(self, tp, w_dtype): + def call_function(self, tp, w_dtype, *args): + if tp is self.w_float: + if isinstance(w_dtype, boxes.W_Float64Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Float32Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Int64Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int32Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int16Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int8Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, IntObject): + return FloatObject(float(w_dtype.intval)) + if tp is self.w_int: + if isinstance(w_dtype, FloatObject): + return IntObject(int(w_dtype.floatval)) + return w_dtype + @specialize.arg(2) def call_method(self, w_obj, s, *args): # XXX even the hacks have hacks return getattr(w_obj, 'descr_' + s)(self, *args) @@ -258,21 +383,21 @@ def newtuple(self, list_w): return ListObject(list_w) - def newdict(self): - return {} + def newdict(self, module=True): + return DictObject({}) - def setitem(self, dict, item, value): - dict[item] = value + def newint(self, i): + if isinstance(i, IntObject): + return i + return IntObject(i) - def len_w(self, w_obj): - if isinstance(w_obj, ListObject): - return len(w_obj.items) - # XXX array probably - assert False + def setitem(self, obj, index, value): + obj.items[index] = value def exception_match(self, w_exc_type, w_check_class): - # Good enough for now - raise NotImplementedError + assert isinstance(w_exc_type, W_TypeObject) + assert isinstance(w_check_class, 
W_TypeObject) + return w_exc_type.name == w_check_class.name class FloatObject(W_Root): tp = FakeSpace.w_float @@ -283,6 +408,9 @@ tp = FakeSpace.w_bool def __init__(self, boolval): self.intval = boolval +FakeSpace.w_True = BoolObject(True) +FakeSpace.w_False = BoolObject(False) + class IntObject(W_Root): tp = FakeSpace.w_int @@ -299,6 +427,33 @@ def __init__(self, items): self.items = items +class DictObject(W_Root): + tp = FakeSpace.w_dict + def __init__(self, items): + self.items = items + + def getdict(self, space): + return self.items + + def getdictvalue(self, space, key): + return self.items[key] + +class IterDictObject(W_Root): + def __init__(self, space, w_dict): + self.space = space + self.items = w_dict.items.items() + self.i = 0 + + def __iter__(self): + return self + + def next(self): + space = self.space + if self.i >= len(self.items): + raise OperationError(space.w_StopIteration, space.wrap("stop iteration")) + self.i += 1 + return self.items[self.i-1][0] + class SliceObject(W_Root): tp = FakeSpace.w_slice def __init__(self, start, stop, step): @@ -414,6 +569,15 @@ w_rhs = IntObject(int(w_rhs.floatval)) assert isinstance(w_lhs, W_NDimArray) w_res = w_lhs.descr_getitem(interp.space, w_rhs) + if isinstance(w_rhs, IntObject): + if isinstance(w_res, boxes.W_Float64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Float32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Int64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) + if isinstance(w_res, boxes.W_Int32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and @@ -425,9 +589,22 @@ def __repr__(self): return '(%r %s %r)' % (self.lhs, self.name, self.rhs) -class FloatConstant(Node): +class NumberConstant(Node): def __init__(self, v): - self.v = float(v) + if 
isinstance(v, int): + self.v = v + elif isinstance(v, float): + self.v = v + else: + assert isinstance(v, str) + assert len(v) > 0 + c = v[-1] + if c == 'f': + self.v = float(v[:-1]) + elif c == 'i': + self.v = int(v[:-1]) + else: + self.v = float(v) def __repr__(self): return "Const(%s)" % self.v @@ -519,8 +696,24 @@ def execute(self, interp): if self.v == 'int': dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'int8': + dtype = get_dtype_cache(interp.space).w_int8dtype + elif self.v == 'int16': + dtype = get_dtype_cache(interp.space).w_int16dtype + elif self.v == 'int32': + dtype = get_dtype_cache(interp.space).w_int32dtype + elif self.v == 'uint': + dtype = get_dtype_cache(interp.space).w_uint64dtype + elif self.v == 'uint8': + dtype = get_dtype_cache(interp.space).w_uint8dtype + elif self.v == 'uint16': + dtype = get_dtype_cache(interp.space).w_uint16dtype + elif self.v == 'uint32': + dtype = get_dtype_cache(interp.space).w_uint32dtype elif self.v == 'float': dtype = get_dtype_cache(interp.space).w_float64dtype + elif self.v == 'float32': + dtype = get_dtype_cache(interp.space).w_float32dtype else: raise BadToken('unknown v to dtype "%s"' % self.v) return dtype @@ -556,8 +749,13 @@ raise ArgumentMismatch if self.name == "sum": if len(self.args)>1: - w_res = arr.descr_sum(interp.space, + var = self.args[1] + if isinstance(var, DtypeClass): + w_res = arr.descr_sum(interp.space, None, var.execute(interp)) + else: + w_res = arr.descr_sum(interp.space, self.args[1].execute(interp)) + else: w_res = arr.descr_sum(interp.space) elif self.name == "prod": @@ -577,10 +775,10 @@ w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative - w_res = neg.call(interp.space, [arr], None, None, None) + w_res = neg.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "cos": cos = ufuncs.get(interp.space).cos - w_res = cos.call(interp.space, [arr], None, None, None) + w_res = 
cos.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) elif self.name == "argsort": @@ -598,6 +796,8 @@ raise ArgumentNotAnArray if self.name == "dot": w_res = arr.descr_dot(interp.space, arg) + elif self.name == 'multiply': + w_res = arr.descr_mul(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) elif self.name == "searchsorted": @@ -617,7 +817,7 @@ if self.name == "where": w_res = where(interp.space, arr, arg1, arg2) else: - assert False + assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: if len(self.args) != 2: raise ArgumentMismatch @@ -626,6 +826,11 @@ w_res = arr.descr_view(interp.space, arg) elif self.name == 'astype': w_res = arr.descr_astype(interp.space, arg) + elif self.name == 'reshape': + w_arg = self.args[1] + assert isinstance(w_arg, ArrayConstant) + order = -1 + w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False else: @@ -645,7 +850,7 @@ return W_NDimArray.new_scalar(interp.space, dtype, w_res) _REGEXES = [ - ('-?[\d\.]+', 'number'), + ('-?[\d\.]+(i|f)?', 'number'), ('\[', 'array_left'), (':', 'colon'), ('\w+', 'identifier'), @@ -719,7 +924,7 @@ start = 0 else: if tokens.get(0).name != 'colon': - return FloatConstant(start_tok.v) + return NumberConstant(start_tok.v) start = int(start_tok.v) tokens.pop() if not tokens.get(0).name in ['colon', 'number']: @@ -751,8 +956,30 @@ stack.append(ArrayClass()) elif token.v.strip(' ') == 'int': stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'int8': + stack.append(DtypeClass('int8')) + elif token.v.strip(' ') == 'int16': + stack.append(DtypeClass('int16')) + elif token.v.strip(' ') == 'int32': + stack.append(DtypeClass('int32')) + elif token.v.strip(' ') == 'int64': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'uint': + stack.append(DtypeClass('uint')) + elif token.v.strip(' ') == 'uint8': + 
stack.append(DtypeClass('uint8')) + elif token.v.strip(' ') == 'uint16': + stack.append(DtypeClass('uint16')) + elif token.v.strip(' ') == 'uint32': + stack.append(DtypeClass('uint32')) + elif token.v.strip(' ') == 'uint64': + stack.append(DtypeClass('uint')) elif token.v.strip(' ') == 'float': stack.append(DtypeClass('float')) + elif token.v.strip(' ') == 'float32': + stack.append(DtypeClass('float32')) + elif token.v.strip(' ') == 'float64': + stack.append(DtypeClass('float')) else: stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': @@ -805,7 +1032,7 @@ while True: token = tokens.pop() if token.name == 'number': - elems.append(FloatConstant(token.v)) + elems.append(NumberConstant(token.v)) elif token.name == 'array_left': elems.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'paren_left': diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -70,7 +70,10 @@ @jit.unroll_safe def setslice(self, space, arr): - if len(arr.get_shape()) > len(self.get_shape()): + if arr.get_size() == 1: + # we can always set self[:] = scalar + pass + elif len(arr.get_shape()) > len(self.get_shape()): # record arrays get one extra dimension if not self.dtype.is_record() or \ len(arr.get_shape()) > len(self.get_shape()) + 1: diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -97,7 +97,7 @@ finally: self.iter.reset(self.state, mutate=True) - def descr___array_wrap__(self, space, obj): + def descr___array_wrap__(self, space, obj, w_context=None): return obj W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -83,6 +83,12 @@ self._indices = 
indices self.offset = offset + def same(self, other): + if self.offset == other.offset and \ + self.index == other.index and \ + self._indices == other._indices: + return self.iterator.same_shape(other.iterator) + return False class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', @@ -100,6 +106,7 @@ self.array = array self.size = size self.ndim_m1 = len(shape) - 1 + # self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides @@ -113,6 +120,17 @@ factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors + def same_shape(self, other): + """ Iterating over the same element """ + if not self.contiguous or not other.contiguous: + return False + return (self.contiguous == other.contiguous and + self.array.dtype is self.array.dtype and + self.shape_m1 == other.shape_m1 and + self.strides == other.strides and + self.backstrides == other.backstrides and + self.factors == other.factors) + @jit.unroll_safe def reset(self, state=None, mutate=False): index = 0 @@ -138,9 +156,13 @@ indices = state._indices offset = state.offset if self.contiguous: - offset += self.array.dtype.elsize + elsize = self.array.dtype.elsize + jit.promote(elsize) + offset += elsize elif self.ndim_m1 == 0: - offset += self.strides[0] + stride = self.strides[0] + jit.promote(stride) + offset += stride else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -192,7 +214,7 @@ return state.index >= self.size def getitem(self, state): - assert state.iterator is self + # assert state.iterator is self return self.array.getitem(state.offset) def getitem_bool(self, state): @@ -203,7 +225,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def AxisIter(array, shape, axis): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ 
b/pypy/module/micronumpy/loop.py @@ -2,6 +2,7 @@ operations. This is the place to look for all the computations that iterate over all the array elements. """ +import py from pypy.interpreter.error import OperationError from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder @@ -13,11 +14,6 @@ from pypy.interpreter.argument import Arguments -call2_driver = jit.JitDriver( - name='numpy_call2', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') - def call2(space, shape, func, calc_dtype, w_lhs, w_rhs, out): if w_lhs.get_size() == 1: w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) @@ -38,24 +34,104 @@ out_iter, out_state = out.create_iter(shape) shapelen = len(shape) res_dtype = out.get_dtype() - while not out_iter.done(out_state): - call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype) - if left_iter: - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - left_state = left_iter.next(left_state) - if right_iter: - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) - right_state = right_iter.next(right_state) - out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( - space, res_dtype)) - out_state = out_iter.next(out_state) - return out + call2_func = try_to_share_iterators_call2(left_iter, right_iter, + left_state, right_state, out_state) + params = (space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state) + return call2_func(*params) + +def try_to_share_iterators_call2(left_iter, right_iter, left_state, right_state, out_state): + # these are all possible iterator sharing combinations + # left == right == out + # left == right + # left == out + # right == out + right_out_equal = False + if right_iter: + # rhs is not a scalar + if out_state.same(right_state): + right_out_equal = True + # + if not left_iter: + # lhs is a 
scalar + if right_out_equal: + return call2_advance_out_left + else: + # worst case, nothing can be shared and lhs is a scalar + return call2_advance_out_left_right + else: + # lhs is NOT a scalar + if out_state.same(left_state): + # (2) out and left are the same -> remove left + if right_out_equal: + # the best case + return call2_advance_out + else: + return call2_advance_out_right + else: + if right_out_equal: + # right and out are equal, only advance left and out + return call2_advance_out_left + else: + if right_iter and right_state.same(left_state): + # left and right are equal, but still need to advance out + return call2_advance_out_left_eq_right + else: + # worst case, nothing can be shared + return call2_advance_out_left_right + + assert 0, "logical problem with the selection of the call2 case" + +def generate_call2_cases(name, left_state, right_state): + call2_driver = jit.JitDriver(name='numpy_call2_' + name, + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) + # + advance_left_state = left_state == "left_state" + advance_right_state = right_state == "right_state" + code = """ + def method(space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state): + while not out_iter.done(out_state): + call2_driver.jit_merge_point(shapelen=shapelen, func=func, + calc_dtype=calc_dtype, res_dtype=res_dtype) + if left_iter: + w_left = left_iter.getitem({left_state}).convert_to(space, calc_dtype) + if right_iter: + w_right = right_iter.getitem({right_state}).convert_to(space, calc_dtype) + w_out = func(calc_dtype, w_left, w_right) + out_iter.setitem(out_state, w_out.convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + if advance_left_state and left_iter: + left_state = left_iter.next(left_state) + if advance_right_state and right_iter: + right_state = right_iter.next(right_state) + # + # if not set to None, the values will be loop 
carried + # (for the var,var case), forcing the vectorization to unpack + # the vector registers at the end of the loop + if left_iter: + w_left = None + if right_iter: + w_right = None + return out + """ + exec(py.code.Source(code.format(left_state=left_state,right_state=right_state)).compile(), locals()) + method.__name__ = "call2_" + name + return method + +call2_advance_out = generate_call2_cases("inc_out", "out_state", "out_state") +call2_advance_out_left = generate_call2_cases("inc_out_left", "left_state", "out_state") +call2_advance_out_right = generate_call2_cases("inc_out_right", "out_state", "right_state") +call2_advance_out_left_eq_right = generate_call2_cases("inc_out_left_eq_right", "left_state", "left_state") +call2_advance_out_left_right = generate_call2_cases("inc_out_left_right", "left_state", "right_state") call1_driver = jit.JitDriver( name='numpy_call1', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') + greens=['shapelen', 'share_iterator', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) def call1(space, shape, func, calc_dtype, w_obj, w_ret): obj_iter, obj_state = w_obj.create_iter(shape) @@ -63,13 +139,24 @@ out_iter, out_state = w_ret.create_iter(shape) shapelen = len(shape) res_dtype = w_ret.get_dtype() + share_iterator = out_state.same(obj_state) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, + share_iterator=share_iterator, calc_dtype=calc_dtype, res_dtype=res_dtype) - elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + if share_iterator: + # use out state as param to getitem + elem = obj_iter.getitem(out_state).convert_to(space, calc_dtype) + else: + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) - out_state = out_iter.next(out_state) - obj_state = obj_iter.next(obj_state) + if share_iterator: + # only advance out, they share the same 
iteration space + out_state = out_iter.next(out_state) + else: + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) + elem = None return w_ret call_many_to_one_driver = jit.JitDriver( @@ -145,7 +232,7 @@ vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) - # w_outvals should be a tuple, but func can return a single value as well + # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): @@ -161,7 +248,7 @@ setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', vectorize=True) def setslice(space, shape, target, source): if not shape: @@ -239,7 +326,8 @@ reduce_flat_driver = jit.JitDriver( name='numpy_reduce_flat', - greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto') + greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto', + vectorize = True) def reduce_flat(space, func, w_arr, calc_dtype, done_func, identity): obj_iter, obj_state = w_arr.create_iter() @@ -260,10 +348,10 @@ obj_state = obj_iter.next(obj_state) return cur_value - reduce_driver = jit.JitDriver( name='numpy_reduce', - greens=['shapelen', 'func', 'dtype'], reds='auto') + greens=['shapelen', 'func', 'dtype'], reds='auto', + vectorize=True) def reduce(space, func, w_arr, axis_flags, dtype, out, identity): out_iter, out_state = out.create_iter() @@ -298,7 +386,7 @@ accumulate_flat_driver = jit.JitDriver( name='numpy_accumulate_flat', greens=['shapelen', 'func', 'dtype', 'out_dtype'], - reds='auto') + reds='auto', vectorize=True) def accumulate_flat(space, func, w_arr, calc_dtype, w_out, identity): arr_iter, arr_state = w_arr.create_iter() @@ -325,7 +413,9 @@ accumulate_driver = jit.JitDriver( name='numpy_accumulate', - 
greens=['shapelen', 'func', 'calc_dtype'], reds='auto') + greens=['shapelen', 'func', 'calc_dtype'], + reds='auto', + vectorize=True) def accumulate(space, func, w_arr, axis, calc_dtype, w_out, identity): @@ -375,7 +465,8 @@ where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def where(space, out, shape, arr, x, y, dtype): out_iter, out_state = out.create_iter(shape) @@ -416,7 +507,6 @@ state = x_state return out - def _new_argmin_argmax(op_name): arg_driver = jit.JitDriver(name='numpy_' + op_name, greens = ['shapelen', 'dtype'], @@ -481,7 +571,8 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -524,8 +615,8 @@ lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += s1 - i2 += s2 + i1 += jit.promote(s1) + i2 += jit.promote(s2) outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) @@ -535,7 +626,8 @@ count_all_true_driver = jit.JitDriver(name = 'numpy_count', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def count_all_true_concrete(impl): s = 0 @@ -556,7 +648,8 @@ nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', greens = ['shapelen', 'dims', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def nonzero(res, arr, box): res_iter, res_state = res.create_iter() @@ -578,7 +671,8 @@ getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def getitem_filter(res, arr, index): res_iter, res_state = res.create_iter() @@ -606,7 +700,8 @@ setitem_filter_driver = jit.JitDriver(name = 
'numpy_setitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def setitem_filter(space, arr, index, value): arr_iter, arr_state = arr.create_iter() @@ -635,7 +730,8 @@ flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_getitem(res, base_iter, base_state, step): ri, rs = res.create_iter() @@ -649,7 +745,8 @@ flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() @@ -758,7 +855,8 @@ byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def byteswap(from_, to): dtype = from_.dtype @@ -773,7 +871,8 @@ choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -807,7 +906,8 @@ clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def clip(space, arr, shape, min, max, out): assert min or max @@ -842,7 +942,8 @@ round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def round(space, arr, dtype, shape, decimals, out): arr_iter, arr_state = arr.create_iter(shape) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -7,6 +7,7 @@ # structures to describe slicing class BaseChunk(object): + _attrs_ = ['step','out_dim'] pass diff --git a/pypy/module/micronumpy/test/test_compile.py 
b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,6 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, - ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + ArrayConstant, NumberConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace, W_NDimArray) @@ -25,30 +25,30 @@ interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [FloatConstant(1), FloatConstant(2), - FloatConstant(3)] + assert st.expr.items == [NumberConstant(1), NumberConstant(2), + NumberConstant(3)] def test_array_literal2(self): code = "a = [[1],[2],[3]]" interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [ArrayConstant([FloatConstant(1)]), - ArrayConstant([FloatConstant(2)]), - ArrayConstant([FloatConstant(3)])] + assert st.expr.items == [ArrayConstant([NumberConstant(1)]), + ArrayConstant([NumberConstant(2)]), + ArrayConstant([NumberConstant(3)])] def test_expr_1(self): code = "b = a + 1" interp = self.compile(code) assert (interp.code.statements[0].expr == - Operator(Variable("a"), "+", FloatConstant(1))) + Operator(Variable("a"), "+", NumberConstant(1))) def test_expr_2(self): code = "b = a + b - 3" interp = self.compile(code) assert (interp.code.statements[0].expr == Operator(Operator(Variable("a"), "+", Variable("b")), "-", - FloatConstant(3))) + NumberConstant(3))) def test_expr_3(self): # an equivalent of range @@ -60,13 +60,13 @@ code = "3 + a" interp = self.compile(code) assert interp.code.statements[0] == Execute( - Operator(FloatConstant(3), "+", Variable("a"))) + Operator(NumberConstant(3), "+", Variable("a"))) def test_array_access(self): code = "a -> 3" interp = self.compile(code) assert interp.code.statements[0] 
== Execute( - Operator(Variable("a"), "->", FloatConstant(3))) + Operator(Variable("a"), "->", NumberConstant(3))) def test_function_call(self): code = "sum(a)" @@ -81,7 +81,7 @@ """ interp = self.compile(code) assert interp.code.statements[0] == Assignment( - 'a', Operator(Variable('b'), "+", FloatConstant(3))) + 'a', Operator(Variable('b'), "+", NumberConstant(3))) class TestRunner(object): @@ -272,6 +272,14 @@ """) assert interp.results[0].value == 3 + def test_any(self): + interp = self.run(""" + a = [0,0,0,0,0.1,0,0,0,0] + b = any(a) + b -> 0 + """) + assert interp.results[0].value == 1 + def test_where(self): interp = self.run(''' a = [1, 0, 3, 0] diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -319,6 +319,28 @@ assert out0.dtype in (int, complex) assert (out0 == in0 * 2).all() + def test_frompyfunc_scalar(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def summer(in0): + out = np.empty(1, in0.dtype) + out[0] = in0.sum() + return out + + pysummer = np.frompyfunc([summer, summer], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, signature='(m,m)->()', + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d).reshape(1, 2, 2) + out0 = pysummer(in0) + assert out0 == in0.sum() + assert out0.dtype in (int, complex) + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -4,17 +4,37 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin +from 
rpython.jit.backend.x86.test.test_basic import Jit386Mixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats +from rpython.jit.metainterp.jitprof import Profiler +from rpython.jit.metainterp import counter +from rpython.rlib.jit import Counters +from rpython.rlib.rarithmetic import intmask from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +from rpython.jit.backend.detect_cpu import getcpuclass -py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) + +def get_profiler(): + from rpython.jit.metainterp import pyjitpl + return pyjitpl._warmrunnerdesc.metainterp_sd.profiler class TestNumpyJit(LLJitMixin): + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" graph = None interp = None + def setup_method(self, method): + if not self.CPUClass.vector_extension: + py.test.skip("needs vector extension to run (for now)") + + def assert_float_equal(self, f1, f2, delta=0.0001): + assert abs(f1-f2) < delta + def setup_class(cls): default = """ a = [1,2,3,4] @@ -52,12 +72,29 @@ w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value + if isinstance(w_res, boxes.W_Float32Box): + return float(w_res.value) elif isinstance(w_res, boxes.W_Int64Box): return float(w_res.value) + elif isinstance(w_res, boxes.W_Int32Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int16Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int8Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_UInt64Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt32Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt16Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, 
boxes.W_UInt8Box): + return float(intmask(w_res.value)) elif isinstance(w_res, boxes.W_LongBox): return float(w_res.value) elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) + print "ERROR: did not implement return type for interpreter" raise TypeError(w_res) if self.graph is None: @@ -65,122 +102,354 @@ listops=True, listcomp=True, backendopt=True, - graph_and_interp_only=True) + graph_and_interp_only=True, + ProfilerClass=Profiler, + vec=True) self.__class__.interp = interp self.__class__.graph = graph + def check_vectorized(self, expected_tried, expected_success): + profiler = get_profiler() + tried = profiler.get_counter(Counters.OPT_VECTORIZE_TRY) + success = profiler.get_counter(Counters.OPT_VECTORIZED) + assert tried >= success + assert tried == expected_tried + assert success == expected_success + def run(self, name): self.compile_graph() + profiler = get_profiler() + profiler.start() reset_jit() i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) return retval - def define_add(): + def define_float32_copy(): + return """ + a = astype(|30|, float32) + x1 = a -> 7 + x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + r = x1 + x2 + x3 + x4 + r + """ + def test_float32_copy(self): + result = self.run("float32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_int32_copy(): + return """ + a = astype(|30|, int32) + x1 = a -> 7 + x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + x1 + x2 + x3 + x4 + """ + def test_int32_copy(self): + result = self.run("int32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_float32_add(): + return """ + a = astype(|30|, float32) + b = a + a + b -> 15 + """ + def test_float32_add(self): + result = self.run("float32_add") + self.assert_float_equal(result, 15.0 + 15.0) + self.check_vectorized(2, 2) + + def define_float_add(): return """ a = |30| b = a + a - b -> 3 + b -> 17 """ - - def test_add(self): - result = self.run("add") - 
py.test.skip("don't run for now") - self.check_simple_loop({'raw_load': 2, 'float_add': 1, - 'raw_store': 1, 'int_add': 1, - 'int_ge': 1, 'guard_false': 1, 'jump': 1, - 'arraylen_gc': 1}) - assert result == 3 + 3 - - def define_float_add(): - return """ - a = |30| + 3 - a -> 3 - """ - def test_float_add(self): result = self.run("float_add") - assert result == 3 + 3 - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + self.assert_float_equal(result, 17.0 + 17.0) + self.check_vectorized(1, 1) - def define_pow(): + def define_uint_add(): return """ - a = |30| ** 2 - a -> 3 + a = astype(|30|, uint64) + b = a + a + b -> 17 """ + def test_uint_add(self): + result = self.run("uint_add") + assert int(result) == 17+17 + self.check_vectorized(2, 1) - def test_pow(self): - result = self.run("pow") - assert result == 3 ** 2 - self.check_trace_count(1) - self.check_simple_loop({ - 'call': 2, # ccall_pow / _ll_1_threadlocalref_get(rpy_errno) - 'float_eq': 2, - 'float_mul': 2, - 'guard_false': 2, - 'guard_not_invalidated': 1, - 'guard_true': 2, - 'int_add': 3, - 'int_ge': 1, - 'int_is_true': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + def define_float32_add_const(): + return """ + a = astype(|30|, float32) + b = a + 77.345 + b -> 29 + """ + def test_float32_add_const(self): + result = self.run("float32_add_const") + self.assert_float_equal(result, 29.0 + 77.345) + self.check_vectorized(2, 2) - def define_pow_int(): + def define_float_add_const(): + return """ + a = |30| + 25.5 + a -> 29 + """ + def test_float_add_const(self): + result = self.run("float_add_const") + self.assert_float_equal(result, 29.0 + 25.5) + self.check_vectorized(1, 1) + + def define_int_add_const(): return """ a = astype(|30|, int) - b = astype([2], int) - c = a ** b - c -> 3 + b = a + 1i + d = astype(|30|, int) + c = d + 2.0 + x1 = b -> 7 + 
x2 = b -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 """ + def test_int_add_const(self): + result = self.run("int_add_const") + assert int(result) == 7+1+8+1+11+2+12+2 + self.check_vectorized(2, 2) - def test_pow_int(self): - result = self.run("pow_int") - assert result == 3 ** 2 - self.check_trace_count(2) # extra one for the astype - del get_stats().loops[0] # we don't care about it - self.check_simple_loop({ - 'call': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, - 'raw_store': 1, - }) + def define_int_expand(): + return """ + a = astype(|30|, int) + c = astype(|1|, int) + c[0] = 16 + b = a + c + x1 = b -> 7 + x2 = b -> 8 + x1 + x2 + """ + def test_int_expand(self): + result = self.run("int_expand") + assert int(result) == 7+16+8+16 + self.check_vectorized(2, 2) + + def define_int32_expand(): + return """ + a = astype(|30|, int32) + c = astype(|1|, int32) + c[0] = 16i + b = a + c + x1 = b -> 7 + x2 = b -> 8 + x1 + x2 + """ + def test_int32_expand(self): + result = self.run("int32_expand") + assert int(result) == 7+16+8+16 + self.check_vectorized(2, 1) + + def define_int16_expand(): + return """ + a = astype(|30|, int16) + c = astype(|1|, int16) + c[0] = 16i + b = a + c + d = b -> 7:15 + sum(d) + """ + def test_int16_expand(self): + result = self.run("int16_expand") + i = 8 + assert int(result) == i*16 + sum(range(7,7+i)) + # currently is is not possible to accum for types with < 8 bytes + self.check_vectorized(3, 0) + + def define_int8_expand(): + return """ + a = astype(|30|, int8) + c = astype(|1|, int8) + c[0] = 8i + b = a + c + d = b -> 0:17 + sum(d) + """ + def test_int8_expand(self): + result = self.run("int8_expand") + assert int(result) == 17*8 + sum(range(0,17)) + # does not pay off to cast float64 -> int8 + # neither does sum + # a + c should work, but it is given as a parameter + # thus the accum must handle this! 
+ self.check_vectorized(3, 0) + + def define_int32_add_const(): + return """ + a = astype(|30|, int32) + b = a + 1i + d = astype(|30|, int32) + c = d + 2.0 + x1 = b -> 7 + x2 = b -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_int32_add_const(self): + result = self.run("int32_add_const") + assert int(result) == 7+1+8+1+11+2+12+2 + self.check_vectorized(2, 2) + + def define_float_mul_array(): + return """ + a = astype(|30|, float) + b = astype(|30|, float) + c = a * b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_float_mul_array(self): + result = self.run("float_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_int32_mul_array(): + return """ + a = astype(|30|, int32) + b = astype(|30|, int32) + c = a * b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_int32_mul_array(self): + result = self.run("int32_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_float32_mul_array(): + return """ + a = astype(|30|, float32) + b = astype(|30|, float32) + c = a * b + x1 = c -> 7 + x2 = c -> 8 + x3 = c -> 11 + x4 = c -> 12 + x1 + x2 + x3 + x4 + """ + def test_float32_mul_array(self): + result = self.run("float32_mul_array") + assert int(result) == 7*7+8*8+11*11+12*12 + self.check_vectorized(2, 2) + + def define_conversion(): + return """ + a = astype(|30|, int8) + b = astype(|30|, int) + c = a + b + sum(c) + """ + def test_conversion(self): + result = self.run("conversion") + assert result == sum(range(30)) + sum(range(30)) + self.check_vectorized(4, 2) # only sum and astype(int) succeed def define_sum(): return """ a = |30| sum(a) """ - def test_sum(self): result = self.run("sum") assert result == sum(range(30)) - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 2, - 'int_ge': 1, 
- 'jump': 1, - 'raw_load': 1, - }) + self.check_vectorized(1, 1) + + def define_sum(): + return """ + a = |30| + sum(a) + """ + def test_sum(self): + result = self.run("sum") + assert result == sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_int(): + return """ + a = astype(|65|,int) + sum(a) + """ + def test_sum_int(self): + result = self.run("sum_int") + assert result == sum(range(65)) + self.check_vectorized(2, 2) + + def define_sum_multi(): + return """ + a = |30| + b = sum(a) + c = |60| + d = sum(c) + b + d + """ + + def test_sum_multi(self): + result = self.run("sum_multi") + assert result == sum(range(30)) + sum(range(60)) + self.check_vectorized(1, 1) + + def define_sum_float_to_int16(): + return """ + a = |30| + sum(a,int16) + """ + def test_sum_float_to_int16(self): + result = self.run("sum_float_to_int16") + assert result == sum(range(30)) + # one can argue that this is not desired, + # but unpacking exactly hits savings = 0 + self.check_vectorized(1, 1) + def define_sum_float_to_int32(): + return """ + a = |30| + sum(a,int32) + """ + def test_sum_float_to_int32(self): + result = self.run("sum_float_to_int32") + assert result == sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_float_to_float32(): + return """ + a = |30| + sum(a,float32) + """ + def test_sum_float_to_float32(self): + result = self.run("sum_float_to_float32") + assert result == sum(range(30)) + self.check_vectorized(1, 1) + + def define_sum_float_to_uint64(): + return """ + a = |30| + sum(a,uint64) + """ + def test_sum_float_to_uint64(self): + result = self.run("sum_float_to_uint64") + assert result == sum(range(30)) + self.check_vectorized(1, 0) # unsigned def define_cumsum(): return """ @@ -192,17 +461,6 @@ def test_cumsum(self): result = self.run("cumsum") assert result == 15 - self.check_trace_count(1) - self.check_simple_loop({ - 'float_add': 1, - 'guard_false': 1, - 'guard_not_invalidated': 1, - 'int_add': 3, - 'int_ge': 1, - 'jump': 1, - 'raw_load': 1, 
- 'raw_store': 1, - }) def define_axissum(): return """ @@ -216,47 +474,7 @@ assert result == 30 # XXX note - the bridge here is fairly crucial and yet it's pretty # bogus. We need to improve the situation somehow. - self.check_trace_count(2) - self.check_simple_loop({ - 'float_add': 1, - 'getarrayitem_gc': 2, - 'guard_false': 2, - 'guard_not_invalidated': 1, - 'guard_true': 1, - 'int_add': 4, - 'int_ge': 1, - 'int_is_zero': 1, - 'int_lt': 1, - 'jump': 1, - 'raw_load': 2, - 'raw_store': 1, - 'setarrayitem_gc': 1, - }) - self.check_resops({ - 'float_add': 2, - 'getarrayitem_gc': 5, - 'getarrayitem_gc_pure': 7, - 'getfield_gc': 5, - 'getfield_gc_pure': 51, - 'guard_class': 3, - 'guard_false': 12, - 'guard_nonnull': 11, - 'guard_nonnull_class': 3, - 'guard_not_invalidated': 2, - 'guard_true': 10, - 'guard_value': 6, - 'int_add': 13, - 'int_ge': 4, - 'int_is_true': 3, - 'int_is_zero': 4, - 'int_le': 2, - 'int_lt': 3, - 'int_sub': 1, - 'jump': 2, - 'raw_load': 4, - 'raw_store': 2, - 'setarrayitem_gc': 4, - }) + self.check_vectorized(1, 0) def define_reduce(): return """ @@ -270,9 +488,12 @@ i = self.code_mapping['reduce'] # run it twice retval = self.interp.eval_graph(self.graph, [i]) + assert retval == sum(range(1,11)) retval = self.interp.eval_graph(self.graph, [i]) + assert retval == sum(range(1,11)) # check that we got only one loop assert len(get_stats().loops) == 1 + self.check_vectorized(2, 1) def test_reduce_axis_compile_only_once(self): self.compile_graph() @@ -283,69 +504,42 @@ retval = self.interp.eval_graph(self.graph, [i]) # check that we got only one loop assert len(get_stats().loops) == 1 + self.check_vectorized(3, 1) def define_prod(): return """ - a = |30| + a = [1,2,3,4,1,2,3,4] + prod(a) + """ + + def define_prod_zero(): + return """ + a = [1,2,3,4,1,2,3,0] prod(a) """ def test_prod(self): result = self.run("prod") - expected = 1 - for i in range(30): - expected *= i * 2 - assert result == expected - self.check_trace_count(1) - 
self.check_simple_loop({ - 'float_mul': 1, - 'guard_false': 1, From noreply at buildbot.pypy.org Thu Oct 15 18:47:04 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 15 Oct 2015 18:47:04 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: added ss_d, ss_e, ss_f. now the a subset of the whole range is taken as paramters testing the corner cases better in the assembler (e.g. immediate 8 bit [-128, -1, 0, 1, 127] + some random inbetween -128 and 127) Message-ID: <20151015164704.B416F1C15B6@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80244:b3805605f684 Date: 2015-10-15 18:47 +0200 http://bitbucket.org/pypy/pypy/changeset/b3805605f684/ Log: added ss_d, ss_e, ss_f. now the a subset of the whole range is taken as paramters testing the corner cases better in the assembler (e.g. immediate 8 bit [-128,-1,0,1,127] + some random inbetween -128 and 127) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -28,11 +28,24 @@ pass def arguments(args_str): + """ + Available names: + r - register + i4 - immediate 4 bits (signed) + u4 - immediate 4 bits (unsigend) + bd - base displacement + l4db - length base displacement (4 bit) + l8db - length base displacement (8 bit) + """ def impl(func): func._arguments_ = args_str.split(',') return func return impl +BIT_MASK_4 = 0xF +BIT_MASK_12 = 0xFFF +BIT_MASK_20 = 0xFFFFF + @always_inline def encode_base_displace(mc, base_displace): displace = base_displace.displace # & 0x3ff @@ -64,7 +77,7 @@ index = idxbasedisp.index byte = (reg_or_mask & 0x0f) << 4 | index & 0xf self.writechar(chr(byte)) - displace = idxbasedisp.displace & 0x3ff + displace = idxbasedisp.displace & BIT_MASK_12 base = idxbasedisp.base & 0xf byte = displace >> 8 & 0xf | base << 4 self.writechar(chr(byte)) @@ -77,12 +90,13 @@ index = idxbasedisp.index byte = (reg_or_mask & 
0x0f) << 4 | index & 0xf self.writechar(chr(byte)) - displace = idxbasedisp.displace & 0x3ff + displace = idxbasedisp.displace & 0xfffff base = idxbasedisp.base & 0xf byte = displace >> 8 & 0xf | base << 4 self.writechar(chr(byte)) self.writechar(chr(displace & 0xff)) - self.writechar(chr(displace >> 12 & 0xff)) + byte = displace >> 12 & 0xff + self.writechar(chr(byte)) self.writechar(opcode2) return encode_rxy @@ -130,7 +144,7 @@ return encode_ssb def build_ssc(mnemonic, (opcode1,)): - @arguments('l,l,u4') + @arguments('lbp,lbp,u4') def encode_ssc(self, len_base_disp1, len_base_disp2, uimm4): self.writechar(opcode1) byte = (len_base_disp1.length & 0xf) << 4 | uimm4 & 0xf @@ -139,6 +153,34 @@ encode_base_displace(self, len_base_disp2) return encode_ssc +def build_ssd(mnemonic, (opcode,)): + @arguments('rbd,bd,r') + def encode_ssd(self, index_base_disp, base_disp, reg): + self.writechar(opcode) + byte = (index_base_disp.index & 0xf) << 4 | reg & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, index_base_disp) + encode_base_displace(self, base_disp) + return encode_ssd + +def build_sse(mnemonic, (opcode,)): + @arguments('r,bd,r,bd') + def encode_sse(self, reg1, reg3, base_disp2, base_disp4): + self.writechar(opcode) + byte = (reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4 + self.writechar(chr(byte)) + encode_base_displace(self, base_disp2) + encode_base_displace(self, base_disp4) + return encode_sse + +def build_ssf(mnemonic, (opcode,)): + def encode_ssf(self, base_disp, len_base_disp): + self.writechar(opcode) + self.writechar(chr(len_base_disp.length & 0xff)) + encode_base_displace(self, base_disp) + encode_base_displace(self, len_base_disp) + return encode_ssf + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -153,6 +195,9 @@ 'NC': (build_ssa, ['\xD4']), 'AP': (build_ssb, ['\xFA']), 'SRP': (build_ssc, ['\xF0']), + 'MVCK': (build_ssd, ['\xD9']), + 'LMD': (build_sse, ['\xEF']), + 'PKA': (build_ssf, ['\xE9']), } def 
build_instr_codes(clazz): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -29,7 +29,8 @@ return # ignore the extra character '\x40' print self.op post = self.expected[self.index+1:self.index+1+15] - generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index] + char )+"..." + generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index]) + "!" + \ + hexdump([char])+ "!" +hexdump(post) print generated expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." print expected @@ -76,6 +77,8 @@ base = self.base return "{disp}(%r{index},%r{base})".format(**locals()) + __repr__ = __str__ + class FakeBaseDisplace(object): def __init__(self, base, disp): self.base = base @@ -86,6 +89,8 @@ base = self.base return "{disp}(%r{base})".format(**locals()) + __repr__ = __str__ + class FakeLengthBaseDisplace(object): def __init__(self, len, base, disp): self.length = len @@ -98,27 +103,27 @@ length = self.length + 1 return "{disp}({length},%r{base})".format(**locals()) -def build_base_disp(base_bits, displace_bits): - possibilities = itertools.product(range(base_bits), range(displace_bits)) + __repr__ = __str__ + +def test_range(bits, signed=False, count=24): + if isinstance(bits, tuple): + bits, signed = bits + if signed: + bits -= 1 + maximum = 2**bits + return [-maximum,-1,0,1,maximum-1] + [random.randrange(-maximum,maximum) for i in range(count)] + maximum = 2**bits + return [0,1,maximum-1] + [random.randrange(0,maximum) for i in range(count)] + +def build_fake(clazz, *arg_bits): + possibilities = itertools.product(*[test_range(b) for b in arg_bits]) results = [] - for (base,disp) in possibilities: - results.append(FakeBaseDisplace(base,disp)) - return results - -def 
build_idx_base_disp(index_bits, base_bits, displace_bits): - possibilities = itertools.product(range(index_bits), range(base_bits), - range(displace_bits)) - results = [] - for (index,base,disp) in possibilities: - results.append(FakeIndexBaseDisplace(index,base,disp)) - return results - -def build_len_base_disp(len_bits, base_bits, displace_bits): - possibilities = itertools.product(range(len_bits), range(base_bits), - range(displace_bits)) - results = [] - for (length,base,disp) in possibilities: - results.append(FakeLengthBaseDisplace(length,base,disp)) + i = 0 + for args in possibilities: + results.append(clazz(*args)) + i+=1 + if i > 20: + break return results class TestZARCH(object): @@ -128,12 +133,12 @@ REGNAMES = ['%%r%d' % i for i in REGS] accept_unnecessary_prefix = None methname = '?' - BASE_DISPLACE = build_base_disp(8,12) - BASE_DISPLACE_LONG = build_base_disp(8,20) - INDEX_BASE_DISPLACE = build_idx_base_disp(8,8,12) - INDEX_BASE_DISPLACE_LONG = build_idx_base_disp(8,8,20) - LENGTH4_BASE_DISPLACE = build_len_base_disp(4,8,12) - LENGTH8_BASE_DISPLACE = build_len_base_disp(8,8,12) + BASE_DISPLACE = build_fake(FakeBaseDisplace,4,12) + BASE_DISPLACE_LONG = build_fake(FakeBaseDisplace,4,(20,True)) + INDEX_BASE_DISPLACE = build_fake(FakeIndexBaseDisplace,4,4,12) + INDEX_BASE_DISPLACE_LONG = build_fake(FakeIndexBaseDisplace,4,4,(20,True)) + LENGTH4_BASE_DISPLACE = build_fake(FakeLengthBaseDisplace,4,4,12) + LENGTH8_BASE_DISPLACE = build_fake(FakeLengthBaseDisplace,8,4,12) def reg_tests(self): return self.REGS @@ -172,32 +177,12 @@ assert match return getattr(self, match.group(1) + "_tests")() - def uimm16_tests(self): - v = ([0,1,65535] + - [random.randrange(0,65535) for i in range(COUNT1)]) - return v - def imm16_tests(self): - v = ([-32768,-1,0,1,32767] + - [random.randrange(-32768, 32767) for i in range(COUNT1)]) - return v - - def imm8_tests(self): - v = ([-128,-1,0,1,127] + - [random.randrange(-127, 127) for i in range(COUNT1)]) - return v - def 
uimm8_tests(self): - v = ([0,1,255] + - [random.randrange(0,255) for i in range(COUNT1)]) - return v - def uimm4_tests(self): - return list(range(0,16)) - - def imm32_tests(self): - v = ([-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255] + - [random.randrange(-32768,32768)<<16 | - random.randrange(0,65536) for i in range(COUNT1)] + - [random.randrange(128, 256) for i in range(COUNT1)]) - return self.imm8_tests() + v + def uimm16_tests(self): return test_range(16) + def imm16_tests(self): return test_range(16,signed=True) + def imm8_tests(self): return test_range(8,signed=True) + def uimm8_tests(self): return test_range(8) + def uimm4_tests(self): return test_range(4) + def imm32_tests(self): return test_range(32, signed=True) def relative_tests(self): py.test.skip("explicit test required for %r" % (self.methname,)) @@ -218,14 +203,15 @@ def operand_combinations(self, modes, arguments): remap = { + 'rre': 'rr', 'rxy': 'rx', 'siy': 'si', - 'rre': 'rr', 'ssa': 'Ls', 'ssb': 'll', 'ssc': 'lsi', 'ssd': 'xsr', 'sse': 'rrss', + 'ssf': 'sL', } mapping = self.get_mapping_asm_to_str() modes = remap.get(modes, modes) @@ -302,6 +288,9 @@ 'ssa': (tests['L'], tests['s']), 'ssb': (tests['l'], tests['l']), 'ssc': (tests['l'], tests['s'], tests['i']), + 'ssd': (tests['x'], tests['s'], tests['r']), + 'sse': (tests['r'], tests['r'], tests['s'], tests['s']), + 'ssf': (tests['s'], tests['L']), } if modes in tests_all: combinations = [f(i) for i,f in enumerate(tests_all[modes])] From noreply at buildbot.pypy.org Thu Oct 15 19:14:58 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 19:14:58 +0200 (CEST) Subject: [pypy-commit] pypy default: raise a ValueError with a clear msg when attempting to create a ndarray from a type(ndarray) Message-ID: <20151015171458.29B241C002A@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80245:d924723d483b Date: 2015-10-15 20:15 +0300 http://bitbucket.org/pypy/pypy/changeset/d924723d483b/ Log: raise a ValueError with a clear 
msg when attempting to create a ndarray from a type(ndarray) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -80,6 +80,7 @@ w_dict = W_TypeObject("dict") w_object = W_TypeObject("object") w_buffer = W_TypeObject("buffer") + w_type = W_TypeObject("type") def __init__(self, config=None): """NOT_RPYTHON""" diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -86,6 +86,9 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + # numpy testing calls array(type(array([]))) and expects a ValueError + if space.isinstance_w(w_object, space.w_type): + raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -292,6 +292,8 @@ a = np.array('123', dtype='intp') assert a == 123 assert a.dtype == np.intp + # required for numpy test suite + raises(ValueError, np.array, type(a)) def test_array_copy(self): from numpy import array From noreply at buildbot.pypy.org Thu Oct 15 21:16:21 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 21:16:21 +0200 (CEST) Subject: [pypy-commit] pypy default: add new docs to indices Message-ID: <20151015191621.515861C01DC@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80249:11d17ae1d0e4 Date: 2015-10-15 22:15 +0300 http://bitbucket.org/pypy/pypy/changeset/11d17ae1d0e4/ Log: add new docs to indices diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst 
--- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-15.11.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-15.11.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst From noreply at buildbot.pypy.org Thu Oct 15 21:16:14 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 21:16:14 +0200 (CEST) Subject: [pypy-commit] pypy release-15.11: start a release Message-ID: <20151015191614.E60751C00E2@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-15.11 Changeset: r80246:92ba90a94650 Date: 2015-10-15 21:56 +0300 http://bitbucket.org/pypy/pypy/changeset/92ba90a94650/ Log: start a release diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -48,22 +48,23 @@ Michael Hudson David Schneider Holger Krekel + Ronan Lamy + Manuel Jacob Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen Richard Emslie + Richard Plangger Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -87,7 +88,6 @@ Ludovic Aubry Jacob Hallen Jason Creighton - Richard Plangger Alex Martelli Michal Bendowski stian @@ -119,10 +119,12 @@ Mark Pearse Simon Cross Andreas Stührk + Edd Barrett Stefano Rivera Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy @@ -134,14 +136,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett Wanja Saatkamp Gerald Klix Mike Blume Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +153,7 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -200,9 +201,11 @@ Alex Perry Vincent Legoll Alan McIntyre + William Leslie Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre Christian Tismer Marc Abramowitz Dan Stromberg @@ -214,7 +217,6 @@ Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -226,6 +228,7 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Vaibhav Sood Ryan Gonzalez Ian Foote Kristjan Valur Jonsson @@ -233,8 +236,11 @@ Neil Blakey-Milner Lutz Paelike Lucio Torre + Spenser Bauman Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -251,7 +257,6 @@ roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson @@ -283,6 +288,7 @@ shoma hosaka Daniel Neuhäuser Ben Mather + Niclas Olofsson halgari Boglarka Vezer Chris Pressey diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -18,23 +18,23 @@ Michael Hudson David Schneider Holger Krekel + Ronan Lamy + Manuel Jacob Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen Richard Emslie + Richard Plangger Alexander Schremmer Dan Villiom Podlaski Christiansen Lukas Diekmann Sven Hager Anders Lehmann - Richard Plangger + Remi Meier Aurelien Campeas - Remi Meier Niklaus Haldimann Camillo Bruni Laura Creighton @@ -89,10 +89,12 @@ Mark Pearse Simon Cross Andreas Stührk + Edd Barrett Stefano Rivera Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski Paul deGrandis Ilya Osadchiy @@ -104,14 +106,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett Wanja Saatkamp Gerald Klix Mike Blume Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +123,7 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -170,9 +171,11 @@ Alex Perry Vincent Legoll Alan McIntyre + William Leslie Alexander Sedov Attila Gobi Christopher Pope + Devin Jeanpierre Christian Tismer Marc Abramowitz Dan Stromberg @@ -184,7 +187,6 @@ Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -196,6 +198,7 @@ Alejandro J. Cura Jacob Oscarson Travis Francis Athougies + Vaibhav Sood Ryan Gonzalez Ian Foote Kristjan Valur Jonsson @@ -203,8 +206,11 @@ Neil Blakey-Milner Lutz Paelike Lucio Torre + Spenser Bauman Lars Wassermann + Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -221,7 +227,6 @@ roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia timo at eistee.fritz.box Andrew Thompson diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -69,7 +69,9 @@ 'Rami Chowdhury': ['necaris'], 'Stanislaw Halik':['w31rd0'], 'Wenzhu Man':['wenzhu man', 'wenzhuman'], - 'Anton Gulenko':['anton gulenko'], + 'Anton Gulenko':['anton gulenko', 'anton_gulenko'], + 'Richard Lancaster':['richardlancaster'], + 'William Leslie':['William Leslie', 'William ML Leslie'], } alias_map = {} diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "2.7.0-alpha0" +#define PYPY_VERSION "15.11.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 7, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (15, 11, 0, "final", 0) #XXX # sync patchlevel.h import pypy From noreply at buildbot.pypy.org Thu Oct 15 21:16:23 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 21:16:23 +0200 (CEST) Subject: [pypy-commit] pypy release-15.11: merge default into release branch Message-ID: <20151015191623.674A91C00E2@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-15.11 Changeset: r80250:13ef2a718c53 Date: 2015-10-15 22:16 +0300 http://bitbucket.org/pypy/pypy/changeset/13ef2a718c53/ Log: merge default into release branch diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-15.11.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-15.11.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-15.11.0.rst @@ -0,0 +1,80 @@ +============ +PyPy 15.11.0 +============ + +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy python2.7.10 +compatible interpreter with a Just In Time compiler. +We have improved warmup time and memory overhead used for tracing, added vectorization +for numpy and general loops where possible on x86 hardware, ... +and increased functionality of numpy. 
+ +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and our volunteers and contributors. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights +=========== + +* Bug Fixes + + * ... + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy. + +* New features: + + * ... + +* Numpy: + + * ... + +* Performance improvements: + + * ... + +.. _`vmprof`: https://vmprof.readthedocs.org +.. 
_resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-15.11.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-15.11.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-15.11.0.rst @@ -1,6 +1,6 @@ -======================= -What's new in PyPy 2.6+ -======================= +======================== +What's new in PyPy 15.11 +======================== .. this is a revision shortly after release-2.6.1 .. startrev: 07769be4057b diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,87 +1,8 @@ -======================= -What's new in PyPy 2.6+ -======================= +========================= +What's new in PyPy 15.11+ +========================= -.. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. this is a revision shortly after release-15.11.0 +.. startrev: d924723d483b -.. branch: keys_with_hash -Improve the performance of dict.update() and a bunch of methods from -sets, by reusing the hash value stored in one dict when inspecting -or changing another dict with that key. -.. branch: optresult-unroll -A major refactoring of the ResOperations that kills Box. Also rewrote -unrolling to enable future enhancements. Should improve warmup time -by 20% or so. - -.. branch: optimize-cond-call -Optimize common sequences of operations like -``int_lt/cond_call`` in the JIT backends - -.. branch: missing_openssl_include -Fix for missing headers in OpenBSD, already applied in downstream ports - -.. branch: gc-more-incremental -Remove a source of non-incremental-ness in the GC: now -external_malloc() no longer runs gc_step_until() any more. 
If there -is a currently-running major collection, we do only so many steps -before returning. This number of steps depends on the size of the -allocated object. It is controlled by tracking the general progress -of these major collection steps and the size of old objects that -keep adding up between them. - -.. branch: remember-tracing-counts -Reenable jithooks - -.. branch: detect_egd2 - -.. branch: shadowstack-no-move-2 -Issue #2141: fix a crash on Windows and OS/X and ARM when running -at least 20 threads. - -.. branch: numpy-ctypes - -Add support for ndarray.ctypes property. - -.. branch: share-guard-info - -Share guard resume data between consecutive guards that have only -pure operations and guards in between. - -.. branch: issue-2148 - -Fix performance regression on operations mixing numpy scalars and Python -floats, cf. issue #2148. - -.. branch: cffi-stdcall -Win32: support '__stdcall' in CFFI. - -.. branch: callfamily - -Refactorings of annotation and rtyping of function calls. - -.. branch: fortran-order - -Allow creation of fortran-ordered ndarrays - -.. branch: type_system-cleanup - -Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. - -.. branch: cffi-handle-lifetime - -ffi.new_handle() returns handles that work more like CPython's: they -remain valid as long as the target exists (unlike the previous -version, where handles become invalid *before* the __del__ is called). - -.. branch: ufunc-casting - -allow automatic casting in ufuncs (and frompypyfunc) to cast the -arguments to the allowed function type declarations, fixes various -failures in linalg cffi functions - -.. branch: vecopt -.. 
branch: vecopt-merge - -A new optimization pass to use emit vectorized loops From noreply at buildbot.pypy.org Thu Oct 15 21:16:17 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 21:16:17 +0200 (CEST) Subject: [pypy-commit] pypy default: add release notes and whats new for PyPy 15.11 Message-ID: <20151015191617.2718D1C0104@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80247:e99e98d6ab73 Date: 2015-10-15 22:12 +0300 http://bitbucket.org/pypy/pypy/changeset/e99e98d6ab73/ Log: add release notes and whats new for PyPy 15.11 diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-15.11.0.rst @@ -0,0 +1,80 @@ +============ +PyPy 15.11.0 +============ + +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy python2.7.10 +compatible interpreter with a Just In Time compiler. +We have improved warmup time and memory overhead used for tracing, added vectorization +for numpy and general loops where possible on x86 hardware, ... +and increased functionality of numpy. + +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project, and our volunteers and contributors. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? 
+============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights +=========== + +* Bug Fixes + + * ... + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy. + +* New features: + + * ... + +* Numpy: + + * ... + +* Performance improvements: + + * ... + +.. _`vmprof`: https://vmprof.readthedocs.org +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-15.11.0.rst rename from pypy/doc/whatsnew-head.rst rename to pypy/doc/whatsnew-15.11.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-15.11.0.rst @@ -1,6 +1,6 @@ -======================= -What's new in PyPy 2.6+ -======================= +======================== +What's new in PyPy 15.11 +======================== .. this is a revision shortly after release-2.6.1 .. 
startrev: 07769be4057b From noreply at buildbot.pypy.org Thu Oct 15 21:16:19 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 15 Oct 2015 21:16:19 +0200 (CEST) Subject: [pypy-commit] pypy default: restart whatsnew-head Message-ID: <20151015191619.38B3C1C0165@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80248:407cf334f66a Date: 2015-10-15 22:12 +0300 http://bitbucket.org/pypy/pypy/changeset/407cf334f66a/ Log: restart whatsnew-head diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-head.rst @@ -0,0 +1,8 @@ +========================= +What's new in PyPy 15.11+ +========================= + +.. this is a revision shortly after release-15.11.0 +.. startrev: d924723d483b + + From noreply at buildbot.pypy.org Fri Oct 16 03:06:25 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 Oct 2015 03:06:25 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Add ClassDesc method to retrieve interp-level class attributes used as translatioin hints (like '_virtualizable_', '_alloc_flavor_', etc.) Message-ID: <20151016010625.617D11C00E2@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80251:b49e148d9af1 Date: 2015-10-16 02:06 +0100 http://bitbucket.org/pypy/pypy/changeset/b49e148d9af1/ Log: Add ClassDesc method to retrieve interp-level class attributes used as translatioin hints (like '_virtualizable_', '_alloc_flavor_', etc.) 
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ b/rpython/annotator/classdesc.py @@ -274,8 +274,6 @@ # create the Attribute and do the generalization asked for newattr = Attribute(attr) if s_value: - #if newattr.name == 'intval' and getattr(s_value, 'unsigned', False): - # import pdb; pdb.set_trace() newattr.s_value = s_value # keep all subattributes' values @@ -690,6 +688,18 @@ else: return cdesc + def get_param(self, name, default=None, inherit=True): + if inherit: + try: + return self.read_attribute(name).value + except AttributeError: + return default + else: + try: + return self.classdict[name].value + except KeyError: + return default + def read_attribute(self, name, default=NODEFAULT): cdesc = self.lookup(name) if cdesc is None: diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -299,8 +299,7 @@ if isinstance(s_x, annmodel.SomeInstance): from rpython.flowspace.model import Constant classdesc = s_x.classdef.classdesc - virtualizable = classdesc.read_attribute('_virtualizable_', - Constant(None)).value + virtualizable = classdesc.get_param('_virtualizable_') if virtualizable is not None: flags = s_x.flags.copy() flags['access_directly'] = True @@ -340,7 +339,7 @@ Used by _vmprof """ from rpython.rtyper.lltypesystem import lltype, llmemory - + return lltype.nullptr(llmemory.GCREF.TO) class GetVirtualizableTokenEntry(ExtRegistryEntry): diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -97,8 +97,7 @@ unboxed = [subdef for subdef in classdef.getallsubdefs() if subdef.classdesc.pyobj is not None and issubclass(subdef.classdesc.pyobj, UnboxedValue)] - virtualizable = classdef.classdesc.read_attribute( - '_virtualizable_', Constant(False)).value + virtualizable = classdef.classdesc.get_param('_virtualizable_', False) config = rtyper.annotator.translator.config 
usetagging = len(unboxed) != 0 and config.translation.taggedpointers @@ -529,26 +528,26 @@ self.allinstancefields = allinstancefields def _check_for_immutable_hints(self, hints): - loc = self.classdef.classdesc.lookup('_immutable_') - if loc is not None: - if loc is not self.classdef.classdesc: + hints = hints.copy() + classdesc = self.classdef.classdesc + immut = classdesc.get_param('_immutable_', inherit=False) + if immut is None: + if classdesc.get_param('_immutable_', inherit=True): raise ImmutableConflictError( "class %r inherits from its parent _immutable_=True, " "so it should also declare _immutable_=True" % ( self.classdef,)) - if loc.classdict.get('_immutable_').value is not True: - raise TyperError( - "class %r: _immutable_ = something else than True" % ( - self.classdef,)) - hints = hints.copy() + elif immut is not True: + raise TyperError( + "class %r: _immutable_ = something else than True" % ( + self.classdef,)) + else: hints['immutable'] = True self.immutable_field_set = set() # unless overwritten below - if self.classdef.classdesc.lookup('_immutable_fields_') is not None: - hints = hints.copy() - immutable_fields = self.classdef.classdesc.classdict.get( - '_immutable_fields_') - if immutable_fields is not None: - self.immutable_field_set = set(immutable_fields.value) + if classdesc.get_param('_immutable_fields_'): + own_fields = classdesc.get_param('_immutable_fields_', inherit=False) + if own_fields is not None: + self.immutable_field_set = set(own_fields) accessor = FieldListAccessor() hints['immutable_fields'] = accessor return hints diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -390,8 +390,7 @@ def getgcflavor(classdef): classdesc = classdef.classdesc - alloc_flavor = classdesc.read_attribute('_alloc_flavor_', - Constant('gc')).value + alloc_flavor = classdesc.get_param('_alloc_flavor_', default='gc') return alloc_flavor def externalvsinternal(rtyper, 
item_repr): # -> external_item_repr, (internal_)item_repr diff --git a/rpython/rtyper/rvirtualizable.py b/rpython/rtyper/rvirtualizable.py --- a/rpython/rtyper/rvirtualizable.py +++ b/rpython/rtyper/rvirtualizable.py @@ -11,13 +11,13 @@ def __init__(self, rtyper, classdef): self._super().__init__(rtyper, classdef) classdesc = classdef.classdesc - if '_virtualizable2_' in classdesc.classdict: + if classdesc.get_param('_virtualizable2_'): raise Exception("_virtualizable2_ is now called _virtualizable_, " "please rename") - if '_virtualizable_' in classdesc.classdict: + if classdesc.get_param('_virtualizable_', inherit=False): basedesc = classdesc.basedesc - assert basedesc is None or basedesc.lookup( - '_virtualizable_') is None + assert (basedesc is None or + basedesc.get_param('_virtualizable_') is None) self.top_of_virtualizable_hierarchy = True self.accessor = FieldListAccessor() else: @@ -37,9 +37,9 @@ self._super()._setup_repr(llfields, hints=hints) else: self._super()._setup_repr(hints = hints) - c_vfields = self.classdef.classdesc.classdict['_virtualizable_'] + vfields = self.classdef.classdesc.get_param('_virtualizable_') self.my_redirected_fields = self._parse_field_list( - c_vfields.value, self.accessor, hints) + vfields, self.accessor, hints) else: self._super()._setup_repr() # ootype needs my_redirected_fields even for subclass. 
lltype does diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -91,8 +91,6 @@ class R: _alloc_flavor_ = "raw" - NDF = object() - class DummyClsDescDef: def __init__(self, cls): self._cls = cls @@ -102,14 +100,8 @@ def getmro(self): return [self] - def read_attribute(self, attr, default=NDF): - try: - return Constant(getattr(self._cls, attr)) - except AttributeError: - if default is NDF: - raise - else: - return default + def get_param(self, name, default=None, inherit=True): + return getattr(self._cls, name, default) assert rmodel.getgcflavor(DummyClsDescDef(A)) == 'gc' assert rmodel.getgcflavor(DummyClsDescDef(B)) == 'gc' From noreply at buildbot.pypy.org Fri Oct 16 07:49:27 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Oct 2015 07:49:27 +0200 (CEST) Subject: [pypy-commit] pypy default: refactor whatsnew-* into release notes Message-ID: <20151016054927.CAE811C0726@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80252:18f21006dc31 Date: 2015-10-16 08:49 +0300 http://bitbucket.org/pypy/pypy/changeset/18f21006dc31/ Log: refactor whatsnew-* into release notes diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -2,11 +2,36 @@ PyPy 15.11.0 ============ -We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy python2.7.10 -compatible interpreter with a Just In Time compiler. -We have improved warmup time and memory overhead used for tracing, added vectorization -for numpy and general loops where possible on x86 hardware, ... -and increased functionality of numpy. +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy +python2.7.10 compatible interpreter with a Just In Time compiler. 
+We have improved `warmup time and memory overhead used for tracing`_, added +`vectorization`_ for numpy and general loops where possible on x86 hardware, +refactored rough edges in rpython, and increased functionality of numpy. + +Vectorization +============= + +Richard Plangger began work in March and continued over a Google Summer of Code +to add a optimization step to the trace optimizer. The step recognizes common +constructs and emits SIMD code where possible, much as any modern compiler does. +This vectorization happens while tracing running code, so it is actually easier +at run-time to determine the +availability of possible vectorization than it is for ahead-of-time compilers. + +Availability of SIMD hardware is detected at run time, without needing to +precompile various code paths into the executable. + +Internal Refactoring and Warmup Time Improvement +================================================ + +Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow +PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, +leading to a warmup time improvement of 20% or so at the cost of a minor +regression in jitted code speed. + +.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 +.. _`vectorization`: http://pypyvecopt.blogspot.co.at/ +.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html You can download the PyPy 15.11 release here: @@ -45,28 +70,49 @@ .. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ .. _`dynamic languages`: http://pypyjs.org -Highlights -=========== +Highlights (since 2.6.1 release two months ago) +=============================================== * Bug Fixes - * ... 
+ * Applied OPENBSD downstream fixes + + * Fix a crash on non-linux when running more than 20 threads + + * In cffi, ffi.new_handle() is more cpython compliant * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at - #pypy. + #pypy * New features: - * ... + * Add an optimization pass to vectorize loops using x86 SIMD intrinsics. + + * Support __stdcall on Windows in CFFI * Numpy: - * ... + * Add support for ndarray.ctypes + + * Fast path for mixing numpy scalars and floats + + * Add support for creating Fortran-ordered ndarrays + + * Fix casting failures in linalg (by extending ufunc casting) * Performance improvements: - * ... + * Reuse hashed keys across dictionaries and sets + + * Refactor JIT interals to improve warmup time by 20% or so at the cost of a + minor regression in JIT speed + + * Recognize patterns of common sequences in the JIT backends and optimize them + + * Make the garbage collecter more intcremental over external_malloc() calls + + * Share guard resume data where possible which reduces memory usage .. _`vmprof`: https://vmprof.readthedocs.org .. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html diff --git a/rpython/doc/glossary.rst b/rpython/doc/glossary.rst --- a/rpython/doc/glossary.rst +++ b/rpython/doc/glossary.rst @@ -29,6 +29,9 @@ Code that makes it possible to write :doc:`RPython's garbage collectors ` in Python itself. + guard + a small test that checks if assumptions the JIT makes during tracing are still true + JIT :doc:`just in time compiler `. 
From noreply at buildbot.pypy.org Fri Oct 16 08:08:10 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Oct 2015 08:08:10 +0200 (CEST) Subject: [pypy-commit] pypy default: add a numpy section Message-ID: <20151016060810.204DB1C12DC@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80253:ab9ecde87dba Date: 2015-10-16 09:08 +0300 http://bitbucket.org/pypy/pypy/changeset/ab9ecde87dba/ Log: add a numpy section diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -29,6 +29,16 @@ leading to a warmup time improvement of 20% or so at the cost of a minor regression in jitted code speed. +Numpy +===== + +Our implementation of numpy continues to improve. ndarray and the numeric dtypes +are very close to feature-complete; record, string and unicode dtypes are mostly +supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 +modules that call out to the same underlying libraries that upstream numpy uses. +Please try it out, especially using the new vectorization (via --jit vec=1 on the +command line) and let us know what is missing for your code. + .. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. 
_`guards`: http://rpython.readthedocs.org/en/latest/glossary.html From noreply at buildbot.pypy.org Fri Oct 16 08:09:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 08:09:12 +0200 (CEST) Subject: [pypy-commit] cffi default: Document ffi.memmove() Message-ID: <20151016060912.1DD531C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2340:9d51e2cd2f08 Date: 2015-10-16 08:10 +0200 http://bitbucket.org/cffi/cffi/changeset/9d51e2cd2f08/ Log: Document ffi.memmove() diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -547,7 +547,8 @@ On Win32, functions can have two main calling conventions: either "cdecl" (the default), or "stdcall" (also known as "WINAPI"). There -are also other, rare calling conventions; these are not supported. +are also other rare calling conventions, but these are not supported. +*New in version 1.3.* When you issue calls from Python to C, the implementation is such that it works with any of these two main calling conventions; you don't @@ -577,10 +578,11 @@ These calling convention specifiers are accepted but ignored on any platform other than 32-bit Windows. -*New in version 1.3:* the calling convention specifiers are not -recognized in previous versions. In API mode, you could work around -it by using an indirection, like in the example in the section about -Callbacks_. There was no way to use stdcall callbacks in ABI mode. +In CFFI versions before 1.3, the calling convention specifiers are not +recognized. In API mode, you could work around it by using an +indirection, like in the example in the section about Callbacks_ +(``"example_build.py"``). There was no way to use stdcall callbacks +in ABI mode. FFI Interface @@ -701,6 +703,27 @@ ``ffi.from_buffer()`` is alive. *New in version 0.9.* +.. _memmove: + +**ffi.memmove(dest, src, n)**: copy ``n`` bytes from memory area +``src`` to memory area ``dest``. See examples below. 
Inspired by the +C functions ``memcpy()`` and ``memmove()``---like the latter, the +areas can overlap. Each of ``dest`` and ``src`` can be either a cdata +pointer or a Python object supporting the buffer/memoryview interface. +In the case of ``dest``, the buffer/memoryview must be writable. +Unlike ``ffi.from_buffer()``, there are no restrictions on the type of +buffer. *New in version 1.3.* Examples: + +* ``ffi.memmove(myptr, b"hello", 5)`` copies the 5 bytes of + ``b"hello"`` to the area that ``myptr`` points to. + +* ``ba = bytearray(100); ffi.memmove(ba, myptr, 100)`` copies 100 + bytes from ``myptr`` into the bytearray ``ba``. + +* ``ffi.memmove(myptr + 1, myptr, 100)`` shifts 100 bytes from + the memory at ``myptr`` to the memory at ``myptr + 1``. + + **ffi.typeof("C type" or cdata object)**: return an object of type ```` corresponding to the parsed string, or to the C type of the cdata instance. Usually you don't need to call this function or to @@ -756,7 +779,7 @@ of a pointer or array type. For example, ``ffi.offsetof("int[5]", 2)`` is equal to the size of two integers, as is ``ffi.offsetof("int *", 2)``. - + **ffi.getctype("C type" or , extra="")**: return the string representation of the given C type. If non-empty, the "extra" string is appended (or inserted at the right place in more complicated cases); it diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -6,31 +6,33 @@ v1.3.0 ====== +* Added `ffi.memmove()`_. + * Pull request #64: out-of-line API mode: we can now declare floating-point types with ``typedef float... foo_t;``. This only works if ``foo_t`` is a float or a double, not ``long double``. -* Issue #217: fix possible unaligned pointer manipulation, which crash +* Issue #217: fix possible unaligned pointer manipulation, which crashes on some architectures (64-bit, non-x86). 
* Issues #64 and #126: when using ``set_source()`` or ``verify()``, the ``const`` and ``restrict`` keywords are copied from the cdef to the generated C code; this fixes warnings by the C compiler. It also fixes corner cases like ``typedef const int T; T a;`` - which would previously not consider ``a`` as a constant. + which would previously not consider ``a`` as a constant. (The + cdata objects themselves are never ``const``.) * Win32: support for ``__stdcall``. For callbacks and function - pointers; regular C functions don't need to have their `calling + pointers; regular C functions still don't need to have their `calling convention`_ declared. -* ffi.memmove XXX - * Windows: CPython 2.7 distutils doesn't work with Microsoft's official Visual Studio for Python, and I'm told this is `not a bug`__. For ffi.compile(), we `removed a workaround`__ that was inside cffi but which had unwanted side-effects. Try saying ``import setuptools`` first, which patches distutils... +.. _`ffi.memmove()`: using.html#memmove .. __: https://bugs.python.org/issue23246 .. __: https://bitbucket.org/cffi/cffi/pull-requests/65/remove-_hack_at_distutils-which-imports/diff .. 
_`calling convention`: using.html#windows-calling-conventions From noreply at buildbot.pypy.org Fri Oct 16 08:11:52 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 08:11:52 +0200 (CEST) Subject: [pypy-commit] pypy default: import cffi/9d51e2cd2f08 Message-ID: <20151016061152.A7F151C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80254:abdd9c11d3a0 Date: 2015-10-16 08:12 +0200 http://bitbucket.org/pypy/pypy/changeset/abdd9c11d3a0/ Log: import cffi/9d51e2cd2f08 diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -22,6 +22,16 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +122,7 @@ return basename def get_extension(self): + 
_hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: From noreply at buildbot.pypy.org Fri Oct 16 09:14:35 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 09:14:35 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fix Message-ID: <20151016071435.E28091C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2341:8baf7de121f6 Date: 2015-10-16 09:11 +0200 http://bitbucket.org/cffi/cffi/changeset/8baf7de121f6/ Log: win64 fix diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -433,6 +433,7 @@ py.test.skip("Windows-only test") if self.Backend is CTypesBackend: py.test.skip("not with the ctypes backend") + win64 = (sys.maxsize > 2**32) # ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -456,8 +457,11 @@ """) m = ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) - assert tps is not tpc - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert tps is not tpc + assert str(tps) == "" # ffi = FFI(backend=self.Backend()) ffi.cdef("typedef int (__cdecl *fnc_t)(int);") @@ -465,18 +469,20 @@ tpc = ffi.typeof("fnc_t") tps = ffi.typeof("fns_t") assert str(tpc) == "" - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert str(tps) == "" # fnc = ffi.cast("fnc_t", 0) fns = ffi.cast("fns_t", 0) ffi.new("fnc_t[]", [fnc]) - py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) - py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + if not win64: + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) ffi.new("fns_t[]", [fns]) def test_stdcall_only_on_windows(self): - if sys.platform == 'win32': - py.test.skip("not-Windows-only test") ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) From noreply at 
buildbot.pypy.org Fri Oct 16 09:14:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 09:14:37 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fix Message-ID: <20151016071437.E513F1C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2342:0df91a104b01 Date: 2015-10-16 09:14 +0200 http://bitbucket.org/cffi/cffi/changeset/0df91a104b01/ Log: win64 fix diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -370,7 +370,7 @@ tp = ffi.typeof("int(*)(int __stdcall x(int)," " long (__cdecl*y)(void)," " short(WINAPI *z)(short))") - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: stdcall = '__stdcall ' else: stdcall = '' From noreply at buildbot.pypy.org Fri Oct 16 09:15:43 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Oct 2015 09:15:43 +0200 (CEST) Subject: [pypy-commit] pypy default: Add content by reviewing all commits to default since release-2.6.1 Message-ID: <20151016071543.45A7D1C0165@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80255:d7186a14bc21 Date: 2015-10-16 10:16 +0300 http://bitbucket.org/pypy/pypy/changeset/d7186a14bc21/ Log: Add content by reviewing all commits to default since release-2.6.1 diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -12,7 +12,7 @@ ============= Richard Plangger began work in March and continued over a Google Summer of Code -to add a optimization step to the trace optimizer. The step recognizes common +to add a vectorization step to the trace optimizer. The step recognizes common constructs and emits SIMD code where possible, much as any modern compiler does. 
This vectorization happens while tracing running code, so it is actually easier at run-time to determine the @@ -39,6 +39,14 @@ Please try it out, especially using the new vectorization (via --jit vec=1 on the command line) and let us know what is missing for your code. +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. Armin Rigo continued improving it, +and PyPy reaps the benefits of cffi-1.3: improved manangement of object +lifetimes, __stdcall on Win32, ffi.memmove(), ... + .. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html @@ -58,6 +66,7 @@ .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html @@ -91,6 +100,26 @@ * In cffi, ffi.new_handle() is more cpython compliant + * Accept unicode in functions inside the _curses cffi backend exactly like cpython + + * Fix a segfault in itertools.islice() + + * Use gcrootfinder=shadowstack by default, asmgcc on linux only + + * Fix ndarray.copy() for upstream compatability when copying non-contiguous arrays + + * Fix assumption that lltype.UniChar is unsigned + + * Fix a subtle bug with stacklets on shadowstack + + * Improve support for the cpython capi in cpyext (our capi compatibility + layer). 
Fixing these issues inspired some thought about cpyext in general, + stay tuned for more improvements + + * When loading dynamic libraries, in case of a certain loading error, retry + loading the library assuming it is actually a linker script, like on Arch + and Gentoo + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy @@ -101,6 +130,12 @@ * Support __stdcall on Windows in CFFI + * Improve debug logging when using PYPYLOG=??? + + * Deal with platforms with no RAND_egd() in OpenSSL + + * Enable building _vmprof in translation on OS/X by default + * Numpy: * Add support for ndarray.ctypes @@ -111,7 +146,10 @@ * Fix casting failures in linalg (by extending ufunc casting) -* Performance improvements: + * Recognize and disallow (for now) pickling of ndarrays with objects + embedded in them + +* Performance improvements and refactorings: * Reuse hashed keys across dictionaries and sets @@ -124,6 +162,22 @@ * Share guard resume data where possible which reduces memory usage + * Fast path for zip(list, list) + + * Reduce the number of checks in the JIT for lst[a:] + + * Move the non-optimizable part of callbacks outside the JIT + + * Factor in field immutability when invalidating heap information + + * Unroll itertools.izip_longest() with two sequences + + * Minor optimizations after analyzing output from `vmprof`_ and trace logs + + * Remove many class attributes in rpython classes + + * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + .. _`vmprof`: https://vmprof.readthedocs.org .. 
_resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html From noreply at buildbot.pypy.org Fri Oct 16 09:18:58 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Oct 2015 09:18:58 +0200 (CEST) Subject: [pypy-commit] pypy default: shuffle and cleanup release notice Message-ID: <20151016071858.143E41C0290@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80256:f3e5e5b87b80 Date: 2015-10-16 10:19 +0300 http://bitbucket.org/pypy/pypy/changeset/f3e5e5b87b80/ Log: shuffle and cleanup release notice diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -8,6 +8,20 @@ `vectorization`_ for numpy and general loops where possible on x86 hardware, refactored rough edges in rpython, and increased functionality of numpy. +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + + Vectorization ============= @@ -51,19 +65,6 @@ .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html -You can download the PyPy 15.11 release here: - - http://pypy.org/download.html - -We would like to thank our donors for the continued support of the PyPy -project, and our volunteers and contributors. - -We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and -encourage new people to join the project. 
PyPy has many -layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. - .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org From noreply at buildbot.pypy.org Fri Oct 16 09:32:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 09:32:47 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 libffi issue Message-ID: <20151016073247.831E01C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2343:c0269f40b3d4 Date: 2015-10-16 09:31 +0200 http://bitbucket.org/cffi/cffi/changeset/c0269f40b3d4/ Log: win64 libffi issue diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3787,8 +3787,15 @@ ffitype = &ffi_type_float; else if (strcmp(ptypes->name, "double") == 0) ffitype = &ffi_type_double; - else if (strcmp(ptypes->name, "long double") == 0) - ffitype = &ffi_type_longdouble; + else if (strcmp(ptypes->name, "long double") == 0) { + /* assume that if sizeof(double) == sizeof(long double), then + the two types are equivalent for C. libffi bugs on Win64 + if a function's return type is ffi_type_longdouble... 
*/ + if (sizeof(double) == sizeof(long double)) + ffitype = &ffi_type_double; + else + ffitype = &ffi_type_longdouble; + } else goto bad_ffi_type; } From noreply at buildbot.pypy.org Fri Oct 16 10:10:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:10:20 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fix Message-ID: <20151016081020.CD4451C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2344:47f0a9e503bc Date: 2015-10-16 09:35 +0200 http://bitbucket.org/cffi/cffi/changeset/47f0a9e503bc/ Log: win64 fix diff --git a/testing/cffi0/test_verify.py b/testing/cffi0/test_verify.py --- a/testing/cffi0/test_verify.py +++ b/testing/cffi0/test_verify.py @@ -2285,7 +2285,7 @@ #print '...' assert res == -500*999*3 #print 'done' - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -2411,7 +2411,7 @@ return result; } """) - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: py.test.raises(TypeError, lib.call1, lib.cb2) py.test.raises(TypeError, lib.call2, lib.cb1) pt = lib.call1(lib.cb1) From noreply at buildbot.pypy.org Fri Oct 16 10:10:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:10:22 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fix Message-ID: <20151016081022.E64FB1C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2345:2595d6ad4cb4 Date: 2015-10-16 09:46 +0200 http://bitbucket.org/cffi/cffi/changeset/2595d6ad4cb4/ Log: win64 fix diff --git a/c/parse_c_type.c b/c/parse_c_type.c --- a/c/parse_c_type.c +++ b/c/parse_c_type.c @@ -376,11 +376,14 @@ case TOK_INTEGER: errno = 0; -#ifndef MS_WIN32 - if (sizeof(length) > sizeof(unsigned long)) + if (sizeof(length) > sizeof(unsigned long)) { +#ifdef MS_WIN32 /* actually for win64 */ + length = _strtoui64(tok->p, 
&endptr, 0); +#else length = strtoull(tok->p, &endptr, 0); +#endif + } else -#endif length = strtoul(tok->p, &endptr, 0); if (endptr != tok->p + tok->size) return parse_error(tok, "invalid number"); From noreply at buildbot.pypy.org Fri Oct 16 10:10:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:10:24 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fix Message-ID: <20151016081024.EACD31C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2346:b9031351ecbd Date: 2015-10-16 09:47 +0200 http://bitbucket.org/cffi/cffi/changeset/b9031351ecbd/ Log: win64 fix diff --git a/testing/cffi1/test_realize_c_type.py b/testing/cffi1/test_realize_c_type.py --- a/testing/cffi1/test_realize_c_type.py +++ b/testing/cffi1/test_realize_c_type.py @@ -53,7 +53,7 @@ ffi = _cffi_backend.FFI() ct = ffi.typeof(ffi.callback(input, lambda: None)) assert isinstance(ct, ffi.CType) - if sys.platform != 'win32': + if sys.platform != 'win32' or sys.maxsize > 2**32: expected_output = expected_output.replace('__stdcall *', '*') assert ct.cname == expected_output From noreply at buildbot.pypy.org Fri Oct 16 10:10:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:10:26 +0200 (CEST) Subject: [pypy-commit] cffi default: win64 fixes Message-ID: <20151016081026.E5ACE1C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2347:aa235bb60ca3 Date: 2015-10-16 11:05 +0300 http://bitbucket.org/cffi/cffi/changeset/aa235bb60ca3/ Log: win64 fixes diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -198,7 +198,7 @@ vals = ['42', '-42', '0x80000000', '-2147483648', '0', '9223372036854775809ULL', '-9223372036854775807LL'] - if sys.maxsize <= 2**32: + if sys.maxint <= 2**32: vals.remove('-2147483648') ffi = FFI() cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) @@ -458,7 +458,7 @@ ffi.cdef("typedef enum 
{ AA=%d } e1;" % sys.maxsize) lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2', "typedef enum { AA=%d } e1;" % sys.maxsize) - assert lib.AA == sys.maxsize + assert lib.AA == int(ffi.cast("long", sys.maxsize)) assert ffi.sizeof("e1") == ffi.sizeof("long") def test_unique_types(): @@ -1320,7 +1320,7 @@ res = lib.call2(cb2) assert res == -500*999*3 assert res == ffi.addressof(lib, 'call2')(cb2) - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -1408,7 +1408,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) @@ -1464,7 +1464,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) From noreply at buildbot.pypy.org Fri Oct 16 10:30:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:30:36 +0200 (CEST) Subject: [pypy-commit] cffi default: win32 fix Message-ID: <20151016083036.7CB1D1C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2348:7dee2a770bc5 Date: 2015-10-16 10:24 +0200 http://bitbucket.org/cffi/cffi/changeset/7dee2a770bc5/ Log: win32 fix diff --git a/c/parse_c_type.c b/c/parse_c_type.c --- a/c/parse_c_type.c +++ b/c/parse_c_type.c @@ -377,8 +377,12 @@ case TOK_INTEGER: errno = 0; if (sizeof(length) > 
sizeof(unsigned long)) { -#ifdef MS_WIN32 /* actually for win64 */ +#ifdef MS_WIN32 +# ifdef _WIN64 length = _strtoui64(tok->p, &endptr, 0); +# else + abort(); /* unreachable */ +# endif #else length = strtoull(tok->p, &endptr, 0); #endif diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -486,6 +486,9 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - assert "double(*)(double)" in str(ffi.typeof(m.sin)) + if sys.platform == 'win32' and sys.maxint < 2**32: + assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) + else: + assert "double(*)(double)" in str(ffi.typeof(m.sin)) x = m.sin(1.23) assert x == math.sin(1.23) From noreply at buildbot.pypy.org Fri Oct 16 10:30:38 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 10:30:38 +0200 (CEST) Subject: [pypy-commit] cffi default: win32 fix Message-ID: <20151016083038.7FAAA1C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2349:74820048ec4d Date: 2015-10-16 10:27 +0200 http://bitbucket.org/cffi/cffi/changeset/74820048ec4d/ Log: win32 fix diff --git a/testing/cffi0/test_function.py b/testing/cffi0/test_function.py --- a/testing/cffi0/test_function.py +++ b/testing/cffi0/test_function.py @@ -486,7 +486,8 @@ ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - if sys.platform == 'win32' and sys.maxint < 2**32: + if (sys.platform == 'win32' and sys.maxint < 2**32 and + self.Backend is not CTypesBackend): assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) else: assert "double(*)(double)" in str(ffi.typeof(m.sin)) From noreply at buildbot.pypy.org Fri Oct 16 11:07:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:07:43 +0200 (CEST) Subject: [pypy-commit] cffi default: py3 and no-g++ fixes 
Message-ID: <20151016090743.5B4A21C12DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2350:706778a4c9ab Date: 2015-10-16 11:02 +0200 http://bitbucket.org/cffi/cffi/changeset/706778a4c9ab/ Log: py3 and no-g++ fixes diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -21,7 +21,7 @@ kwds.setdefault('undef_macros', ['NDEBUG']) module_name = '_CFFI_' + module_name ffi.set_source(module_name, source) - if 1: # test the .cpp mode too + if not os.environ.get('NO_CPP'): # test the .cpp mode too kwds.setdefault('source_extension', '.cpp') source = 'extern "C" {\n%s\n}' % (source,) else: @@ -198,7 +198,7 @@ vals = ['42', '-42', '0x80000000', '-2147483648', '0', '9223372036854775809ULL', '-9223372036854775807LL'] - if sys.maxint <= 2**32: + if sys.maxsize <= 2**32 or sys.platform == 'win32': vals.remove('-2147483648') ffi = FFI() cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) From noreply at buildbot.pypy.org Fri Oct 16 11:36:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:11 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: threadlocalref_get Message-ID: <20151016093611.4FCA61C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80257:d1c06d78b23c Date: 2015-10-16 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d1c06d78b23c/ Log: threadlocalref_get diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -221,6 +221,25 @@ l0, res = arglocs self.mc.fsqrt(res.value, l0.value) + def _emit_threadlocalref_get(self, op, arglocs, regalloc): + [resloc] = arglocs + offset = op.getarg(1).getint() # getarg(0) == 'threadlocalref_get' + calldescr = op.getdescr() + size = calldescr.get_result_size() + sign = calldescr.is_result_signed() + 
# + # This loads the stack location THREADLOCAL_OFS into a + # register, and then read the word at the given offset. + # It is only supported if 'translate_support_code' is + # true; otherwise, the execute_token() was done with a + # dummy value for the stack location THREADLOCAL_OFS + # + assert self.cpu.translate_support_code + assert resloc.is_reg() + assert _check_imm_arg(offset) + self.mc.ld(resloc.value, r.SP.value, THREADLOCAL_ADDR_OFFSET) + self._load_from_mem(resloc, resloc, imm(offset), imm(size), imm(sign)) + emit_float_le = gen_emit_cmp_op(c.LE, fp=True) emit_float_lt = gen_emit_cmp_op(c.LT, fp=True) emit_float_gt = gen_emit_cmp_op(c.GT, fp=True) @@ -584,6 +603,8 @@ oopspecindex = regalloc.get_oopspecindex(op) if oopspecindex == EffectInfo.OS_MATH_SQRT: return self._emit_math_sqrt(op, arglocs, regalloc) + if oopspecindex == EffectInfo.OS_THREADLOCALREF_GET: + return self._emit_threadlocalref_get(op, arglocs, regalloc) self._emit_call(op, arglocs) emit_call_i = _genop_call diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -522,6 +522,13 @@ res = self.fprm.force_allocate_reg(op) return [loc, res] + def _prepare_threadlocalref_get(self, op): + if self.cpu.translate_support_code: + res = self.force_allocate_reg(op) + return [res] + else: + return self._prepare_call(op) + def prepare_cast_float_to_int(self, op): loc1 = self.ensure_reg(op.getarg(0)) self.free_op_vars() @@ -918,6 +925,8 @@ oopspecindex = self.get_oopspecindex(op) if oopspecindex == EffectInfo.OS_MATH_SQRT: return self._prepare_math_sqrt(op) + if oopspecindex == EffectInfo.OS_THREADLOCALREF_GET: + return self._prepare_threadlocalref_get(op) return self._prepare_call(op) prepare_call_i = _prepare_call From noreply at buildbot.pypy.org Fri Oct 16 11:36:13 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:13 +0200 (CEST) Subject: [pypy-commit] pypy 
ppc-updated-backend: need more stack! Message-ID: <20151016093613.605F31C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80258:9c76fe579f23 Date: 2015-10-16 11:14 +0200 http://bitbucket.org/pypy/pypy/changeset/9c76fe579f23/ Log: need more stack! diff --git a/rpython/translator/c/src/stack.h b/rpython/translator/c/src/stack.h --- a/rpython/translator/c/src/stack.h +++ b/rpython/translator/c/src/stack.h @@ -6,7 +6,14 @@ #ifndef MAX_STACK_SIZE +# if defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) + /* PowerPC seems to consume the stack very quickly. The default + * value of 768 kb is only enough for 406 levels on ppc64, and 792 + * on ppc64le */ +# define MAX_STACK_SIZE (11 << 18) /* 2.8 mb */ +# else # define MAX_STACK_SIZE (3 << 18) /* 768 kb */ +# endif #endif From noreply at buildbot.pypy.org Fri Oct 16 11:36:22 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:22 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: hg merge default Message-ID: <20151016093622.3DB5C1C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80259:c63862115db0 Date: 2015-10-16 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/c63862115db0/ Log: hg merge default diff too long, truncating to 2000 out of 31445 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. 
in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. 
@@ -609,7 +625,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +636,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +647,23 @@ # if not copied_enums: from . import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". 
+ # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should @@ -192,6 +203,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +214,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +268,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +284,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = 
self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +301,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +312,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if 
(isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +365,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, 
pycparser.c_ast.IdentifierType): # assume a primitive type. get it from .names, but reduce @@ -379,35 +405,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +455,28 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = 
[] - result = self._get_type(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + result, quals = self._get_type_and_quals(typenode.type) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +515,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +557,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +571,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +580,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +665,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ 
b/lib_pypy/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,14 +1,29 @@ -import types +import types, sys import weakref from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -177,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -206,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -217,24 +236,29 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def 
__init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +267,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +276,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +337,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +354,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +373,16 @@ names 
= [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except 
Exception as e: model.attach_exception_info(e, name) @@ -608,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -711,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -774,7 +778,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +793,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +828,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +884,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. 
- for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1011,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1090,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) - prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') @@ -1130,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ 
prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -156,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, 
arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') @@ -260,7 +267,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +276,8 @@ # only accept exactly the type declared. try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +288,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +350,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -22,6 +22,16 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if 
sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +122,7 @@ return basename def get_extension(self): + _hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,8 +5,8 @@ with any external library. Right now, there are the following possibilities of providing -third-party modules for the PyPy python interpreter (in order of -usefulness): +third-party modules for the PyPy python interpreter (in order, from most +directly useful to most messy to use with PyPy): * Write them in pure Python and use CFFI_. @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -67,7 +67,7 @@ The other commands of ``setup.py`` are available too, like ``build``. .. _PyPI: https://pypi.python.org/pypi -.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv +.. 
_`use virtualenv (as documented here)`: install.html#installing-using-virtualenv Module xyz does not work in the sandboxed PyPy? diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-15.11.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-15.11.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-15.11.0.rst @@ -0,0 +1,191 @@ +============ +PyPy 15.11.0 +============ + +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy +python2.7.10 compatible interpreter with a Just In Time compiler. +We have improved `warmup time and memory overhead used for tracing`_, added +`vectorization`_ for numpy and general loops where possible on x86 hardware, +refactored rough edges in rpython, and increased functionality of numpy. + +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + + +Vectorization +============= + +Richard Plangger began work in March and continued over a Google Summer of Code +to add a vectorization step to the trace optimizer. 
The step recognizes common +constructs and emits SIMD code where possible, much as any modern compiler does. +This vectorization happens while tracing running code, so it is actually easier +at run-time to determine the +availability of possible vectorization than it is for ahead-of-time compilers. + +Availability of SIMD hardware is detected at run time, without needing to +precompile various code paths into the executable. + +Internal Refactoring and Warmup Time Improvement +================================================ + +Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow +PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, +leading to a warmup time improvement of 20% or so at the cost of a minor +regression in jitted code speed. + +Numpy +===== + +Our implementation of numpy continues to improve. ndarray and the numeric dtypes +are very close to feature-complete; record, string and unicode dtypes are mostly +supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 +modules that call out to the same underlying libraries that upstream numpy uses. +Please try it out, especially using the new vectorization (via --jit vec=1 on the +command line) and let us know what is missing for your code. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. Armin Rigo continued improving it, +and PyPy reaps the benefits of cffi-1.3: improved manangement of object +lifetimes, __stdcall on Win32, ffi.memmove(), ... + +.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 +.. _`vectorization`: http://pypyvecopt.blogspot.co.at/ +.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. 
_`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights (since 2.6.1 release two months ago) +=============================================== + +* Bug Fixes + + * Applied OPENBSD downstream fixes + + * Fix a crash on non-linux when running more than 20 threads + + * In cffi, ffi.new_handle() is more cpython compliant + + * Accept unicode in functions inside the _curses cffi backend exactly like cpython + + * Fix a segfault in itertools.islice() + + * Use gcrootfinder=shadowstack by default, asmgcc on linux only + + * Fix ndarray.copy() for upstream compatability when copying non-contiguous arrays + + * Fix assumption that lltype.UniChar is unsigned + + * Fix a subtle bug with stacklets on shadowstack + + * Improve support for the cpython capi in cpyext (our capi compatibility + layer). 
Fixing these issues inspired some thought about cpyext in general, + stay tuned for more improvements + + * When loading dynamic libraries, in case of a certain loading error, retry + loading the library assuming it is actually a linker script, like on Arch + and Gentoo + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * Add an optimization pass to vectorize loops using x86 SIMD intrinsics. + + * Support __stdcall on Windows in CFFI + + * Improve debug logging when using PYPYLOG=??? + + * Deal with platforms with no RAND_egd() in OpenSSL + + * Enable building _vmprof in translation on OS/X by default + +* Numpy: + + * Add support for ndarray.ctypes + + * Fast path for mixing numpy scalars and floats + + * Add support for creating Fortran-ordered ndarrays + + * Fix casting failures in linalg (by extending ufunc casting) + + * Recognize and disallow (for now) pickling of ndarrays with objects + embedded in them + +* Performance improvements and refactorings: + + * Reuse hashed keys across dictionaries and sets + + * Refactor JIT interals to improve warmup time by 20% or so at the cost of a + minor regression in JIT speed + + * Recognize patterns of common sequences in the JIT backends and optimize them + + * Make the garbage collecter more intcremental over external_malloc() calls + + * Share guard resume data where possible which reduces memory usage + + * Fast path for zip(list, list) + + * Reduce the number of checks in the JIT for lst[a:] + + * Move the non-optimizable part of callbacks outside the JIT + + * Factor in field immutability when invalidating heap information + + * Unroll itertools.izip_longest() with two sequences + + * Minor optimizations after analyzing output from `vmprof`_ and trace logs + + * Remove many class attributes in rpython classes + + * Handle getfield_gc_pure* and getfield_gc_* uniformly in 
heap.py + +.. _`vmprof`: https://vmprof.readthedocs.org +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-15.11.0.rst b/pypy/doc/whatsnew-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-15.11.0.rst @@ -0,0 +1,87 @@ +======================== +What's new in PyPy 15.11 +======================== + +.. this is a revision shortly after release-2.6.1 +.. startrev: 07769be4057b + +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. + +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. Should improve warmup time +by 20% or so. + +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends + +.. branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports + +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. + +.. branch: remember-tracing-counts +Reenable jithooks + +.. branch: detect_egd2 + +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. + +.. 
branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). + +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions + +.. branch: vecopt +.. branch: vecopt-merge + +A new optimization pass to use emit vectorized loops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,35 +1,8 @@ -======================= -What's new in PyPy 2.6+ -======================= +========================= +What's new in PyPy 15.11+ +========================= -.. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. this is a revision shortly after release-15.11.0 +.. startrev: d924723d483b -.. branch: keys_with_hash -Improve the performance of dict.update() and a bunch of methods from -sets, by reusing the hash value stored in one dict when inspecting -or changing another dict with that key. -.. branch: optresult-unroll -A major refactoring of the ResOperations that kills Box. Also rewrote -unrolling to enable future enhancements. Should improve warmup time -by 20% or so. - -.. 
branch: optimize-cond-call -Optimize common sequences of operations like -``int_lt/cond_call`` in the JIT backends - -.. branch: missing_openssl_include -Fix for missing headers in OpenBSD, already applied in downstream ports - -.. branch: gc-more-incremental -Remove a source of non-incremental-ness in the GC: now -external_malloc() no longer runs gc_step_until() any more. If there -is a currently-running major collection, we do only so many steps -before returning. This number of steps depends on the size of the -allocated object. It is controlled by tracking the general progress -of these major collection steps and the size of old objects that -keep adding up between them. - -.. branch: remember-tracing-counts -Reenable jithooks diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1058,6 +1058,14 @@ args = Arguments.frompacked(self, w_args, w_kwds) return self.call_args(w_callable, args) + def _try_fetch_pycode(self, w_func): + from pypy.interpreter.function import Function, Method + if isinstance(w_func, Method): + w_func = w_func.w_function + if isinstance(w_func, Function): + return w_func.code + return None + def call_function(self, w_func, *args_w): nargs = len(args_w) # used for pruning funccall versions if not self.config.objspace.disable_call_speedhacks and nargs < 5: diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists 
+ seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. - seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, 
w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return 
space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ @unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): 
w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -40,12 +47,13 @@ 'string': 'func.string', 'buffer': 'cbuffer.buffer', + 'memmove': 'func.memmove', 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +61,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,11 +1,11 @@ """ Callbacks. 
""" -import sys, os +import sys, os, py -from rpython.rlib import clibffi, rweakref, jit, jit_libffi -from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel +from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt from pypy.module._cffi_backend import cerrno, misc @@ -19,6 +19,23 @@ # ____________________________________________________________ + at jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). + cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -34,10 +51,11 @@ class W_CDataCallback(W_CData): - #_immutable_fields_ = ... 
+ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -46,6 +64,7 @@ raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable + self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -64,8 +83,12 @@ convert_from_object_fficallback(fresult, self._closure.ll_error, w_error) # - self.unique_id = compute_unique_id(self) - global_callback_mapping.set(self.unique_id, self) + # We must setup the GIL here, in case the callback is invoked in + # some other non-Pythonic thread. This is the same as cffi on + # CPython. + if space.config.translation.thread: + from pypy.module.thread.os_thread import setup_threads + setup_threads(space) # cif_descr = self.getfunctype().cif_descr if not cif_descr: @@ -74,20 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, self.unique_id) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - # We must setup the GIL here, in case the callback is invoked in - # some other non-Pythonic thread. This is the same as cffi on - # CPython. 
- if space.config.translation.thread: - from pypy.module.thread.os_thread import setup_threads - setup_threads(space) def _repr_extra(self): space = self.space @@ -105,6 +121,7 @@ def invoke(self, ll_args): space = self.space ctype = self.getfunctype() + ctype = jit.promote(ctype) args_w = [] for i, farg in enumerate(ctype.fargs): ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) @@ -127,9 +144,6 @@ keepalive_until_here(self) # to keep self._closure.ll_error alive -global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) - - def convert_from_object_fficallback(fresult, ll_res, w_res): space = fresult.space small_result = fresult.size < SIZE_OF_FFI_ARG @@ -178,7 +192,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,19 +214,36 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") +def get_printable_location(key_pycode): + if key_pycode is None: + return 'cffi_callback ' + return 'cffi_callback ' + key_pycode.get_repr() - at jit.jit_callback("CFFI") +jitdriver = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location) + +def py_invoke_callback(callback, ll_res, ll_args): + jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) + def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. 
ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - callback = global_callback_mapping.get(unique_id) + callback = reveal_callback(ll_userdata) if callback is None: # oups! try: @@ -224,17 +256,11 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False - space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. 
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -18,6 +18,7 @@ _attrs_ = ['ctptr'] _immutable_fields_ = ['ctptr'] kind = "array" + is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,22 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +47,7 @@ # at all. 
The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. - builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +79,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +87,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +103,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +114,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +189,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +263,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, 
fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = llmemory.raw_malloc_usage(size) @@ -421,7 +425,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. @@ -20,6 +21,7 @@ cast_anything = False is_primitive_integer = False + is_nonfunc_pointer_or_array = False kind = "?" 
def __init__(self, space, size, name, name_position): @@ -142,7 +144,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -125,12 +126,25 @@ cdata[0] = value +# XXX explicitly use an integer type instead of lltype.UniChar here, +# because for now the latter is defined as unsigned by RPython (even +# though it may be signed when 'wchar_t' is written to C). 
From noreply at buildbot.pypy.org Fri Oct 16 11:36:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:24 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: kill Message-ID: <20151016093624.5DB4A1C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80260:c423b1050564 Date: 2015-10-16 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/c423b1050564/ Log: kill diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -103,34 +103,6 @@ frame_reg = r.SPP assert set(save_around_call_regs).issubset(all_regs) - REGLOC_TO_COPY_AREA_OFS = { - r.r5: MY_COPY_OF_REGS + 0 * WORD, - r.r6: MY_COPY_OF_REGS + 1 * WORD, - r.r7: MY_COPY_OF_REGS + 2 * WORD, - r.r8: MY_COPY_OF_REGS + 3 * WORD, - r.r9: MY_COPY_OF_REGS + 4 * WORD, - r.r10: MY_COPY_OF_REGS + 5 * WORD, - r.r11: MY_COPY_OF_REGS + 6 * WORD, - r.r12: MY_COPY_OF_REGS + 7 * WORD, - r.r14: MY_COPY_OF_REGS + 8 * WORD, - r.r15: MY_COPY_OF_REGS + 9 * WORD, - r.r16: MY_COPY_OF_REGS + 10 * WORD, - r.r17: MY_COPY_OF_REGS + 11 * WORD, - r.r18: MY_COPY_OF_REGS + 12 * WORD, - r.r19: MY_COPY_OF_REGS + 13 * WORD, - r.r20: MY_COPY_OF_REGS + 14 * WORD, - r.r21: MY_COPY_OF_REGS + 15 * WORD, - r.r22: MY_COPY_OF_REGS + 16 * WORD, - r.r23: MY_COPY_OF_REGS + 17 * WORD, - r.r24: MY_COPY_OF_REGS + 18 * WORD, - r.r25: MY_COPY_OF_REGS + 19 * WORD, - r.r26: MY_COPY_OF_REGS + 20 * WORD, - r.r27: MY_COPY_OF_REGS + 21 * WORD, - r.r28: MY_COPY_OF_REGS + 22 * WORD, - r.r29: MY_COPY_OF_REGS + 23 * WORD, - r.r30: MY_COPY_OF_REGS + 24 * WORD, - } - def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager, assembler) From noreply at buildbot.pypy.org Fri Oct 16 11:36:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:26 +0200 (CEST) Subject: [pypy-commit] pypy 
ppc-updated-backend: save_exc_class, save_exception, restore_exception Message-ID: <20151016093626.6F37A1C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80261:4f967ab0ad89 Date: 2015-10-16 11:20 +0200 http://bitbucket.org/pypy/pypy/changeset/4f967ab0ad89/ Log: save_exc_class, save_exception, restore_exception diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -554,6 +554,18 @@ pmc.bc(BO, BI, relative_target) pmc.overwrite() + def emit_save_exc_class(self, op, arglocs, regalloc): + [resloc] = arglocs + diff = self.mc.load_imm_plus(r.r2, self.cpu.pos_exception()) + self.mc.load(resloc.value, r.r2.value, diff) + + def emit_save_exception(self, op, arglocs, regalloc): + [resloc] = arglocs + self._store_and_reset_exception(self.mc, resloc) + + def emit_restore_exception(self, op, arglocs, regalloc): + self._restore_exception(self.mc, arglocs[1], arglocs[0]) + def emit_guard_exception(self, op, arglocs, regalloc): loc, resloc = arglocs[:2] failargs = arglocs[2:] diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -586,6 +586,16 @@ arglocs = self._prepare_guard(op, [loc, resloc]) return arglocs + def prepare_save_exception(self, op): + res = self.rm.force_allocate_reg(op) + return [res] + prepare_save_exc_class = prepare_save_exception + + def prepare_restore_exception(self, op): + loc0 = self.ensure_reg(op.getarg(0)) + loc1 = self.ensure_reg(op.getarg(1)) + return [loc0, loc1] + def prepare_guard_no_exception(self, op): arglocs = self._prepare_guard(op) return arglocs From noreply at buildbot.pypy.org Fri Oct 16 11:36:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:28 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: cancel a 
change by mistake Message-ID: <20151016093628.77C131C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80262:373e67dd241e Date: 2015-10-16 11:23 +0200 http://bitbucket.org/pypy/pypy/changeset/373e67dd241e/ Log: cancel a change by mistake diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -41,7 +41,7 @@ from rpython.jit.backend.llsupport.descr import CallDescr -class TempInt(TempBox): +class TempInt(TempVar): type = INT def __repr__(self): From noreply at buildbot.pypy.org Fri Oct 16 11:36:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:36:30 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: document Message-ID: <20151016093630.7A1B41C1232@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80263:92200f058163 Date: 2015-10-16 11:28 +0200 http://bitbucket.org/pypy/pypy/changeset/92200f058163/ Log: document diff --git a/rpython/translator/c/src/stack.c b/rpython/translator/c/src/stack.c --- a/rpython/translator/c/src/stack.c +++ b/rpython/translator/c/src/stack.c @@ -9,6 +9,8 @@ /* the current stack is in the interval [end-length:end]. We assume a stack that grows downward here. */ +/* (stored in a struct to ensure that stack_end and stack_length are + close together; used e.g. 
by the ppc jit backend) */ rpy_stacktoobig_t rpy_stacktoobig = { NULL, /* stack_end */ MAX_STACK_SIZE, /* stack_length */ From noreply at buildbot.pypy.org Fri Oct 16 11:38:30 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Oct 2015 11:38:30 +0200 (CEST) Subject: [pypy-commit] pypy default: unroll find_result_type if the arrays are either virtual or small Message-ID: <20151016093830.6F2961C1232@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80264:30212baeb975 Date: 2015-10-16 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/30212baeb975/ Log: unroll find_result_type if the arrays are either virtual or small diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py --- a/pypy/module/micronumpy/casting.py +++ b/pypy/module/micronumpy/casting.py @@ -38,7 +38,9 @@ dtypes_w.append(dtype) return find_result_type(space, arrays_w, dtypes_w) - + at jit.look_inside_iff(lambda space, arrays_w, dtypes_w: + jit.loop_unrolling_heuristic(arrays_w) and + jit.loop_unrolling_heuristic(dtypes_w)) def find_result_type(space, arrays_w, dtypes_w): # equivalent to PyArray_ResultType if len(arrays_w) == 1 and not dtypes_w: From noreply at buildbot.pypy.org Fri Oct 16 11:38:33 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Oct 2015 11:38:33 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20151016093833.64ADD1C1232@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80265:f377aa5469e9 Date: 2015-10-16 11:38 +0200 http://bitbucket.org/pypy/pypy/changeset/f377aa5469e9/ Log: merge diff too long, truncating to 2000 out of 15271 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/lib_pypy/cffi/ffiplatform.py 
b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -22,6 +22,16 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +122,7 @@ return basename def get_extension(self): + _hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. 
toctree:: + release-15.11.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-15.11.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-15.11.0.rst @@ -0,0 +1,191 @@ +============ +PyPy 15.11.0 +============ + +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy +python2.7.10 compatible interpreter with a Just In Time compiler. +We have improved `warmup time and memory overhead used for tracing`_, added +`vectorization`_ for numpy and general loops where possible on x86 hardware, +refactored rough edges in rpython, and increased functionality of numpy. + +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + + +Vectorization +============= + +Richard Plangger began work in March and continued over a Google Summer of Code +to add a vectorization step to the trace optimizer. The step recognizes common +constructs and emits SIMD code where possible, much as any modern compiler does. +This vectorization happens while tracing running code, so it is actually easier +at run-time to determine the +availability of possible vectorization than it is for ahead-of-time compilers. 
+ +Availability of SIMD hardware is detected at run time, without needing to +precompile various code paths into the executable. + +Internal Refactoring and Warmup Time Improvement +================================================ + +Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow +PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, +leading to a warmup time improvement of 20% or so at the cost of a minor +regression in jitted code speed. + +Numpy +===== + +Our implementation of numpy continues to improve. ndarray and the numeric dtypes +are very close to feature-complete; record, string and unicode dtypes are mostly +supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 +modules that call out to the same underlying libraries that upstream numpy uses. +Please try it out, especially using the new vectorization (via --jit vec=1 on the +command line) and let us know what is missing for your code. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. Armin Rigo continued improving it, +and PyPy reaps the benefits of cffi-1.3: improved manangement of object +lifetimes, __stdcall on Win32, ffi.memmove(), ... + +.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 +.. _`vectorization`: http://pypyvecopt.blogspot.co.at/ +.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights (since 2.6.1 release two months ago) +=============================================== + +* Bug Fixes + + * Applied OPENBSD downstream fixes + + * Fix a crash on non-linux when running more than 20 threads + + * In cffi, ffi.new_handle() is more cpython compliant + + * Accept unicode in functions inside the _curses cffi backend exactly like cpython + + * Fix a segfault in itertools.islice() + + * Use gcrootfinder=shadowstack by default, asmgcc on linux only + + * Fix ndarray.copy() for upstream compatability when copying non-contiguous arrays + + * Fix assumption that lltype.UniChar is unsigned + + * Fix a subtle bug with stacklets on shadowstack + + * Improve support for the cpython capi in cpyext (our capi compatibility + layer). Fixing these issues inspired some thought about cpyext in general, + stay tuned for more improvements + + * When loading dynamic libraries, in case of a certain loading error, retry + loading the library assuming it is actually a linker script, like on Arch + and Gentoo + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * Add an optimization pass to vectorize loops using x86 SIMD intrinsics. 
+ + * Support __stdcall on Windows in CFFI + + * Improve debug logging when using PYPYLOG=??? + + * Deal with platforms with no RAND_egd() in OpenSSL + + * Enable building _vmprof in translation on OS/X by default + +* Numpy: + + * Add support for ndarray.ctypes + + * Fast path for mixing numpy scalars and floats + + * Add support for creating Fortran-ordered ndarrays + + * Fix casting failures in linalg (by extending ufunc casting) + + * Recognize and disallow (for now) pickling of ndarrays with objects + embedded in them + +* Performance improvements and refactorings: + + * Reuse hashed keys across dictionaries and sets + + * Refactor JIT interals to improve warmup time by 20% or so at the cost of a + minor regression in JIT speed + + * Recognize patterns of common sequences in the JIT backends and optimize them + + * Make the garbage collecter more intcremental over external_malloc() calls + + * Share guard resume data where possible which reduces memory usage + + * Fast path for zip(list, list) + + * Reduce the number of checks in the JIT for lst[a:] + + * Move the non-optimizable part of callbacks outside the JIT + + * Factor in field immutability when invalidating heap information + + * Unroll itertools.izip_longest() with two sequences + + * Minor optimizations after analyzing output from `vmprof`_ and trace logs + + * Remove many class attributes in rpython classes + + * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + +.. _`vmprof`: https://vmprof.readthedocs.org +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-15.11.0.rst b/pypy/doc/whatsnew-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-15.11.0.rst @@ -0,0 +1,87 @@ +======================== +What's new in PyPy 15.11 +======================== + +.. 
this is a revision shortly after release-2.6.1 +.. startrev: 07769be4057b + +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. + +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. Should improve warmup time +by 20% or so. + +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends + +.. branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports + +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. + +.. branch: remember-tracing-counts +Reenable jithooks + +.. branch: detect_egd2 + +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. 
branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). + +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions + +.. branch: vecopt +.. branch: vecopt-merge + +A new optimization pass to use emit vectorized loops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,82 +1,8 @@ -======================= -What's new in PyPy 2.6+ -======================= +========================= +What's new in PyPy 15.11+ +========================= -.. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. this is a revision shortly after release-15.11.0 +.. startrev: d924723d483b -.. branch: keys_with_hash -Improve the performance of dict.update() and a bunch of methods from -sets, by reusing the hash value stored in one dict when inspecting -or changing another dict with that key. -.. branch: optresult-unroll -A major refactoring of the ResOperations that kills Box. Also rewrote -unrolling to enable future enhancements. Should improve warmup time -by 20% or so. - -.. branch: optimize-cond-call -Optimize common sequences of operations like -``int_lt/cond_call`` in the JIT backends - -.. branch: missing_openssl_include -Fix for missing headers in OpenBSD, already applied in downstream ports - -.. branch: gc-more-incremental -Remove a source of non-incremental-ness in the GC: now -external_malloc() no longer runs gc_step_until() any more. If there -is a currently-running major collection, we do only so many steps -before returning. 
This number of steps depends on the size of the -allocated object. It is controlled by tracking the general progress -of these major collection steps and the size of old objects that -keep adding up between them. - -.. branch: remember-tracing-counts -Reenable jithooks - -.. branch: detect_egd2 - -.. branch: shadowstack-no-move-2 -Issue #2141: fix a crash on Windows and OS/X and ARM when running -at least 20 threads. - -.. branch: numpy-ctypes - -Add support for ndarray.ctypes property. - -.. branch: share-guard-info - -Share guard resume data between consecutive guards that have only -pure operations and guards in between. - -.. branch: issue-2148 - -Fix performance regression on operations mixing numpy scalars and Python -floats, cf. issue #2148. - -.. branch: cffi-stdcall -Win32: support '__stdcall' in CFFI. - -.. branch: callfamily - -Refactorings of annotation and rtyping of function calls. - -.. branch: fortran-order - -Allow creation of fortran-ordered ndarrays - -.. branch: type_system-cleanup - -Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. - -.. branch: cffi-handle-lifetime - -ffi.new_handle() returns handles that work more like CPython's: they -remain valid as long as the target exists (unlike the previous -version, where handles become invalid *before* the __del__ is called). - -.. 
branch: ufunc-casting - -allow automatic casting in ufuncs (and frompypyfunc) to cast the -arguments to the allowed function type declarations, fixes various -failures in linalg cffi functions diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, 
generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + +PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, 
py_obj) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,30 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + raise OperationError(space.w_NotImplementedError, space.wrap( + "NotImplemented")) + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + 
py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space, rffi.cast(PyObject, + py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 
3 + assert (a + b) == 42 + raises(NotImplementedError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -2,6 +2,7 @@ It should not be imported by the module itself """ import re +import py from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError @@ -12,6 +13,10 @@ from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache +from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary +from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, + UserDelAction) +from pypy.interpreter.pyframe import PyFrame class BogusBytecode(Exception): @@ -32,12 +37,11 @@ class BadToken(Exception): pass - SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring", "count_nonzero", "argsort", "cumsum", "logical_xor_reduce"] -TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted', 'multiply'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype', 'reshape'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -57,6 +61,10 @@ w_OverflowError = W_TypeObject("OverflowError") w_NotImplementedError = W_TypeObject("NotImplementedError") w_AttributeError = W_TypeObject("AttributeError") + w_StopIteration = W_TypeObject("StopIteration") + w_KeyError = W_TypeObject("KeyError") + w_SystemExit = W_TypeObject("SystemExit") + w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_None = None w_bool = W_TypeObject("bool") @@ -72,13 +80,26 @@ w_dict = W_TypeObject("dict") w_object = 
W_TypeObject("object") w_buffer = W_TypeObject("buffer") + w_type = W_TypeObject("type") - def __init__(self): + def __init__(self, config=None): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild self.w_Ellipsis = special.Ellipsis() self.w_NotImplemented = special.NotImplemented() + if config is None: + from pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(translating=False) + self.config = config + + self.interned_strings = make_weak_value_dictionary(self, str, W_Root) + self.builtin = DictObject({}) + self.FrameClass = PyFrame + self.threadlocals = ThreadLocals() + self.actionflag = ActionFlag() # changed by the signal module + self.check_signal_action = None # changed by the signal module + def _freeze_(self): return True @@ -89,12 +110,17 @@ return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def len(self, w_obj): - assert isinstance(w_obj, ListObject) - return self.wrap(len(w_obj.items)) + if isinstance(w_obj, ListObject): + return self.wrap(len(w_obj.items)) + elif isinstance(w_obj, DictObject): + return self.wrap(len(w_obj.items)) + raise NotImplementedError def getattr(self, w_obj, w_attr): assert isinstance(w_attr, StringObject) - return w_obj.getdictvalue(self, w_attr.v) + if isinstance(w_obj, DictObject): + return w_obj.getdictvalue(self, w_attr) + return None def isinstance_w(self, w_obj, w_tp): try: @@ -102,6 +128,22 @@ except AttributeError: return False + def iter(self, w_iter): + if isinstance(w_iter, ListObject): + raise NotImplementedError + #return IterObject(space, w_iter.items) + elif isinstance(w_iter, DictObject): + return IterDictObject(self, w_iter) + + def next(self, w_iter): + return w_iter.next() + + def contains(self, w_iter, w_key): + if isinstance(w_iter, DictObject): + return self.wrap(w_key in w_iter.items) + + raise NotImplementedError + def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): return (self.int_w(w_idx), 0, 0, 1) @@ -123,6 +165,10 @@ lgt = 
(stop - start - 1) / step + 1 return (start, stop, step, lgt) + def unicode_from_object(self, w_item): + # XXX + return StringObject("") + @specialize.argtype(1) def wrap(self, obj): if isinstance(obj, float): @@ -145,7 +191,55 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def newfloat(self, f): + return self.float(f) + + def le(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_le(self, w_obj2) + + def lt(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_lt(self, w_obj2) + + def ge(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_ge(self, w_obj2) + + def add(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_add(self, w_obj2) + + def sub(self, w_obj1, w_obj2): + return self.wrap(1) + + def mul(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_mul(self, w_obj2) + + def pow(self, w_obj1, w_obj2, _): + return self.wrap(1) + + def neg(self, w_obj1): + return self.wrap(0) + + def repr(self, w_obj1): + return self.wrap('fake') + def getitem(self, obj, index): + if isinstance(obj, DictObject): + w_dict = obj.getdict(self) + if w_dict is not None: + try: + return w_dict[index] + except KeyError, e: + raise OperationError(self.w_KeyError, self.wrap("key error")) + assert isinstance(obj, ListObject) assert isinstance(index, IntObject) return obj.items[index.intval] @@ -191,12 +285,24 @@ return w_obj.v raise NotImplementedError + def unicode_w(self, w_obj): + # XXX + if isinstance(w_obj, StringObject): + return unicode(w_obj.v) + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): 
return w_obj assert isinstance(w_obj, boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) + def long(self, w_obj): + if isinstance(w_obj, LongObject): + return w_obj + assert isinstance(w_obj, boxes.W_GenericBox) + return self.int(w_obj.descr_long(self)) + def str(self, w_obj): if isinstance(w_obj, StringObject): return w_obj @@ -240,9 +346,29 @@ def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) - def call_function(self, tp, w_dtype): + def call_function(self, tp, w_dtype, *args): + if tp is self.w_float: + if isinstance(w_dtype, boxes.W_Float64Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Float32Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Int64Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int32Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int16Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int8Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, IntObject): + return FloatObject(float(w_dtype.intval)) + if tp is self.w_int: + if isinstance(w_dtype, FloatObject): + return IntObject(int(w_dtype.floatval)) + return w_dtype + @specialize.arg(2) def call_method(self, w_obj, s, *args): # XXX even the hacks have hacks return getattr(w_obj, 'descr_' + s)(self, *args) @@ -258,21 +384,21 @@ def newtuple(self, list_w): return ListObject(list_w) - def newdict(self): - return {} + def newdict(self, module=True): + return DictObject({}) - def setitem(self, dict, item, value): - dict[item] = value + def newint(self, i): + if isinstance(i, IntObject): + return i + return IntObject(i) - def len_w(self, w_obj): - if isinstance(w_obj, ListObject): - return len(w_obj.items) - # XXX array probably - assert False + def setitem(self, obj, index, value): + obj.items[index] = value def exception_match(self, w_exc_type, w_check_class): - # Good 
enough for now - raise NotImplementedError + assert isinstance(w_exc_type, W_TypeObject) + assert isinstance(w_check_class, W_TypeObject) + return w_exc_type.name == w_check_class.name class FloatObject(W_Root): tp = FakeSpace.w_float @@ -283,6 +409,9 @@ tp = FakeSpace.w_bool def __init__(self, boolval): self.intval = boolval +FakeSpace.w_True = BoolObject(True) +FakeSpace.w_False = BoolObject(False) + class IntObject(W_Root): tp = FakeSpace.w_int @@ -299,6 +428,33 @@ def __init__(self, items): self.items = items +class DictObject(W_Root): + tp = FakeSpace.w_dict + def __init__(self, items): + self.items = items + + def getdict(self, space): + return self.items + + def getdictvalue(self, space, key): + return self.items[key] + +class IterDictObject(W_Root): + def __init__(self, space, w_dict): + self.space = space + self.items = w_dict.items.items() + self.i = 0 + + def __iter__(self): + return self + + def next(self): + space = self.space + if self.i >= len(self.items): + raise OperationError(space.w_StopIteration, space.wrap("stop iteration")) + self.i += 1 + return self.items[self.i-1][0] + class SliceObject(W_Root): tp = FakeSpace.w_slice def __init__(self, start, stop, step): @@ -414,6 +570,15 @@ w_rhs = IntObject(int(w_rhs.floatval)) assert isinstance(w_lhs, W_NDimArray) w_res = w_lhs.descr_getitem(interp.space, w_rhs) + if isinstance(w_rhs, IntObject): + if isinstance(w_res, boxes.W_Float64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Float32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Int64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) + if isinstance(w_res, boxes.W_Int32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and @@ -425,9 +590,22 @@ def __repr__(self): return '(%r %s %r)' % (self.lhs, self.name, 
self.rhs) -class FloatConstant(Node): +class NumberConstant(Node): def __init__(self, v): - self.v = float(v) + if isinstance(v, int): + self.v = v + elif isinstance(v, float): + self.v = v + else: + assert isinstance(v, str) + assert len(v) > 0 + c = v[-1] + if c == 'f': + self.v = float(v[:-1]) + elif c == 'i': + self.v = int(v[:-1]) + else: + self.v = float(v) def __repr__(self): return "Const(%s)" % self.v @@ -519,8 +697,24 @@ def execute(self, interp): if self.v == 'int': dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'int8': + dtype = get_dtype_cache(interp.space).w_int8dtype + elif self.v == 'int16': + dtype = get_dtype_cache(interp.space).w_int16dtype + elif self.v == 'int32': + dtype = get_dtype_cache(interp.space).w_int32dtype + elif self.v == 'uint': + dtype = get_dtype_cache(interp.space).w_uint64dtype + elif self.v == 'uint8': + dtype = get_dtype_cache(interp.space).w_uint8dtype + elif self.v == 'uint16': + dtype = get_dtype_cache(interp.space).w_uint16dtype + elif self.v == 'uint32': + dtype = get_dtype_cache(interp.space).w_uint32dtype elif self.v == 'float': dtype = get_dtype_cache(interp.space).w_float64dtype + elif self.v == 'float32': + dtype = get_dtype_cache(interp.space).w_float32dtype else: raise BadToken('unknown v to dtype "%s"' % self.v) return dtype @@ -556,8 +750,13 @@ raise ArgumentMismatch if self.name == "sum": if len(self.args)>1: - w_res = arr.descr_sum(interp.space, + var = self.args[1] + if isinstance(var, DtypeClass): + w_res = arr.descr_sum(interp.space, None, var.execute(interp)) + else: + w_res = arr.descr_sum(interp.space, self.args[1].execute(interp)) + else: w_res = arr.descr_sum(interp.space) elif self.name == "prod": @@ -577,10 +776,10 @@ w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative - w_res = neg.call(interp.space, [arr], None, None, None) + w_res = neg.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "cos": 
cos = ufuncs.get(interp.space).cos - w_res = cos.call(interp.space, [arr], None, None, None) + w_res = cos.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) elif self.name == "argsort": @@ -598,6 +797,8 @@ raise ArgumentNotAnArray if self.name == "dot": w_res = arr.descr_dot(interp.space, arg) + elif self.name == 'multiply': + w_res = arr.descr_mul(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) elif self.name == "searchsorted": @@ -617,7 +818,7 @@ if self.name == "where": w_res = where(interp.space, arr, arg1, arg2) else: - assert False + assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: if len(self.args) != 2: raise ArgumentMismatch @@ -626,6 +827,11 @@ w_res = arr.descr_view(interp.space, arg) elif self.name == 'astype': w_res = arr.descr_astype(interp.space, arg) + elif self.name == 'reshape': + w_arg = self.args[1] + assert isinstance(w_arg, ArrayConstant) + order = -1 + w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False else: @@ -645,7 +851,7 @@ return W_NDimArray.new_scalar(interp.space, dtype, w_res) _REGEXES = [ - ('-?[\d\.]+', 'number'), + ('-?[\d\.]+(i|f)?', 'number'), ('\[', 'array_left'), (':', 'colon'), ('\w+', 'identifier'), @@ -719,7 +925,7 @@ start = 0 else: if tokens.get(0).name != 'colon': - return FloatConstant(start_tok.v) + return NumberConstant(start_tok.v) start = int(start_tok.v) tokens.pop() if not tokens.get(0).name in ['colon', 'number']: @@ -751,8 +957,30 @@ stack.append(ArrayClass()) elif token.v.strip(' ') == 'int': stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'int8': + stack.append(DtypeClass('int8')) + elif token.v.strip(' ') == 'int16': + stack.append(DtypeClass('int16')) + elif token.v.strip(' ') == 'int32': + stack.append(DtypeClass('int32')) + elif token.v.strip(' ') == 'int64': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 
'uint': + stack.append(DtypeClass('uint')) + elif token.v.strip(' ') == 'uint8': + stack.append(DtypeClass('uint8')) + elif token.v.strip(' ') == 'uint16': + stack.append(DtypeClass('uint16')) + elif token.v.strip(' ') == 'uint32': + stack.append(DtypeClass('uint32')) + elif token.v.strip(' ') == 'uint64': + stack.append(DtypeClass('uint')) elif token.v.strip(' ') == 'float': stack.append(DtypeClass('float')) + elif token.v.strip(' ') == 'float32': + stack.append(DtypeClass('float32')) + elif token.v.strip(' ') == 'float64': + stack.append(DtypeClass('float')) else: stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': @@ -805,7 +1033,7 @@ while True: token = tokens.pop() if token.name == 'number': - elems.append(FloatConstant(token.v)) + elems.append(NumberConstant(token.v)) elif token.name == 'array_left': elems.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'paren_left': diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -70,7 +70,10 @@ @jit.unroll_safe def setslice(self, space, arr): - if len(arr.get_shape()) > len(self.get_shape()): + if arr.get_size() == 1: + # we can always set self[:] = scalar + pass + elif len(arr.get_shape()) > len(self.get_shape()): # record arrays get one extra dimension if not self.dtype.is_record() or \ len(arr.get_shape()) > len(self.get_shape()) + 1: diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -86,6 +86,9 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + # numpy testing calls array(type(array([]))) and expects a ValueError + if space.isinstance_w(w_object, space.w_type): + raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") # for anything that isn't already an array, try __array__ method first if 
not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -97,7 +97,7 @@ finally: self.iter.reset(self.state, mutate=True) - def descr___array_wrap__(self, space, obj): + def descr___array_wrap__(self, space, obj, w_context=None): return obj W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -83,6 +83,12 @@ self._indices = indices self.offset = offset + def same(self, other): + if self.offset == other.offset and \ + self.index == other.index and \ + self._indices == other._indices: + return self.iterator.same_shape(other.iterator) + return False class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', @@ -100,6 +106,7 @@ self.array = array self.size = size self.ndim_m1 = len(shape) - 1 + # self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides @@ -113,6 +120,17 @@ factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors + def same_shape(self, other): + """ Iterating over the same element """ + if not self.contiguous or not other.contiguous: + return False + return (self.contiguous == other.contiguous and + self.array.dtype is self.array.dtype and + self.shape_m1 == other.shape_m1 and + self.strides == other.strides and + self.backstrides == other.backstrides and + self.factors == other.factors) + @jit.unroll_safe def reset(self, state=None, mutate=False): index = 0 @@ -138,9 +156,13 @@ indices = state._indices offset = state.offset if self.contiguous: - offset += self.array.dtype.elsize + elsize = self.array.dtype.elsize + jit.promote(elsize) + offset += elsize elif self.ndim_m1 == 0: - offset 
+= self.strides[0] + stride = self.strides[0] + jit.promote(stride) + offset += stride else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -192,7 +214,7 @@ return state.index >= self.size def getitem(self, state): - assert state.iterator is self + # assert state.iterator is self return self.array.getitem(state.offset) def getitem_bool(self, state): @@ -203,7 +225,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def AxisIter(array, shape, axis): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -2,6 +2,7 @@ operations. This is the place to look for all the computations that iterate over all the array elements. """ +import py from pypy.interpreter.error import OperationError from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder @@ -13,11 +14,6 @@ from pypy.interpreter.argument import Arguments -call2_driver = jit.JitDriver( - name='numpy_call2', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') - def call2(space, shape, func, calc_dtype, w_lhs, w_rhs, out): if w_lhs.get_size() == 1: w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) @@ -38,24 +34,104 @@ out_iter, out_state = out.create_iter(shape) shapelen = len(shape) res_dtype = out.get_dtype() - while not out_iter.done(out_state): - call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype) - if left_iter: - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - left_state = left_iter.next(left_state) - if right_iter: - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) - right_state = right_iter.next(right_state) - out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( - space, res_dtype)) - out_state = out_iter.next(out_state) - return out + 
call2_func = try_to_share_iterators_call2(left_iter, right_iter, + left_state, right_state, out_state) + params = (space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state) + return call2_func(*params) + +def try_to_share_iterators_call2(left_iter, right_iter, left_state, right_state, out_state): + # these are all possible iterator sharing combinations + # left == right == out + # left == right + # left == out + # right == out + right_out_equal = False + if right_iter: + # rhs is not a scalar + if out_state.same(right_state): + right_out_equal = True + # + if not left_iter: + # lhs is a scalar + if right_out_equal: + return call2_advance_out_left + else: + # worst case, nothing can be shared and lhs is a scalar + return call2_advance_out_left_right + else: + # lhs is NOT a scalar + if out_state.same(left_state): + # (2) out and left are the same -> remove left + if right_out_equal: + # the best case + return call2_advance_out + else: + return call2_advance_out_right + else: + if right_out_equal: + # right and out are equal, only advance left and out + return call2_advance_out_left + else: + if right_iter and right_state.same(left_state): + # left and right are equal, but still need to advance out + return call2_advance_out_left_eq_right + else: + # worst case, nothing can be shared + return call2_advance_out_left_right + + assert 0, "logical problem with the selection of the call2 case" + +def generate_call2_cases(name, left_state, right_state): + call2_driver = jit.JitDriver(name='numpy_call2_' + name, + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) + # + advance_left_state = left_state == "left_state" + advance_right_state = right_state == "right_state" + code = """ + def method(space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state): + while not 
out_iter.done(out_state): + call2_driver.jit_merge_point(shapelen=shapelen, func=func, + calc_dtype=calc_dtype, res_dtype=res_dtype) + if left_iter: + w_left = left_iter.getitem({left_state}).convert_to(space, calc_dtype) + if right_iter: + w_right = right_iter.getitem({right_state}).convert_to(space, calc_dtype) + w_out = func(calc_dtype, w_left, w_right) + out_iter.setitem(out_state, w_out.convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + if advance_left_state and left_iter: + left_state = left_iter.next(left_state) + if advance_right_state and right_iter: + right_state = right_iter.next(right_state) + # + # if not set to None, the values will be loop carried + # (for the var,var case), forcing the vectorization to unpack + # the vector registers at the end of the loop + if left_iter: + w_left = None + if right_iter: + w_right = None + return out + """ + exec(py.code.Source(code.format(left_state=left_state,right_state=right_state)).compile(), locals()) + method.__name__ = "call2_" + name + return method + +call2_advance_out = generate_call2_cases("inc_out", "out_state", "out_state") +call2_advance_out_left = generate_call2_cases("inc_out_left", "left_state", "out_state") +call2_advance_out_right = generate_call2_cases("inc_out_right", "out_state", "right_state") +call2_advance_out_left_eq_right = generate_call2_cases("inc_out_left_eq_right", "left_state", "left_state") +call2_advance_out_left_right = generate_call2_cases("inc_out_left_right", "left_state", "right_state") call1_driver = jit.JitDriver( name='numpy_call1', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') + greens=['shapelen', 'share_iterator', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) def call1(space, shape, func, calc_dtype, w_obj, w_ret): obj_iter, obj_state = w_obj.create_iter(shape) @@ -63,13 +139,24 @@ out_iter, out_state = w_ret.create_iter(shape) shapelen = len(shape) res_dtype = w_ret.get_dtype() + share_iterator = 
out_state.same(obj_state) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, + share_iterator=share_iterator, calc_dtype=calc_dtype, res_dtype=res_dtype) - elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) + if share_iterator: + # use out state as param to getitem + elem = obj_iter.getitem(out_state).convert_to(space, calc_dtype) + else: + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) - out_state = out_iter.next(out_state) - obj_state = obj_iter.next(obj_state) + if share_iterator: + # only advance out, they share the same iteration space + out_state = out_iter.next(out_state) + else: + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) + elem = None return w_ret call_many_to_one_driver = jit.JitDriver( @@ -145,7 +232,7 @@ vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) - # w_outvals should be a tuple, but func can return a single value as well + # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): @@ -161,7 +248,7 @@ setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', vectorize=True) def setslice(space, shape, target, source): if not shape: @@ -239,7 +326,8 @@ reduce_flat_driver = jit.JitDriver( name='numpy_reduce_flat', - greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto') + greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto', + vectorize = True) def reduce_flat(space, func, w_arr, calc_dtype, done_func, identity): obj_iter, obj_state = w_arr.create_iter() @@ -260,10 +348,10 @@ obj_state = obj_iter.next(obj_state) 
return cur_value - reduce_driver = jit.JitDriver( name='numpy_reduce', - greens=['shapelen', 'func', 'dtype'], reds='auto') + greens=['shapelen', 'func', 'dtype'], reds='auto', + vectorize=True) def reduce(space, func, w_arr, axis_flags, dtype, out, identity): out_iter, out_state = out.create_iter() @@ -298,7 +386,7 @@ accumulate_flat_driver = jit.JitDriver( name='numpy_accumulate_flat', greens=['shapelen', 'func', 'dtype', 'out_dtype'], - reds='auto') + reds='auto', vectorize=True) def accumulate_flat(space, func, w_arr, calc_dtype, w_out, identity): arr_iter, arr_state = w_arr.create_iter() @@ -325,7 +413,9 @@ accumulate_driver = jit.JitDriver( name='numpy_accumulate', - greens=['shapelen', 'func', 'calc_dtype'], reds='auto') + greens=['shapelen', 'func', 'calc_dtype'], + reds='auto', + vectorize=True) def accumulate(space, func, w_arr, axis, calc_dtype, w_out, identity): @@ -375,7 +465,8 @@ where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def where(space, out, shape, arr, x, y, dtype): out_iter, out_state = out.create_iter(shape) @@ -416,7 +507,6 @@ state = x_state return out - def _new_argmin_argmax(op_name): arg_driver = jit.JitDriver(name='numpy_' + op_name, greens = ['shapelen', 'dtype'], @@ -481,7 +571,8 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -524,8 +615,8 @@ lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += s1 - i2 += s2 + i1 += jit.promote(s1) + i2 += jit.promote(s2) outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) @@ -535,7 +626,8 @@ count_all_true_driver = jit.JitDriver(name = 'numpy_count', greens = 
['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def count_all_true_concrete(impl): s = 0 @@ -556,7 +648,8 @@ nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', greens = ['shapelen', 'dims', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def nonzero(res, arr, box): res_iter, res_state = res.create_iter() @@ -578,7 +671,8 @@ getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def getitem_filter(res, arr, index): res_iter, res_state = res.create_iter() @@ -606,7 +700,8 @@ setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def setitem_filter(space, arr, index, value): arr_iter, arr_state = arr.create_iter() @@ -635,7 +730,8 @@ flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_getitem(res, base_iter, base_state, step): ri, rs = res.create_iter() @@ -649,7 +745,8 @@ flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() @@ -758,7 +855,8 @@ byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def byteswap(from_, to): dtype = from_.dtype @@ -773,7 +871,8 @@ choose_driver = jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -807,7 +906,8 @@ clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + 
reds = 'auto', + vectorize=True) def clip(space, arr, shape, min, max, out): assert min or max @@ -842,7 +942,8 @@ round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def round(space, arr, dtype, shape, decimals, out): arr_iter, arr_state = arr.create_iter(shape) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -7,6 +7,7 @@ # structures to describe slicing class BaseChunk(object): + _attrs_ = ['step','out_dim'] pass diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,6 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, - ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + ArrayConstant, NumberConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace, W_NDimArray) @@ -25,30 +25,30 @@ interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [FloatConstant(1), FloatConstant(2), - FloatConstant(3)] + assert st.expr.items == [NumberConstant(1), NumberConstant(2), + NumberConstant(3)] def test_array_literal2(self): code = "a = [[1],[2],[3]]" interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [ArrayConstant([FloatConstant(1)]), - ArrayConstant([FloatConstant(2)]), - ArrayConstant([FloatConstant(3)])] + assert st.expr.items == [ArrayConstant([NumberConstant(1)]), + ArrayConstant([NumberConstant(2)]), + ArrayConstant([NumberConstant(3)])] def test_expr_1(self): code = "b = a + 1" interp = self.compile(code) assert (interp.code.statements[0].expr == - 
Operator(Variable("a"), "+", FloatConstant(1))) + Operator(Variable("a"), "+", NumberConstant(1))) def test_expr_2(self): code = "b = a + b - 3" interp = self.compile(code) assert (interp.code.statements[0].expr == Operator(Operator(Variable("a"), "+", Variable("b")), "-", - FloatConstant(3))) + NumberConstant(3))) def test_expr_3(self): # an equivalent of range @@ -60,13 +60,13 @@ code = "3 + a" interp = self.compile(code) assert interp.code.statements[0] == Execute( - Operator(FloatConstant(3), "+", Variable("a"))) + Operator(NumberConstant(3), "+", Variable("a"))) def test_array_access(self): code = "a -> 3" interp = self.compile(code) assert interp.code.statements[0] == Execute( - Operator(Variable("a"), "->", FloatConstant(3))) + Operator(Variable("a"), "->", NumberConstant(3))) def test_function_call(self): code = "sum(a)" @@ -81,7 +81,7 @@ """ interp = self.compile(code) assert interp.code.statements[0] == Assignment( - 'a', Operator(Variable('b'), "+", FloatConstant(3))) + 'a', Operator(Variable('b'), "+", NumberConstant(3))) class TestRunner(object): @@ -272,6 +272,14 @@ """) assert interp.results[0].value == 3 + def test_any(self): + interp = self.run(""" + a = [0,0,0,0,0.1,0,0,0,0] + b = any(a) + b -> 0 + """) + assert interp.results[0].value == 1 + def test_where(self): interp = self.run(''' a = [1, 0, 3, 0] diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -292,6 +292,8 @@ a = np.array('123', dtype='intp') assert a == 123 assert a.dtype == np.intp + # required for numpy test suite + raises(ValueError, np.array, type(a)) def test_array_copy(self): from numpy import array diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -319,6 +319,28 @@ assert 
out0.dtype in (int, complex) assert (out0 == in0 * 2).all() + def test_frompyfunc_scalar(self): + import sys + import numpy as np + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy only frompyfunc extension') + + def summer(in0): + out = np.empty(1, in0.dtype) + out[0] = in0.sum() + return out + + pysummer = np.frompyfunc([summer, summer], 1, 1, + dtypes=[np.dtype(int), np.dtype(int), + np.dtype(complex), np.dtype(complex)], + stack_inputs=False, signature='(m,m)->()', + ) + for d in [np.dtype(float), np.dtype('uint8'), np.dtype('complex64')]: + in0 = np.arange(4, dtype=d).reshape(1, 2, 2) + out0 = pysummer(in0) + assert out0 == in0.sum() + assert out0.dtype in (int, complex) + def test_ufunc_kwargs(self): from numpy import ufunc, frompyfunc, arange, dtype def adder(a, b): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -4,17 +4,37 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats +from rpython.jit.metainterp.jitprof import Profiler +from rpython.jit.metainterp import counter +from rpython.rlib.jit import Counters +from rpython.rlib.rarithmetic import intmask from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray +from rpython.jit.backend.detect_cpu import getcpuclass -py.test.skip('move these to pypyjit/test_pypy_c/test_micronumpy') +CPU = getcpuclass() +if not CPU.vector_extension: + py.test.skip("this cpu %s has no implemented vector backend" % CPU) + +def get_profiler(): + from rpython.jit.metainterp import pyjitpl + return pyjitpl._warmrunnerdesc.metainterp_sd.profiler class TestNumpyJit(LLJitMixin): + enable_opts = 
"intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" graph = None interp = None + def setup_method(self, method): + if not self.CPUClass.vector_extension: + py.test.skip("needs vector extension to run (for now)") + + def assert_float_equal(self, f1, f2, delta=0.0001): + assert abs(f1-f2) < delta + def setup_class(cls): default = """ a = [1,2,3,4] @@ -52,12 +72,29 @@ w_res = i.getitem(s) if isinstance(w_res, boxes.W_Float64Box): return w_res.value + if isinstance(w_res, boxes.W_Float32Box): + return float(w_res.value) elif isinstance(w_res, boxes.W_Int64Box): return float(w_res.value) + elif isinstance(w_res, boxes.W_Int32Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int16Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_Int8Box): + return float(int(w_res.value)) + elif isinstance(w_res, boxes.W_UInt64Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt32Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt16Box): + return float(intmask(w_res.value)) + elif isinstance(w_res, boxes.W_UInt8Box): + return float(intmask(w_res.value)) elif isinstance(w_res, boxes.W_LongBox): return float(w_res.value) elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) + print "ERROR: did not implement return type for interpreter" raise TypeError(w_res) if self.graph is None: @@ -65,122 +102,354 @@ listops=True, listcomp=True, backendopt=True, - graph_and_interp_only=True) + graph_and_interp_only=True, + ProfilerClass=Profiler, + vec=True) self.__class__.interp = interp self.__class__.graph = graph + def check_vectorized(self, expected_tried, expected_success): + profiler = get_profiler() + tried = profiler.get_counter(Counters.OPT_VECTORIZE_TRY) + success = profiler.get_counter(Counters.OPT_VECTORIZED) + assert tried >= success + assert tried == expected_tried + assert success == expected_success + def run(self, name): self.compile_graph() + profiler = 
get_profiler() + profiler.start() reset_jit() i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) return retval - def define_add(): + def define_float32_copy(): + return """ + a = astype(|30|, float32) + x1 = a -> 7 + x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + r = x1 + x2 + x3 + x4 + r + """ + def test_float32_copy(self): + result = self.run("float32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_int32_copy(): + return """ + a = astype(|30|, int32) + x1 = a -> 7 + x2 = a -> 8 + x3 = a -> 9 + x4 = a -> 10 + x1 + x2 + x3 + x4 + """ + def test_int32_copy(self): + result = self.run("int32_copy") + assert int(result) == 7+8+9+10 + self.check_vectorized(1, 1) + + def define_float32_add(): + return """ + a = astype(|30|, float32) + b = a + a + b -> 15 + """ + def test_float32_add(self): + result = self.run("float32_add") + self.assert_float_equal(result, 15.0 + 15.0) + self.check_vectorized(2, 2) + + def define_float_add(): return """ a = |30| b = a + a From noreply at buildbot.pypy.org Fri Oct 16 11:47:50 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Oct 2015 11:47:50 +0200 (CEST) Subject: [pypy-commit] pypy default: grumble Message-ID: <20151016094750.B225C1C1248@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80266:e7ea0af5ce3e Date: 2015-10-16 11:48 +0200 http://bitbucket.org/pypy/pypy/changeset/e7ea0af5ce3e/ Log: grumble diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py --- a/pypy/module/micronumpy/casting.py +++ b/pypy/module/micronumpy/casting.py @@ -39,8 +39,8 @@ return find_result_type(space, arrays_w, dtypes_w) @jit.look_inside_iff(lambda space, arrays_w, dtypes_w: - jit.loop_unrolling_heuristic(arrays_w) and - jit.loop_unrolling_heuristic(dtypes_w)) + jit.loop_unrolling_heuristic(arrays_w, len(arrays_w)) and + jit.loop_unrolling_heuristic(dtypes_w, len(dtypes_w))) def find_result_type(space, arrays_w, dtypes_w): # equivalent to 
PyArray_ResultType if len(arrays_w) == 1 and not dtypes_w: From noreply at buildbot.pypy.org Fri Oct 16 11:54:42 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:54:42 +0200 (CEST) Subject: [pypy-commit] pypy ppc-updated-backend: Close branch, ready for merge Message-ID: <20151016095442.2BD221C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: ppc-updated-backend Changeset: r80267:e3f760f59b9e Date: 2015-10-16 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/e3f760f59b9e/ Log: Close branch, ready for merge From noreply at buildbot.pypy.org Fri Oct 16 11:54:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 11:54:45 +0200 (CEST) Subject: [pypy-commit] pypy default: hg merge ppc-updated-backend Message-ID: <20151016095445.613701C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80268:74f10fa5dff0 Date: 2015-10-16 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/74f10fa5dff0/ Log: hg merge ppc-updated-backend PPC backend #8. The PPC backend now seems to work and be stable, so let's merge it to default to bring in the few changes done outside the "backend/ppc" directory: some extra tests in runner_test, some details for big-endian machines in backend/llsupport/test, and so on. 
diff too long, truncating to 2000 out of 9622 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -76,6 +76,11 @@ if "cppyy" in working_modules: working_modules.remove("cppyy") # depends on ctypes +if sys.platform.startswith("linux"): + _mach = os.popen('uname -m', 'r').read().strip() + if _mach.startswith('ppc'): + working_modules.remove("_continuation") + module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -224,6 +224,10 @@ if not for_frame: self._push_all_regs_to_jitframe(mc, [], withfloats, callee_only=True) else: + # NOTE: don't save registers on the jitframe here! It might + # override already-saved values that will be restored + # later... + # # we're possibly called from the slowpath of malloc # save the caller saved registers # assuming we do not collect here diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -41,10 +41,6 @@ from rpython.jit.backend.llsupport.descr import CallDescr -# xxx hack: set a default value for TargetToken._ll_loop_code. If 0, we know -# that it is a LABEL that was not compiled yet. 
-TargetToken._ll_loop_code = 0 - class TempInt(TempVar): type = INT @@ -1257,18 +1253,6 @@ prepare_op_call_assembler_f = _prepare_op_call_assembler prepare_op_call_assembler_n = _prepare_op_call_assembler - def _prepare_args_for_new_op(self, new_args): - gc_ll_descr = self.cpu.gc_ll_descr - args = gc_ll_descr.args_for_new(new_args) - arglocs = [] - for i in range(len(args)): - arg = args[i] - t = TempInt() - l = self.force_allocate_reg(t, selected_reg=r.all_regs[i]) - self.assembler.load(l, imm(arg)) - arglocs.append(t) - return arglocs - prepare_op_float_add = prepare_two_regs_op prepare_op_float_sub = prepare_two_regs_op prepare_op_float_mul = prepare_two_regs_op diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -26,24 +26,24 @@ # for the individual tests see # ====> ../../test/runner_test.py - add_loop_instructions = ['ldr', 'adds', 'cmp', 'beq', 'b'] - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'cmp', 'bge', - 'push', 'mov', 'mov', 'push', 'mov', 'mov', - 'blx', 'mov', 'mov', 'bx'] + add_loop_instructions = 'ldr; adds; cmp; beq; b;$' + bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' + 'push; mov; mov; push; mov; mov; ' + 'blx; mov; mov; bx;$') arch_version = detect_arch_version() if arch_version == 7: - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'cmp', 'bge', - 'push', 'mov', 'mov', 'push', 'mov', 'mov', - 'blx', 'mov', 'mov', 'bx'] + bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' + 'push; mov; mov; push; mov; mov; ' + 'blx; mov; mov; bx;$') else: - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'nop', 'nop', 'cmp', 'bge', - 'push', 'ldr', 'mov', - '*', # inline constant - 'push', 'ldr', 'mov', - '*', # inline constant - 'blx', 'ldr', 'mov', - '*', # inline constant - 'bx'] + bridge_loop_instructions = ('ldr; mov; nop; nop; nop; cmp; bge; ' + 'push; ldr; mov; ' + '[^;]+; ' # 
inline constant + 'push; ldr; mov; ' + '[^;]+; ' # inline constant + 'blx; ldr; mov; ' + '[^;]+; ' # inline constant + 'bx;$') def get_cpu(self): cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -59,6 +59,8 @@ 'i86pc': MODEL_X86, # Solaris/Intel 'x86': MODEL_X86, # Apple 'Power Macintosh': MODEL_PPC_64, + 'ppc64': MODEL_PPC_64, + 'ppc64le': MODEL_PPC_64, 'x86_64': MODEL_X86, 'amd64': MODEL_X86, # freebsd 'AMD64': MODEL_X86, # win64 @@ -118,6 +120,8 @@ return "rpython.jit.backend.x86.runner", "CPU_X86_64_SSE4" elif backend_name == MODEL_ARM: return "rpython.jit.backend.arm.runner", "CPU_ARM" + elif backend_name == MODEL_PPC_64: + return "rpython.jit.backend.ppc.runner", "PPC_CPU" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -243,6 +243,23 @@ self.mc.get_relative_pos()) def call_assembler(self, op, argloc, vloc, result_loc, tmploc): + """ + * argloc: location of the frame argument that we're passing to + the called assembler (this is the first return value + of locs_for_call_assembler()) + + * vloc: location of the virtualizable (not in a register; + this is the optional second return value of + locs_for_call_assembler(), or imm(0) if none returned) + + * result_loc: location of op.result (which is not be + confused with the next one) + + * tmploc: location where the actual call to the other piece + of assembler will return its jitframe result + (which is always a REF), before the helper may be + called + """ descr = op.getdescr() assert isinstance(descr, JitCellToken) # diff --git a/rpython/jit/backend/llsupport/llerrno.py 
b/rpython/jit/backend/llsupport/llerrno.py --- a/rpython/jit/backend/llsupport/llerrno.py +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -1,14 +1,22 @@ +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.jit.backend.llsupport.symbolic import WORD +if sys.byteorder == 'little' or sys.maxint <= 2**32: + long2int = int2long = lambda x: x +else: + def long2int(x): return x >> 32 + def int2long(x): return x << 32 + + def get_debug_saved_errno(cpu): - return cpu._debug_errno_container[3] + return long2int(cpu._debug_errno_container[3]) def set_debug_saved_errno(cpu, nerrno): assert nerrno >= 0 - cpu._debug_errno_container[3] = nerrno + cpu._debug_errno_container[3] = int2long(nerrno) def get_rpy_errno_offset(cpu): if cpu.translate_support_code: @@ -19,11 +27,11 @@ def get_debug_saved_alterrno(cpu): - return cpu._debug_errno_container[4] + return long2int(cpu._debug_errno_container[4]) def set_debug_saved_alterrno(cpu, nerrno): assert nerrno >= 0 - cpu._debug_errno_container[4] = nerrno + cpu._debug_errno_container[4] = int2long(nerrno) def get_alt_errno_offset(cpu): if cpu.translate_support_code: diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -3,7 +3,7 @@ """ import py -import re +import re, sys, struct from rpython.jit.metainterp.history import TargetToken, BasicFinalDescr,\ JitCellToken, BasicFailDescr, AbstractDescr from rpython.jit.backend.llsupport.gc import GcLLDescription, GcLLDescr_boehm,\ @@ -90,6 +90,8 @@ assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): assert nos == [0, 1, 47] + elif self.cpu.backend_name.startswith('ppc64'): + assert nos == [0, 1, 33] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] @@ 
-155,6 +157,8 @@ self.nursery = lltype.malloc(NTP, 64, flavor='raw') for i in range(64): self.nursery[i] = NOT_INITIALIZED + self.nursery_words = rffi.cast(rffi.CArrayPtr(lltype.Signed), + self.nursery) self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) @@ -263,11 +267,11 @@ # slowpath never called assert gc_ll_descr.calls == [] - def test_malloc_nursery_varsize(self): + def test_malloc_nursery_varsize_nonframe(self): self.cpu = self.getcpu(None) A = lltype.GcArray(lltype.Signed) arraydescr = self.cpu.arraydescrof(A) - arraydescr.tid = 15 + arraydescr.tid = 1515 ops = ''' [i0, i1, i2] p0 = call_malloc_nursery_varsize(0, 8, i0, descr=arraydescr) @@ -283,8 +287,8 @@ assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 2*WORD + 8*1 # check the nursery content and state - assert gc_ll_descr.nursery[0] == chr(15) - assert gc_ll_descr.nursery[2 * WORD + 8] == chr(15) + assert gc_ll_descr.nursery_words[0] == 1515 + assert gc_ll_descr.nursery_words[2 + 8 // WORD] == 1515 assert gc_ll_descr.addrs[0] == nurs_adr + (((4 * WORD + 8*1 + 5*2) + (WORD - 1)) & ~(WORD - 1)) # slowpath never called assert gc_ll_descr.calls == [] @@ -323,11 +327,11 @@ idx = 1 assert len(frame.jf_gcmap) == expected_size if self.cpu.IS_64_BIT: - assert frame.jf_gcmap[idx] == (1<<29) | (1 << 30) + exp_idx = self.cpu.JITFRAME_FIXED_SIZE + 1 # +1 from i0 else: assert frame.jf_gcmap[idx] exp_idx = self.cpu.JITFRAME_FIXED_SIZE - 32 * idx + 1 # +1 from i0 - assert frame.jf_gcmap[idx] == (1 << (exp_idx + 1)) | (1 << exp_idx) + assert frame.jf_gcmap[idx] == (1 << (exp_idx + 1)) | (1 << exp_idx) self.cpu = self.getcpu(check) ops = ''' @@ -609,7 +613,10 @@ cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrShadowstackDirect() wbd = cpu.gc_ll_descr.write_barrier_descr - wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + if sys.byteorder == 'little': + 
wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + else: + wbd.jit_wb_if_flag_byteofs = struct.calcsize("l") - 1 S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', ('hdr', lltype.Signed), @@ -636,7 +643,9 @@ frames.append(frame) new_frame = JITFRAME.allocate(frame.jf_frame_info) gcmap = unpack_gcmap(frame) - if self.cpu.IS_64_BIT: + if self.cpu.backend_name.startswith('ppc64'): + assert gcmap == [30, 31, 32] + elif self.cpu.IS_64_BIT: assert gcmap == [28, 29, 30] elif self.cpu.backend_name.startswith('arm'): assert gcmap == [44, 45, 46] @@ -647,6 +656,8 @@ new_frame.jf_frame[item] = rffi.cast(lltype.Signed, s) assert cpu.gc_ll_descr.gcrootmap.stack[0] == rffi.cast(lltype.Signed, frame) cpu.gc_ll_descr.gcrootmap.stack[0] = rffi.cast(lltype.Signed, new_frame) + print '"Collecting" moved the frame from %d to %d' % ( + i, cpu.gc_ll_descr.gcrootmap.stack[0]) frames.append(new_frame) def check2(i): diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -307,7 +307,7 @@ for line in open(str(logfile)): if 'guard_class' in line: guard_class += 1 - # if we get many more guard_classes, it means that we generate + # if we get many more guard_classes (~93), it means that we generate # guards that always fail (the following assert's original purpose # is to catch the following case: each GUARD_CLASS is misgenerated # and always fails with "gcremovetypeptr") diff --git a/rpython/jit/backend/ppc/__init__.py b/rpython/jit/backend/ppc/__init__.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/__init__.py @@ -0,0 +1,1 @@ +# diff --git a/rpython/jit/backend/ppc/arch.py b/rpython/jit/backend/ppc/arch.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/arch.py @@ -0,0 +1,82 @@ +# Constants that depend on whether we are on 32-bit or 
64-bit + +import sys +from rpython.jit.backend.ppc import register as r + +import sys +if sys.maxint == (2**31 - 1): + assert False, "the ppc backend only supports PPC-64 for now" + WORD = 4 + #DWORD = 2 * WORD + IS_PPC_32 = True + #BACKCHAIN_SIZE = 2 + #FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD +else: + WORD = 8 + #DWORD = 2 * WORD + IS_PPC_32 = False + #BACKCHAIN_SIZE = 6 + #FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * WORD + +IS_PPC_64 = not IS_PPC_32 +MY_COPY_OF_REGS = 0 + +IS_BIG_ENDIAN = sys.byteorder == 'big' +IS_LITTLE_ENDIAN = sys.byteorder == 'little' +assert IS_BIG_ENDIAN ^ IS_LITTLE_ENDIAN + +#FORCE_INDEX = WORD +#GPR_SAVE_AREA = len(NONVOLATILES) * WORD +#FLOAT_INT_CONVERSION = WORD +MAX_REG_PARAMS = 8 +MAX_FREG_PARAMS = 13 +# we need at most 5 instructions to load a constant +# and one instruction to patch the stack pointer +#SIZE_LOAD_IMM_PATCH_SP = 6 + +#FORCE_INDEX_OFS = (len(MANAGED_REGS) + len(MANAGED_FP_REGS)) * WORD + + +# BIG ENDIAN LITTLE ENDIAN +# +# +--------------------+ <- SP + STD_FRAME_SIZE +# | general registers | +# | save area | +# +--------------------+ <- SP + 120 SP + 104 +# | Local vars | +# +--------------------+ <- SP + 112 SP + 96 +# | Parameter save | +# | area (8 args max) | +# +--------------------+ <- SP + 48 SP + 32 +# | TOC (unused) | +# +--------------------+ <- SP + 40 SP + 24 +# | link ed. (unused) | +# +--------------------+ <- SP + 32 absent +# | compiler (unused) | +# +--------------------+ <- SP + 24 absent +# | LR save area | +# +--------------------+ <- SP + 16 SP + 16 +# | CR save (unused) | +# +--------------------+ <- SP + 8 SP + 8 +# | SP back chain | +# +--------------------+ <- SP SP + +# The local variables area contains only a copy of the 2nd argument +# passed to the machine code function, which is the ll_threadlocal_addr. +# The 1st argument, i.e. the GC-managed jitframe, is stored in the +# register r31. 
+ + +LR_BC_OFFSET = 16 +_GAP = 0 if IS_BIG_ENDIAN else 16 +PARAM_SAVE_AREA_OFFSET = 48 - _GAP +LOCAL_VARS_OFFSET = 112 - _GAP +THREADLOCAL_ADDR_OFFSET = LOCAL_VARS_OFFSET +GPR_SAVE_AREA_OFFSET = 120 - _GAP + +REGISTERS_SAVED = [r.r25, r.r26, r.r27, r.r28, r.r29, r.r30, r.r31] +assert REGISTERS_SAVED == [_r for _r in r.NONVOLATILES + if _r in r.MANAGED_REGS or _r == r.r31] + +STD_FRAME_SIZE_IN_BYTES = GPR_SAVE_AREA_OFFSET + len(REGISTERS_SAVED) * WORD +assert STD_FRAME_SIZE_IN_BYTES % 16 == 0 diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/callbuilder.py @@ -0,0 +1,278 @@ +from rpython.jit.backend.ppc.arch import IS_PPC_64, WORD, PARAM_SAVE_AREA_OFFSET +from rpython.jit.backend.ppc.arch import THREADLOCAL_ADDR_OFFSET +import rpython.jit.backend.ppc.register as r +from rpython.jit.metainterp.history import INT, FLOAT +from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder +from rpython.jit.backend.ppc.jump import remap_frame_layout +from rpython.rlib.objectmodel import we_are_translated +from rpython.jit.backend.llsupport import llerrno +from rpython.rtyper.lltypesystem import rffi + + +def follow_jump(addr): + # xxx implement me + return addr + + +class CallBuilder(AbstractCallBuilder): + GPR_ARGS = [r.r3, r.r4, r.r5, r.r6, r.r7, r.r8, r.r9, r.r10] + FPR_ARGS = r.MANAGED_FP_REGS + assert FPR_ARGS == [r.f1, r.f2, r.f3, r.f4, r.f5, r.f6, r.f7, + r.f8, r.f9, r.f10, r.f11, r.f12, r.f13] + RSHADOWPTR = r.RCS1 + RFASTGILPTR = r.RCS2 + RSHADOWOLD = r.RCS3 + + def __init__(self, assembler, fnloc, arglocs, resloc): + AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, + resloc, restype=INT, ressize=None) + + def prepare_arguments(self): + assert IS_PPC_64 + self.subtracted_to_sp = 0 + + # Prepare arguments. Note that this follows the convention where + # a prototype is in scope, and doesn't take "..." arguments. 
If + # you were to call a C function with a "..." argument with cffi, + # it would not go there but instead via libffi. If you pretend + # instead that it takes fixed arguments, then it would arrive here + # but the convention is bogus for floating-point arguments. (And, + # to add to the mess, at least CPython's ctypes cannot be used + # to call a "..." function with floating-point arguments. As I + # guess that it's a problem with libffi, it means PyPy inherits + # the same problem.) + arglocs = self.arglocs + num_args = len(arglocs) + + non_float_locs = [] + non_float_regs = [] + float_locs = [] + for i in range(min(num_args, 8)): + if arglocs[i].type != FLOAT: + non_float_locs.append(arglocs[i]) + non_float_regs.append(self.GPR_ARGS[i]) + else: + float_locs.append(arglocs[i]) + # now 'non_float_locs' and 'float_locs' together contain the + # locations of the first 8 arguments + + if num_args > 8: + # We need to make a larger PPC stack frame, as shown on the + # picture in arch.py. It needs to be 48 bytes + 8 * num_args. + # The new SP back chain location should point to the top of + # the whole stack frame, i.e. jumping over both the existing + # fixed-sise part and the new variable-sized part. 
+ base = PARAM_SAVE_AREA_OFFSET + varsize = base + 8 * num_args + varsize = (varsize + 15) & ~15 # align + self.mc.load(r.SCRATCH2.value, r.SP.value, 0) # SP back chain + self.mc.store_update(r.SCRATCH2.value, r.SP.value, -varsize) + self.subtracted_to_sp = varsize + + # In this variable-sized part, only the arguments from the 8th + # one need to be written, starting at SP + 112 + for n in range(8, num_args): + loc = arglocs[n] + if loc.type != FLOAT: + # after the 8th argument, a non-float location is + # always stored in the stack + if loc.is_reg(): + src = loc + else: + src = r.r2 + self.asm.regalloc_mov(loc, src) + self.mc.std(src.value, r.SP.value, base + 8 * n) + else: + # the first 13 floating-point arguments are all passed + # in the registers f1 to f13, independently on their + # index in the complete list of arguments + if len(float_locs) < len(self.FPR_ARGS): + float_locs.append(loc) + else: + if loc.is_fp_reg(): + src = loc + else: + src = r.FP_SCRATCH + self.asm.regalloc_mov(loc, src) + self.mc.stfd(src.value, r.SP.value, base + 8 * n) + + # We must also copy fnloc into FNREG + non_float_locs.append(self.fnloc) + non_float_regs.append(self.mc.RAW_CALL_REG) # r2 or r12 + + if float_locs: + assert len(float_locs) <= len(self.FPR_ARGS) + remap_frame_layout(self.asm, float_locs, + self.FPR_ARGS[:len(float_locs)], + r.FP_SCRATCH) + + remap_frame_layout(self.asm, non_float_locs, non_float_regs, + r.SCRATCH) + + + def push_gcmap(self): + # we push *now* the gcmap, describing the status of GC registers + # after the rearrangements done just before, ignoring the return + # value r3, if necessary + assert not self.is_call_release_gil + noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() + gcmap = self.asm._regalloc.get_gcmap([r.r3], noregs=noregs) + self.asm.push_gcmap(self.mc, gcmap, store=True) + + def pop_gcmap(self): + ssreg = None + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack and self.is_call_release_gil: + 
# in this mode, RSHADOWOLD happens to contain the shadowstack + # top at this point, so reuse it instead of loading it again + ssreg = self.RSHADOWOLD + self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) + + def emit_raw_call(self): + self.mc.raw_call() + + def restore_stack_pointer(self): + if self.subtracted_to_sp != 0: + self.mc.addi(r.SP.value, r.SP.value, self.subtracted_to_sp) + + def load_result(self): + assert (self.resloc is None or + self.resloc is r.r3 or + self.resloc is r.f1) + + + def call_releasegil_addr_and_move_real_arguments(self, fastgil): + assert self.is_call_release_gil + RSHADOWPTR = self.RSHADOWPTR + RFASTGILPTR = self.RFASTGILPTR + RSHADOWOLD = self.RSHADOWOLD + # + # Save this thread's shadowstack pointer into r29, for later comparison + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack: + rst = gcrootmap.get_root_stack_top_addr() + self.mc.load_imm(RSHADOWPTR, rst) + self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0) + # + # change 'rpy_fastgil' to 0 (it should be non-zero right now) + self.mc.load_imm(RFASTGILPTR, fastgil) + self.mc.li(r.r0.value, 0) + self.mc.lwsync() + self.mc.std(r.r0.value, RFASTGILPTR.value, 0) + # + if not we_are_translated(): # for testing: we should not access + self.mc.addi(r.SPP.value, r.SPP.value, 1) # r31 any more + + + def move_real_result_and_call_reacqgil_addr(self, fastgil): + from rpython.jit.backend.ppc.codebuilder import OverwritingBuilder + + # try to reacquire the lock. 
The following registers are still + # valid from before the call: + RSHADOWPTR = self.RSHADOWPTR # r30: &root_stack_top + RFASTGILPTR = self.RFASTGILPTR # r29: &fastgil + RSHADOWOLD = self.RSHADOWOLD # r28: previous val of root_stack_top + + # Equivalent of 'r10 = __sync_lock_test_and_set(&rpy_fastgil, 1);' + self.mc.li(r.r9.value, 1) + retry_label = self.mc.currpos() + self.mc.ldarx(r.r10.value, 0, RFASTGILPTR.value) # load the lock value + self.mc.stdcxx(r.r9.value, 0, RFASTGILPTR.value) # try to claim lock + self.mc.bc(6, 2, retry_label - self.mc.currpos()) # retry if failed + self.mc.isync() + + self.mc.cmpdi(0, r.r10.value, 0) + b1_location = self.mc.currpos() + self.mc.trap() # boehm: patched with a BEQ: jump if r10 is zero + # shadowstack: patched with BNE instead + + if self.asm.cpu.gc_ll_descr.gcrootmap: + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'. + self.mc.load(r.r9.value, RSHADOWPTR.value, 0) + self.mc.cmpdi(0, r.r9.value, RSHADOWOLD.value) + bne_location = b1_location + b1_location = self.mc.currpos() + self.mc.trap() + + # revert the rpy_fastgil acquired above, so that the + # general 'reacqgil_addr' below can acquire it again... + # (here, r10 is conveniently zero) + self.mc.std(r.r10.value, RFASTGILPTR.value, 0) + + pmc = OverwritingBuilder(self.mc, bne_location, 1) + pmc.bne(self.mc.currpos() - bne_location) + pmc.overwrite() + # + # Yes, we need to call the reacqgil() function. 
+ # save the result we just got + RSAVEDRES = RFASTGILPTR # can reuse this reg here + reg = self.resloc + if reg is not None: + if reg.is_core_reg(): + self.mc.mr(RSAVEDRES.value, reg.value) + elif reg.is_fp_reg(): + self.mc.stfd(reg.value, r.SP.value, + PARAM_SAVE_AREA_OFFSET + 7 * WORD) + self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) + self.mc.raw_call() + if reg is not None: + if reg.is_core_reg(): + self.mc.mr(reg.value, RSAVEDRES.value) + elif reg.is_fp_reg(): + self.mc.lfd(reg.value, r.SP.value, + PARAM_SAVE_AREA_OFFSET + 7 * WORD) + + # replace b1_location with BEQ(here) + pmc = OverwritingBuilder(self.mc, b1_location, 1) + pmc.beq(self.mc.currpos() - b1_location) + pmc.overwrite() + + if not we_are_translated(): # for testing: now we can access + self.mc.addi(r.SPP.value, r.SPP.value, -1) # r31 again + + + def write_real_errno(self, save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + # Just before a call, read '*_errno' and write it into the + # real 'errno'. A lot of registers are free here, notably + # r11 and r0. + if save_err & rffi.RFFI_ALT_ERRNO: + rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu) + else: + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r11.value, r.SP.value, + THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) + self.mc.lwz(r.r0.value, r.r11.value, rpy_errno) + self.mc.ld(r.r11.value, r.r11.value, p_errno) + self.mc.stw(r.r0.value, r.r11.value, 0) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + # Same, but write zero. 
+ p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r11.value, r.SP.value, + THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) + self.mc.ld(r.r11.value, r.r11.value, p_errno) + self.mc.li(r.r0.value, 0) + self.mc.stw(r.r0.value, r.r11.value, 0) + + def read_real_errno(self, save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + # Just after a call, read the real 'errno' and save a copy of + # it inside our thread-local '*_errno'. Registers r4-r10 + # never contain anything after the call. + if save_err & rffi.RFFI_ALT_ERRNO: + rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu) + else: + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r9.value, r.SP.value, THREADLOCAL_ADDR_OFFSET) + self.mc.ld(r.r10.value, r.r9.value, p_errno) + self.mc.lwz(r.r10.value, r.r10.value, 0) + self.mc.stw(r.r10.value, r.r9.value, rpy_errno) diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -0,0 +1,1292 @@ +import os +from rpython.jit.backend.ppc.ppc_form import PPCForm as Form +from rpython.jit.backend.ppc.locations import RegisterLocation +from rpython.jit.backend.ppc.ppc_field import ppc_fields +from rpython.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64, + LR_BC_OFFSET, IS_BIG_ENDIAN, IS_LITTLE_ENDIAN) +import rpython.jit.backend.ppc.register as r +import rpython.jit.backend.ppc.condition as c +from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from rpython.jit.backend.llsupport.assembler import GuardToken +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.jit.metainterp.resoperation import rop +from rpython.tool.udir import udir +from rpython.rlib.objectmodel import we_are_translated + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.jit.backend.ppc.rassemblermaker import make_rassembler + + +# 
the following instructions can't accept "r0" as the second argument +# (i.e. the base address): it is recognized as "0" instead, or is +# even invalid (load-with-update, store-with-update). +# +# any load or store instruction +# addi rD, r0, immed +# subi rD, r0, immed +# addis rD, r0, immed +# subis rD, r0, immed + + +A = Form("frD", "frA", "frB", "XO3", "Rc") +A1 = Form("frD", "frB", "XO3", "Rc") +A2 = Form("frD", "frA", "frC", "XO3", "Rc") +A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") + +I = Form("LI", "AA", "LK") + +B = Form("BO", "BI", "BD", "AA", "LK") + +SC = Form("AA") # fudge + +DD = Form("rD", "rA", "SIMM") +DDO = Form("rD", "rA", "ds", "XO4") +DS = Form("rA", "rS", "UIMM") + +X = Form("XO1") +XS = Form("rA", "rS", "rB", "XO1", "Rc") +XSO = Form("rS", "rA", "rB", "XO1") +XD = Form("rD", "rA", "rB", "XO1") +XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") +XO0 = Form("rD", "rA", "OE", "XO2", "Rc") +XDB = Form("frD", "frB", "XO1", "Rc") +XS0 = Form("rA", "rS", "XO1", "Rc") +X0 = Form("rA", "rB", "XO1") +XcAB = Form("crfD", "rA", "rB", "XO1") +XN = Form("rD", "rA", "NB", "XO1") +XL = Form("crbD", "crbA", "crbB", "XO1") +XL1 = Form("crfD", "crfS") +XL2 = Form("crbD", "XO1", "Rc") +XFL = Form("FM", "frB", "XO1", "Rc") +XFX = Form("CRM", "rS", "XO1") +XLL = Form("LL", "XO1") + +MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") +MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") +MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") +MDS = Form("rA", "rS", "rB", "mbe", "XO7", "Rc") + +class BasicPPCAssembler(object): + + def disassemble(cls, inst, labels={}, pc=0): + cache = cls.__dict__.get('idesc cache') + if cache is None: + idescs = cls.get_idescs() + cache = {} + for n, i in idescs: + cache.setdefault(i.specializations[ppc_fields['opcode']], + []).append((n,i)) + setattr(cls, 'idesc cache', cache) + matches = [] + idescs = cache[ppc_fields['opcode'].decode(inst)] + for name, idesc in idescs: + m = idesc.match(inst) + if m > 0: + matches.append((m, idesc, name)) + if 
matches: + score, idesc, name = max(matches) + return idesc.disassemble(name, inst, labels, pc) + disassemble = classmethod(disassemble) + + # "basic" means no simplified mnemonics + + # I form + b = I(18, AA=0, LK=0) + ba = I(18, AA=1, LK=0) + bl = I(18, AA=0, LK=1) + bla = I(18, AA=1, LK=1) + + # B form + bc = B(16, AA=0, LK=0) + bcl = B(16, AA=0, LK=1) + bca = B(16, AA=1, LK=0) + bcla = B(16, AA=1, LK=1) + + # SC form + sc = SC(17, AA=1) # it's not really the aa field... + + # D form + addi = DD(14) + addic = DD(12) + addicx = DD(13) + addis = DD(15) + + andix = DS(28) + andisx = DS(29) + + cmpi = Form("crfD", "L", "rA", "SIMM")(11) + cmpi.default(L=0).default(crfD=0) + cmpli = Form("crfD", "L", "rA", "UIMM")(10) + cmpli.default(L=0).default(crfD=0) + + lbz = DD(34) + lbzu = DD(35) + ld = DDO(58, XO4=0) + ldu = DDO(58, XO4=1) + lfd = DD(50) + lfdu = DD(51) + lfs = DD(48) + lfsu = DD(49) + lha = DD(42) + lhau = DD(43) + lhz = DD(40) + lhzu = DD(41) + lmw = DD(46) + lwa = DDO(58, XO4=2) + lwz = DD(32) + lwzu = DD(33) + + mulli = DD(7) + ori = DS(24) + oris = DS(25) + + stb = DD(38) + stbu = DD(39) + std = DDO(62, XO4=0) + stdu = DDO(62, XO4=1) + stfd = DD(54) + stfdu = DD(55) + stfs = DD(52) + stfsu = DD(53) + sth = DD(44) + sthu = DD(45) + stmw = DD(47) + stw = DD(36) + stwu = DD(37) + + subfic = DD(8) + tdi = Form("TO", "rA", "SIMM")(2) + twi = Form("TO", "rA", "SIMM")(3) + xori = DS(26) + xoris = DS(27) + + # X form + + and_ = XS(31, XO1=28, Rc=0) + and_x = XS(31, XO1=28, Rc=1) + + andc_ = XS(31, XO1=60, Rc=0) + andc_x = XS(31, XO1=60, Rc=1) + + # is the L bit for 64 bit compares? 
hmm + cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) + cmp.default(L=0).default(crfD=0) + cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) + cmpl.default(L=0).default(crfD=0) + + cntlzd = XS0(31, XO1=58, Rc=0) + cntlzdx = XS0(31, XO1=58, Rc=1) + cntlzw = XS0(31, XO1=26, Rc=0) + cntlzwx = XS0(31, XO1=26, Rc=1) + + dcba = X0(31, XO1=758) + dcbf = X0(31, XO1=86) + dcbi = X0(31, XO1=470) + dcbst = X0(31, XO1=54) + dcbt = X0(31, XO1=278) + dcbtst = X0(31, XO1=246) + dcbz = X0(31, XO1=1014) + + eciwx = XD(31, XO1=310) + ecowx = XS(31, XO1=438, Rc=0) + + eieio = X(31, XO1=854) + + eqv = XS(31, XO1=284, Rc=0) + eqvx = XS(31, XO1=284, Rc=1) + + extsb = XS0(31, XO1=954, Rc=0) + extsbx = XS0(31, XO1=954, Rc=1) + + extsh = XS0(31, XO1=922, Rc=0) + extshx = XS0(31, XO1=922, Rc=1) + + extsw = XS0(31, XO1=986, Rc=0) + extswx = XS0(31, XO1=986, Rc=1) + + fabs = XDB(63, XO1=264, Rc=0) + fabsx = XDB(63, XO1=264, Rc=1) + + fcmpo = XcAB(63, XO1=32) + fcmpu = XcAB(63, XO1=0) + + fcfid = XDB(63, XO1=846, Rc=0) + fcfidx = XDB(63, XO1=846, Rc=1) + + fctid = XDB(63, XO1=814, Rc=0) + fctidx = XDB(63, XO1=814, Rc=1) + + fctidz = XDB(63, XO1=815, Rc=0) + fctidzx = XDB(63, XO1=815, Rc=1) + + fctiw = XDB(63, XO1=14, Rc=0) + fctiwx = XDB(63, XO1=14, Rc=1) + + fctiwz = XDB(63, XO1=15, Rc=0) + fctiwzx = XDB(63, XO1=15, Rc=1) + + fmr = XDB(63, XO1=72, Rc=0) + fmrx = XDB(63, XO1=72, Rc=1) + + fnabs = XDB(63, XO1=136, Rc=0) + fnabsx = XDB(63, XO1=136, Rc=1) + + fneg = XDB(63, XO1=40, Rc=0) + fnegx = XDB(63, XO1=40, Rc=1) + + frsp = XDB(63, XO1=12, Rc=0) + frspx = XDB(63, XO1=12, Rc=1) + + fsqrt = XDB(63, XO1=22, Rc=0) + + mffgpr = XS(31, XO1=607, Rc=0) + mftgpr = XS(31, XO1=735, Rc=0) + + icbi = X0(31, XO1=982) + + lbzux = XD(31, XO1=119) + lbzx = XD(31, XO1=87) + ldarx = XD(31, XO1=84) + ldux = XD(31, XO1=53) + ldx = XD(31, XO1=21) + lfdux = XD(31, XO1=631) + lfdx = XD(31, XO1=599) + lfsux = XD(31, XO1=567) + lfsx = XD(31, XO1=535) + lhaux = XD(31, XO1=375) + lhax = XD(31, XO1=343) + 
lhbrx = XD(31, XO1=790) + lhzux = XD(31, XO1=311) + lhzx = XD(31, XO1=279) + lswi = XD(31, XO1=597) + lswx = XD(31, XO1=533) + lwarx = XD(31, XO1=20) + lwaux = XD(31, XO1=373) + lwax = XD(31, XO1=341) + lwbrx = XD(31, XO1=534) + lwzux = XD(31, XO1=55) + lwzx = XD(31, XO1=23) + + mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) + mcrxr = Form("crfD", "XO1")(31, XO1=512) + mfcr = Form("rD", "XO1")(31, XO1=19) + mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) + mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) + mfmsr = Form("rD", "XO1")(31, XO1=83) + mfsr = Form("rD", "SR", "XO1")(31, XO1=595) + mfsrin = XDB(31, XO1=659, Rc=0) + + add = XO(31, XO2=266, OE=0, Rc=0) + addx = XO(31, XO2=266, OE=0, Rc=1) + addo = XO(31, XO2=266, OE=1, Rc=0) + addox = XO(31, XO2=266, OE=1, Rc=1) + + addc = XO(31, XO2=10, OE=0, Rc=0) + addcx = XO(31, XO2=10, OE=0, Rc=1) + addco = XO(31, XO2=10, OE=1, Rc=0) + addcox = XO(31, XO2=10, OE=1, Rc=1) + + adde = XO(31, XO2=138, OE=0, Rc=0) + addex = XO(31, XO2=138, OE=0, Rc=1) + addeo = XO(31, XO2=138, OE=1, Rc=0) + addeox = XO(31, XO2=138, OE=1, Rc=1) + + addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) + addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) + addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) + addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) + + addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) + addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) + addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) + addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) + + bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) + bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) + + bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) + bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) + + crand = XL(19, XO1=257) + crandc = XL(19, XO1=129) + creqv = XL(19, XO1=289) + crnand = XL(19, XO1=225) + crnor = XL(19, XO1=33) + cror = XL(19, XO1=449) + crorc = XL(19, XO1=417) + crxor = XL(19, XO1=193) + + divd = XO(31, XO2=489, OE=0, Rc=0) + divdx = XO(31, XO2=489, OE=0, Rc=1) + divdo = XO(31, 
XO2=489, OE=1, Rc=0) + divdox = XO(31, XO2=489, OE=1, Rc=1) + + divdu = XO(31, XO2=457, OE=0, Rc=0) + divdux = XO(31, XO2=457, OE=0, Rc=1) + divduo = XO(31, XO2=457, OE=1, Rc=0) + divduox = XO(31, XO2=457, OE=1, Rc=1) + + divw = XO(31, XO2=491, OE=0, Rc=0) + divwx = XO(31, XO2=491, OE=0, Rc=1) + divwo = XO(31, XO2=491, OE=1, Rc=0) + divwox = XO(31, XO2=491, OE=1, Rc=1) + + divwu = XO(31, XO2=459, OE=0, Rc=0) + divwux = XO(31, XO2=459, OE=0, Rc=1) + divwuo = XO(31, XO2=459, OE=1, Rc=0) + divwuox = XO(31, XO2=459, OE=1, Rc=1) + + fadd = A(63, XO3=21, Rc=0) + faddx = A(63, XO3=21, Rc=1) + fadds = A(59, XO3=21, Rc=0) + faddsx = A(59, XO3=21, Rc=1) + + fdiv = A(63, XO3=18, Rc=0) + fdivx = A(63, XO3=18, Rc=1) + fdivs = A(59, XO3=18, Rc=0) + fdivsx = A(59, XO3=18, Rc=1) + + fmadd = A3(63, XO3=19, Rc=0) + fmaddx = A3(63, XO3=19, Rc=1) + fmadds = A3(59, XO3=19, Rc=0) + fmaddsx = A3(59, XO3=19, Rc=1) + + fmsub = A3(63, XO3=28, Rc=0) + fmsubx = A3(63, XO3=28, Rc=1) + fmsubs = A3(59, XO3=28, Rc=0) + fmsubsx = A3(59, XO3=28, Rc=1) + + fmul = A2(63, XO3=25, Rc=0) + fmulx = A2(63, XO3=25, Rc=1) + fmuls = A2(59, XO3=25, Rc=0) + fmulsx = A2(59, XO3=25, Rc=1) + + fnmadd = A3(63, XO3=31, Rc=0) + fnmaddx = A3(63, XO3=31, Rc=1) + fnmadds = A3(59, XO3=31, Rc=0) + fnmaddsx = A3(59, XO3=31, Rc=1) + + fnmsub = A3(63, XO3=30, Rc=0) + fnmsubx = A3(63, XO3=30, Rc=1) + fnmsubs = A3(59, XO3=30, Rc=0) + fnmsubsx = A3(59, XO3=30, Rc=1) + + fres = A1(59, XO3=24, Rc=0) + fresx = A1(59, XO3=24, Rc=1) + + frsp = A1(63, XO3=12, Rc=0) + frspx = A1(63, XO3=12, Rc=1) + + frsqrte = A1(63, XO3=26, Rc=0) + frsqrtex = A1(63, XO3=26, Rc=1) + + fsel = A3(63, XO3=23, Rc=0) + fselx = A3(63, XO3=23, Rc=1) + + frsqrt = A1(63, XO3=22, Rc=0) + frsqrtx = A1(63, XO3=22, Rc=1) + frsqrts = A1(59, XO3=22, Rc=0) + frsqrtsx = A1(59, XO3=22, Rc=1) + + fsub = A(63, XO3=20, Rc=0) + fsubx = A(63, XO3=20, Rc=1) + fsubs = A(59, XO3=20, Rc=0) + fsubsx = A(59, XO3=20, Rc=1) + + isync = X(19, XO1=150) + + mcrf = XL1(19) + + mfspr = 
Form("rD", "spr", "XO1")(31, XO1=339) + mftb = Form("rD", "spr", "XO1")(31, XO1=371) + + mtcrf = XFX(31, XO1=144) + + mtfsb0 = XL2(63, XO1=70, Rc=0) + mtfsb0x = XL2(63, XO1=70, Rc=1) + mtfsb1 = XL2(63, XO1=38, Rc=0) + mtfsb1x = XL2(63, XO1=38, Rc=1) + + mtfsf = XFL(63, XO1=711, Rc=0) + mtfsfx = XFL(63, XO1=711, Rc=1) + + mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) + mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) + + mtmsr = Form("rS", "XO1")(31, XO1=146) + + mtspr = Form("rS", "spr", "XO1")(31, XO1=467) + + mtsr = Form("rS", "SR", "XO1")(31, XO1=210) + mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) + + mulhd = XO(31, OE=0, XO2=73, Rc=0) + mulhdx = XO(31, OE=0, XO2=73, Rc=1) + + mulhdu = XO(31, OE=0, XO2=9, Rc=0) + mulhdux = XO(31, OE=0, XO2=9, Rc=1) + + mulld = XO(31, OE=0, XO2=233, Rc=0) + mulldx = XO(31, OE=0, XO2=233, Rc=1) + mulldo = XO(31, OE=1, XO2=233, Rc=0) + mulldox = XO(31, OE=1, XO2=233, Rc=1) + + mulhw = XO(31, OE=0, XO2=75, Rc=0) + mulhwx = XO(31, OE=0, XO2=75, Rc=1) + + mulhwu = XO(31, OE=0, XO2=11, Rc=0) + mulhwux = XO(31, OE=0, XO2=11, Rc=1) + + mullw = XO(31, OE=0, XO2=235, Rc=0) + mullwx = XO(31, OE=0, XO2=235, Rc=1) + mullwo = XO(31, OE=1, XO2=235, Rc=0) + mullwox = XO(31, OE=1, XO2=235, Rc=1) + + nand = XS(31, XO1=476, Rc=0) + nandx = XS(31, XO1=476, Rc=1) + + neg = XO0(31, OE=0, XO2=104, Rc=0) + negx = XO0(31, OE=0, XO2=104, Rc=1) + nego = XO0(31, OE=1, XO2=104, Rc=0) + negox = XO0(31, OE=1, XO2=104, Rc=1) + + nor = XS(31, XO1=124, Rc=0) + norx = XS(31, XO1=124, Rc=1) + + or_ = XS(31, XO1=444, Rc=0) + or_x = XS(31, XO1=444, Rc=1) + + orc = XS(31, XO1=412, Rc=0) + orcx = XS(31, XO1=412, Rc=1) + + rfi = X(19, XO1=50) + + rfid = X(19, XO1=18) + + rldcl = MDS(30, XO7=8, Rc=0) + rldclx = MDS(30, XO7=8, Rc=1) + rldcr = MDS(30, XO7=9, Rc=0) + rldcrx = MDS(30, XO7=9, Rc=1) + + rldic = MDI(30, XO5=2, Rc=0) + rldicx = MDI(30, XO5=2, Rc=1) + rldicl = MDI(30, XO5=0, Rc=0) + rldiclx = MDI(30, XO5=0, Rc=1) + rldicr = MDI(30, 
XO5=1, Rc=0) + rldicrx = MDI(30, XO5=1, Rc=1) + rldimi = MDI(30, XO5=3, Rc=0) + rldimix = MDI(30, XO5=3, Rc=1) + + rlwimi = MI(20, Rc=0) + rlwimix = MI(20, Rc=1) + + rlwinm = MI(21, Rc=0) + rlwinmx = MI(21, Rc=1) + + rlwnm = MB(23, Rc=0) + rlwnmx = MB(23, Rc=1) + + sld = XS(31, XO1=27, Rc=0) + sldx = XS(31, XO1=27, Rc=1) + + slw = XS(31, XO1=24, Rc=0) + slwx = XS(31, XO1=24, Rc=1) + + srad = XS(31, XO1=794, Rc=0) + sradx = XS(31, XO1=794, Rc=1) + + sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) + sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) + + sraw = XS(31, XO1=792, Rc=0) + srawx = XS(31, XO1=792, Rc=1) + + srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) + srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) + + srd = XS(31, XO1=539, Rc=0) + srdx = XS(31, XO1=539, Rc=1) + + srw = XS(31, XO1=536, Rc=0) + srwx = XS(31, XO1=536, Rc=1) + + stbux = XSO(31, XO1=247) + stbx = XSO(31, XO1=215) + stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) + stdux = XSO(31, XO1=181) + stdx = XSO(31, XO1=149) + stfdux = XSO(31, XO1=759) + stfdx = XSO(31, XO1=727) + stfiwx = XSO(31, XO1=983) + stfsux = XSO(31, XO1=695) + stfsx = XSO(31, XO1=663) + sthbrx = XSO(31, XO1=918) + sthux = XSO(31, XO1=439) + sthx = XSO(31, XO1=407) + stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) + stswx = XSO(31, XO1=661) + stwbrx = XSO(31, XO1=662) + stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) + stwux = XSO(31, XO1=183) + stwx = XSO(31, XO1=151) + + subf = XO(31, XO2=40, OE=0, Rc=0) + subfx = XO(31, XO2=40, OE=0, Rc=1) + subfo = XO(31, XO2=40, OE=1, Rc=0) + subfox = XO(31, XO2=40, OE=1, Rc=1) + + subfc = XO(31, XO2=8, OE=0, Rc=0) + subfcx = XO(31, XO2=8, OE=0, Rc=1) + subfco = XO(31, XO2=8, OE=1, Rc=0) + subfcox = XO(31, XO2=8, OE=1, Rc=1) + + subfe = XO(31, XO2=136, OE=0, Rc=0) + subfex = XO(31, XO2=136, OE=0, Rc=1) + subfeo = XO(31, XO2=136, OE=1, Rc=0) + subfeox = XO(31, XO2=136, OE=1, Rc=1) + + 
subfme = XO0(31, OE=0, XO2=232, Rc=0) + subfmex = XO0(31, OE=0, XO2=232, Rc=1) + subfmeo = XO0(31, OE=1, XO2=232, Rc=0) + subfmeox= XO0(31, OE=1, XO2=232, Rc=1) + + subfze = XO0(31, OE=0, XO2=200, Rc=0) + subfzex = XO0(31, OE=0, XO2=200, Rc=1) + subfzeo = XO0(31, OE=1, XO2=200, Rc=0) + subfzeox= XO0(31, OE=1, XO2=200, Rc=1) + + sync = XLL(31, LL=0, XO1=598) + lwsync = XLL(31, LL=1, XO1=598) + + tlbia = X(31, XO1=370) + tlbie = Form("rB", "XO1")(31, XO1=306) + tlbsync = X(31, XO1=566) + + td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) + tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) + + xor = XS(31, XO1=316, Rc=0) + xorx = XS(31, XO1=316, Rc=1) + +class PPCAssembler(BasicPPCAssembler): + BA = BasicPPCAssembler + + # awkward mnemonics: + # mftb + # most of the branch mnemonics... + + # F.2 Simplified Mnemonics for Subtract Instructions + + def subi(self, rD, rA, value): + self.addi(rD, rA, -value) + def subis(self, rD, rA, value): + self.addis(rD, rA, -value) + def subic(self, rD, rA, value): + self.addic(rD, rA, -value) + def subicx(self, rD, rA, value): + self.addicx(rD, rA, -value) + + def sub(self, rD, rA, rB): + self.subf(rD, rB, rA) + def subc(self, rD, rA, rB): + self.subfc(rD, rB, rA) + def subx(self, rD, rA, rB): + self.subfx(rD, rB, rA) + def subcx(self, rD, rA, rB): + self.subfcx(rD, rB, rA) + def subo(self, rD, rA, rB): + self.subfo(rD, rB, rA) + def subco(self, rD, rA, rB): + self.subfco(rD, rB, rA) + def subox(self, rD, rA, rB): + self.subfox(rD, rB, rA) + def subcox(self, rD, rA, rB): + self.subfcox(rD, rB, rA) + + # F.3 Simplified Mnemonics for Compare Instructions + + cmpdi = BA.cmpi(L=1) + cmpwi = BA.cmpi(L=0) + cmpldi = BA.cmpli(L=1) + cmplwi = BA.cmpli(L=0) + cmpd = BA.cmp(L=1) + cmpw = BA.cmp(L=0) + cmpld = BA.cmpl(L=1) + cmplw = BA.cmpl(L=0) + + # F.4 Simplified Mnemonics for Rotate and Shift Instructions + + def extlwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b, 0, n-1) + + def extrwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b+n, 32-n, 31) + 
+ def inslwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-b, b, b + n -1) + + def insrwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) + + def rotlwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31) + + def rotrwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, 0, 31) + + def rotlw(self, rA, rS, rB): + self.rlwnm(rA, rS, rB, 0, 31) + + def slwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31-n) + + def srwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, n, 31) + + def sldi(self, rA, rS, n): + self.rldicr(rA, rS, n, 63-n) + + def srdi(self, rA, rS, n): + self.rldicl(rA, rS, 64-n, n) + + # F.5 Simplified Mnemonics for Branch Instructions + + # there's a lot of these! + bt = BA.bc(BO=12) + bf = BA.bc(BO=4) + bdnz = BA.bc(BO=16, BI=0) + bdnzt = BA.bc(BO=8) + bdnzf = BA.bc(BO=0) + bdz = BA.bc(BO=18, BI=0) + bdzt = BA.bc(BO=10) + bdzf = BA.bc(BO=2) + + bta = BA.bca(BO=12) + bfa = BA.bca(BO=4) + bdnza = BA.bca(BO=16, BI=0) + bdnzta = BA.bca(BO=8) + bdnzfa = BA.bca(BO=0) + bdza = BA.bca(BO=18, BI=0) + bdzta = BA.bca(BO=10) + bdzfa = BA.bca(BO=2) + + btl = BA.bcl(BO=12) + bfl = BA.bcl(BO=4) + bdnzl = BA.bcl(BO=16, BI=0) + bdnztl = BA.bcl(BO=8) + bdnzfl = BA.bcl(BO=0) + bdzl = BA.bcl(BO=18, BI=0) + bdztl = BA.bcl(BO=10) + bdzfl = BA.bcl(BO=2) + + btla = BA.bcla(BO=12) + bfla = BA.bcla(BO=4) + bdnzla = BA.bcla(BO=16, BI=0) + bdnztla = BA.bcla(BO=8) + bdnzfla = BA.bcla(BO=0) + bdzla = BA.bcla(BO=18, BI=0) + bdztla = BA.bcla(BO=10) + bdzfla = BA.bcla(BO=2) + + blr = BA.bclr(BO=20, BI=0) + btlr = BA.bclr(BO=12) + bflr = BA.bclr(BO=4) + bdnzlr = BA.bclr(BO=16, BI=0) + bdnztlr = BA.bclr(BO=8) + bdnzflr = BA.bclr(BO=0) + bdzlr = BA.bclr(BO=18, BI=0) + bdztlr = BA.bclr(BO=10) + bdzflr = BA.bclr(BO=2) + + bctr = BA.bcctr(BO=20, BI=0) + btctr = BA.bcctr(BO=12) + bfctr = BA.bcctr(BO=4) + + blrl = BA.bclrl(BO=20, BI=0) + btlrl = BA.bclrl(BO=12) + bflrl = BA.bclrl(BO=4) + bdnzlrl = BA.bclrl(BO=16, BI=0) + bdnztlrl = BA.bclrl(BO=8) + bdnzflrl = BA.bclrl(BO=0) + bdzlrl 
= BA.bclrl(BO=18, BI=0) + bdztlrl = BA.bclrl(BO=10) + bdzflrl = BA.bclrl(BO=2) + + bctrl = BA.bcctrl(BO=20, BI=0) + btctrl = BA.bcctrl(BO=12) + bfctrl = BA.bcctrl(BO=4) + + # these should/could take a[n optional] crf argument, but it's a + # bit hard to see how to arrange that. + + blt = BA.bc(BO=12, BI=0) + ble = BA.bc(BO=4, BI=1) + beq = BA.bc(BO=12, BI=2) + bge = BA.bc(BO=4, BI=0) + bgt = BA.bc(BO=12, BI=1) + bnl = BA.bc(BO=4, BI=0) + bne = BA.bc(BO=4, BI=2) + bng = BA.bc(BO=4, BI=1) + bso = BA.bc(BO=12, BI=3) + bns = BA.bc(BO=4, BI=3) + bun = BA.bc(BO=12, BI=3) + bnu = BA.bc(BO=4, BI=3) + + blta = BA.bca(BO=12, BI=0) + blea = BA.bca(BO=4, BI=1) + beqa = BA.bca(BO=12, BI=2) + bgea = BA.bca(BO=4, BI=0) + bgta = BA.bca(BO=12, BI=1) + bnla = BA.bca(BO=4, BI=0) + bnea = BA.bca(BO=4, BI=2) + bnga = BA.bca(BO=4, BI=1) + bsoa = BA.bca(BO=12, BI=3) + bnsa = BA.bca(BO=4, BI=3) + buna = BA.bca(BO=12, BI=3) + bnua = BA.bca(BO=4, BI=3) + + bltl = BA.bcl(BO=12, BI=0) + blel = BA.bcl(BO=4, BI=1) + beql = BA.bcl(BO=12, BI=2) + bgel = BA.bcl(BO=4, BI=0) + bgtl = BA.bcl(BO=12, BI=1) + bnll = BA.bcl(BO=4, BI=0) + bnel = BA.bcl(BO=4, BI=2) + bngl = BA.bcl(BO=4, BI=1) + bsol = BA.bcl(BO=12, BI=3) + bnsl = BA.bcl(BO=4, BI=3) + bunl = BA.bcl(BO=12, BI=3) + bnul = BA.bcl(BO=4, BI=3) + + bltla = BA.bcla(BO=12, BI=0) + blela = BA.bcla(BO=4, BI=1) + beqla = BA.bcla(BO=12, BI=2) + bgela = BA.bcla(BO=4, BI=0) + bgtla = BA.bcla(BO=12, BI=1) + bnlla = BA.bcla(BO=4, BI=0) + bnela = BA.bcla(BO=4, BI=2) + bngla = BA.bcla(BO=4, BI=1) + bsola = BA.bcla(BO=12, BI=3) + bnsla = BA.bcla(BO=4, BI=3) + bunla = BA.bcla(BO=12, BI=3) + bnula = BA.bcla(BO=4, BI=3) + + bltlr = BA.bclr(BO=12, BI=0) + blelr = BA.bclr(BO=4, BI=1) + beqlr = BA.bclr(BO=12, BI=2) + bgelr = BA.bclr(BO=4, BI=0) + bgtlr = BA.bclr(BO=12, BI=1) + bnllr = BA.bclr(BO=4, BI=0) + bnelr = BA.bclr(BO=4, BI=2) + bnglr = BA.bclr(BO=4, BI=1) + bsolr = BA.bclr(BO=12, BI=3) + bnslr = BA.bclr(BO=4, BI=3) + bunlr = BA.bclr(BO=12, BI=3) + bnulr = 
BA.bclr(BO=4, BI=3) + + bltctr = BA.bcctr(BO=12, BI=0) + blectr = BA.bcctr(BO=4, BI=1) + beqctr = BA.bcctr(BO=12, BI=2) + bgectr = BA.bcctr(BO=4, BI=0) + bgtctr = BA.bcctr(BO=12, BI=1) + bnlctr = BA.bcctr(BO=4, BI=0) + bnectr = BA.bcctr(BO=4, BI=2) + bngctr = BA.bcctr(BO=4, BI=1) + bsoctr = BA.bcctr(BO=12, BI=3) + bnsctr = BA.bcctr(BO=4, BI=3) + bunctr = BA.bcctr(BO=12, BI=3) + bnuctr = BA.bcctr(BO=4, BI=3) + + bltlrl = BA.bclrl(BO=12, BI=0) + blelrl = BA.bclrl(BO=4, BI=1) + beqlrl = BA.bclrl(BO=12, BI=2) + bgelrl = BA.bclrl(BO=4, BI=0) + bgtlrl = BA.bclrl(BO=12, BI=1) + bnllrl = BA.bclrl(BO=4, BI=0) + bnelrl = BA.bclrl(BO=4, BI=2) + bnglrl = BA.bclrl(BO=4, BI=1) + bsolrl = BA.bclrl(BO=12, BI=3) + bnslrl = BA.bclrl(BO=4, BI=3) + bunlrl = BA.bclrl(BO=12, BI=3) + bnulrl = BA.bclrl(BO=4, BI=3) + + bltctrl = BA.bcctrl(BO=12, BI=0) + blectrl = BA.bcctrl(BO=4, BI=1) + beqctrl = BA.bcctrl(BO=12, BI=2) + bgectrl = BA.bcctrl(BO=4, BI=0) + bgtctrl = BA.bcctrl(BO=12, BI=1) + bnlctrl = BA.bcctrl(BO=4, BI=0) + bnectrl = BA.bcctrl(BO=4, BI=2) + bngctrl = BA.bcctrl(BO=4, BI=1) + bsoctrl = BA.bcctrl(BO=12, BI=3) + bnsctrl = BA.bcctrl(BO=4, BI=3) + bunctrl = BA.bcctrl(BO=12, BI=3) + bnuctrl = BA.bcctrl(BO=4, BI=3) + + # whew! and we haven't even begun the predicted versions... 
+ + # F.6 Simplified Mnemonics for Condition Register + # Logical Instructions + + crset = BA.creqv(crbA="crbD", crbB="crbD") + crclr = BA.crxor(crbA="crbD", crbB="crbD") + crmove = BA.cror(crbA="crbB") + crnot = BA.crnor(crbA="crbB") + + # F.7 Simplified Mnemonics for Trap Instructions + + trap = BA.tw(TO=31, rA=0, rB=0) + twlt = BA.tw(TO=16) + twle = BA.tw(TO=20) + tweq = BA.tw(TO=4) + twge = BA.tw(TO=12) + twgt = BA.tw(TO=8) + twnl = BA.tw(TO=12) + twng = BA.tw(TO=24) + twllt = BA.tw(TO=2) + twlle = BA.tw(TO=6) + twlge = BA.tw(TO=5) + twlgt = BA.tw(TO=1) + twlnl = BA.tw(TO=5) + twlng = BA.tw(TO=6) + + twlti = BA.twi(TO=16) + twlei = BA.twi(TO=20) + tweqi = BA.twi(TO=4) + twgei = BA.twi(TO=12) + twgti = BA.twi(TO=8) + twnli = BA.twi(TO=12) + twnei = BA.twi(TO=24) + twngi = BA.twi(TO=20) + twllti = BA.twi(TO=2) + twllei = BA.twi(TO=6) + twlgei = BA.twi(TO=5) + twlgti = BA.twi(TO=1) + twlnli = BA.twi(TO=5) + twlngi = BA.twi(TO=6) + + # F.8 Simplified Mnemonics for Special-Purpose + # Registers + + mfctr = BA.mfspr(spr=9) + mflr = BA.mfspr(spr=8) + mftbl = BA.mftb(spr=268) + mftbu = BA.mftb(spr=269) + mfxer = BA.mfspr(spr=1) + + mtctr = BA.mtspr(spr=9) + mtlr = BA.mtspr(spr=8) + mtxer = BA.mtspr(spr=1) + + # F.9 Recommended Simplified Mnemonics + + nop = BA.ori(rS=0, rA=0, UIMM=0) + + li = BA.addi(rA=0) + lis = BA.addis(rA=0) + + mr = BA.or_(rB="rS") + mrx = BA.or_x(rB="rS") + + not_ = BA.nor(rB="rS") + not_x = BA.norx(rB="rS") + + mtcr = BA.mtcrf(CRM=0xFF) + +PPCAssembler = make_rassembler(PPCAssembler) + +def hi(w): + return w >> 16 + +def ha(w): + if (w >> 15) & 1: + return (w >> 16) + 1 + else: + return w >> 16 + +def lo(w): + return w & 0x0000FFFF + +def la(w): + v = w & 0x0000FFFF + if v & 0x8000: + return -((v ^ 0xFFFF) + 1) # "sign extend" to 32 bits + return v + +def highest(w): + return w >> 48 + +def higher(w): + return (w >> 32) & 0x0000FFFF + +def high(w): + return (w >> 16) & 0x0000FFFF + +_eci = ExternalCompilationInfo(post_include_bits=[ + '#define 
rpython_flush_icache() asm("isync":::"memory")\n' + ]) +flush_icache = rffi.llexternal( + "rpython_flush_icache", + [], + lltype.Void, + compilation_info=_eci, + _nowrapper=True, + sandboxsafe=True) + + +class PPCGuardToken(GuardToken): + def __init__(self, cpu, gcmap, descr, failargs, faillocs, + guard_opnum, frame_depth, fcond=c.cond_none): + GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, + guard_opnum, frame_depth) + self.fcond = fcond + + +class OverwritingBuilder(PPCAssembler): + def __init__(self, mc, start, num_insts=0): + PPCAssembler.__init__(self) + self.mc = mc + self.index = start + + def currpos(self): + assert 0, "not implemented" + + def write32(self, word): + index = self.index + if IS_BIG_ENDIAN: + self.mc.overwrite(index, chr((word >> 24) & 0xff)) + self.mc.overwrite(index + 1, chr((word >> 16) & 0xff)) + self.mc.overwrite(index + 2, chr((word >> 8) & 0xff)) + self.mc.overwrite(index + 3, chr(word & 0xff)) + elif IS_LITTLE_ENDIAN: + self.mc.overwrite(index , chr(word & 0xff)) + self.mc.overwrite(index + 1, chr((word >> 8) & 0xff)) + self.mc.overwrite(index + 2, chr((word >> 16) & 0xff)) + self.mc.overwrite(index + 3, chr((word >> 24) & 0xff)) + self.index = index + 4 + + def overwrite(self): + pass + +class PPCBuilder(BlockBuilderMixin, PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + self.ops_offset = {} + + def mark_op(self, op): + pos = self.get_relative_pos() + self.ops_offset[op] = pos + + def check(self, desc, v, *args): + desc.__get__(self)(*args) + ins = self.insts.pop() + expected = ins.assemble() + if expected < 0: + expected += 1<<32 + assert v == expected + + def load_imm(self, dest_reg, word): + rD = dest_reg.value + if word <= 32767 and word >= -32768: + self.li(rD, word) + elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): + self.lis(rD, hi(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + else: + self.load_imm(dest_reg, word>>32) + 
self.sldi(rD, rD, 32) + if word & 0xFFFF0000 != 0: + self.oris(rD, rD, high(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + + def load_imm_plus(self, dest_reg, word): + """Like load_imm(), but with one instruction less, and + leaves the loaded value off by some signed 16-bit difference. + Returns that difference.""" + diff = rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, word)) + word -= diff + assert word & 0xFFFF == 0 + self.load_imm(dest_reg, word) + return diff + + def load_from_addr(self, rD, addr): + assert rD is not r.r0 + diff = self.load_imm_plus(rD, addr) + if IS_PPC_32: + self.lwz(rD.value, rD.value, diff) + else: + self.ld(rD.value, rD.value, diff) + + def b_offset(self, target): + curpos = self.currpos() + offset = target - curpos + assert offset < (1 << 24) + self.b(offset) + + def b_cond_offset(self, offset, condition): + assert condition != c.cond_none + BI, BO = c.encoding[condition] + + pos = self.currpos() + target_ofs = offset - pos + self.bc(BO, BI, target_ofs) + + def b_cond_abs(self, addr, condition): + assert condition != c.cond_none + BI, BO = c.encoding[condition] + + with scratch_reg(self): + self.load_imm(r.SCRATCH, addr) + self.mtctr(r.SCRATCH.value) + self.bcctr(BO, BI) + + def b_abs(self, address, trap=False): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + if trap: + self.trap() + self.bctr() + + def bl_abs(self, address): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + self.bctrl() + + if IS_BIG_ENDIAN: + RAW_CALL_REG = r.r2 + else: + RAW_CALL_REG = r.r12 + + def raw_call(self, call_reg=RAW_CALL_REG): + """Emit a call to the address stored in the register 'call_reg', + which must be either RAW_CALL_REG or r12. This is a regular C + function pointer, which means on big-endian that it is actually + the address of a three-words descriptor. 
+ """ + if IS_BIG_ENDIAN: + # Load the function descriptor (currently in r2) from memory: + # [r2 + 0] -> ctr + # [r2 + 16] -> r11 + # [r2 + 8] -> r2 (= TOC) + assert self.RAW_CALL_REG is r.r2 + assert call_reg is r.r2 or call_reg is r.r12 + self.ld(r.SCRATCH.value, call_reg.value, 0) + self.ld(r.r11.value, call_reg.value, 16) + self.mtctr(r.SCRATCH.value) + self.ld(r.TOC.value, call_reg.value, 8) # must be last: TOC is r2 + elif IS_LITTLE_ENDIAN: + assert self.RAW_CALL_REG is r.r12 # 'r12' is fixed by this ABI + assert call_reg is r.r12 + self.mtctr(r.r12.value) + # Call the function + self.bctrl() + + + def load(self, target_reg, base_reg, offset): + if IS_PPC_32: + self.lwz(target_reg, base_reg, offset) + else: + self.ld(target_reg, base_reg, offset) + + def loadx(self, target_reg, base_reg, offset_reg): + if IS_PPC_32: + self.lwzx(target_reg, base_reg, offset_reg) + else: + self.ldx(target_reg, base_reg, offset_reg) + + def store(self, from_reg, base_reg, offset): + if IS_PPC_32: + self.stw(from_reg, base_reg, offset) + else: + self.std(from_reg, base_reg, offset) + + def storex(self, from_reg, base_reg, offset_reg): + if IS_PPC_32: + self.stwx(from_reg, base_reg, offset_reg) + else: + self.stdx(from_reg, base_reg, offset_reg) + + def store_update(self, target_reg, from_reg, offset): + if IS_PPC_32: + self.stwu(target_reg, from_reg, offset) + else: + self.stdu(target_reg, from_reg, offset) + + def srli_op(self, target_reg, from_reg, numbits): + if IS_PPC_32: + self.srwi(target_reg, from_reg, numbits) + else: + self.srdi(target_reg, from_reg, numbits) + + def sl_op(self, target_reg, from_reg, numbit_reg): + if IS_PPC_32: + self.slw(target_reg, from_reg, numbit_reg) + else: + self.sld(target_reg, from_reg, numbit_reg) + + def _dump_trace(self, addr, name, formatter=-1): + if not we_are_translated(): + if formatter != -1: + name = name % formatter + dir = udir.ensure('asm', dir=True) + f = dir.join(name).open('wb') + data = rffi.cast(rffi.CCHARP, addr) + for i in 
range(self.currpos()): + f.write(data[i]) + f.close() + + def write32(self, word): + if IS_BIG_ENDIAN: + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + elif IS_LITTLE_ENDIAN: + self.writechar(chr(word & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + + def write64(self, word): + if IS_BIG_ENDIAN: + self.writechar(chr((word >> 56) & 0xFF)) + self.writechar(chr((word >> 48) & 0xFF)) + self.writechar(chr((word >> 40) & 0xFF)) + self.writechar(chr((word >> 32) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + elif IS_LITTLE_ENDIAN: + self.writechar(chr(word & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 32) & 0xFF)) + self.writechar(chr((word >> 40) & 0xFF)) + self.writechar(chr((word >> 48) & 0xFF)) + self.writechar(chr((word >> 56) & 0xFF)) + + def currpos(self): + return self.get_relative_pos() + + def copy_to_raw_memory(self, addr): + self._copy_to_raw_memory(addr) + if we_are_translated(): + flush_icache() + self._dump(addr, "jit-backend-dump", 'ppc') + + def cmp_op(self, block, a, b, imm=False, signed=True, fp=False): + if fp == True: + self.fcmpu(block, a, b) + elif IS_PPC_32: + if signed: + if imm: + # 32 bit immediate signed + self.cmpwi(block, a, b) + else: + # 32 bit signed + self.cmpw(block, a, b) + else: + if imm: + # 32 bit immediate unsigned + self.cmplwi(block, a, b) + else: + # 32 bit unsigned + self.cmplw(block, a, b) + else: + if signed: + if imm: + # 64 bit immediate signed + self.cmpdi(block, a, b) + else: + # 64 bit signed + self.cmpd(block, a, b) + else: + if imm: + # 64 bit immediate unsigned + 
self.cmpldi(block, a, b) + else: + # 64 bit unsigned + self.cmpld(block, a, b) + + def alloc_scratch_reg(self): + pass + #assert not self.r0_in_use + #self.r0_in_use = True + + def free_scratch_reg(self): + pass + #assert self.r0_in_use + #self.r0_in_use = False + + def get_assembler_function(self): + "NOT_RPYTHON: tests only" + from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager + class FakeCPU: + HAS_CODEMAP = False + asmmemmgr = AsmMemoryManager() + addr = self.materialize(FakeCPU(), []) + if IS_BIG_ENDIAN: + mc = PPCBuilder() + mc.write64(addr) # the 3-words descriptor + mc.write64(0) + mc.write64(0) + addr = mc.materialize(FakeCPU(), []) + return rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), addr) + + +class scratch_reg(object): + def __init__(self, mc): + self.mc = mc + + def __enter__(self): + self.mc.alloc_scratch_reg() + + def __exit__(self, *args): + self.mc.free_scratch_reg() + +class BranchUpdater(PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + + def write_to_mem(self, addr): + self.assemble() + self.copy_to_raw_memory(addr) + + def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): + insns = self.assemble0(dump) + for i in insns: + self.emit(i) + +def b(n): + r = [] + for i in range(32): + r.append(n&1) + n >>= 1 + r.reverse() + return ''.join(map(str, r)) + +def make_operations(): + def not_implemented(builder, trace_op, cpu, *rest_args): + import pdb; pdb.set_trace() + + oplist = [None] * (rop._LAST + 1) + for key, val in rop.__dict__.items(): + if key.startswith("_"): + continue + opname = key.lower() + methname = "emit_%s" % opname + if hasattr(PPCBuilder, methname): + oplist[val] = getattr(PPCBuilder, methname).im_func + else: + oplist[val] = not_implemented + return oplist + +PPCBuilder.operations = make_operations() diff --git a/rpython/jit/backend/ppc/condition.py b/rpython/jit/backend/ppc/condition.py new file mode 100644 --- /dev/null +++ 
b/rpython/jit/backend/ppc/condition.py @@ -0,0 +1,32 @@ +EQ = 0 +NE = 1 +LE = 2 +GT = 3 +LT = 4 +GE = 5 +SO = 6 +NS = 7 +cond_none = -1 # invalid + +def negate(cond): + return cond ^ 1 + +assert negate(EQ) == NE From noreply at buildbot.pypy.org Fri Oct 16 12:22:09 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 12:22:09 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Draft blog about PPC Message-ID: <20151016102209.76B391C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5570:74cf69f6e2ac Date: 2015-10-16 12:23 +0200 http://bitbucket.org/pypy/extradoc/changeset/74cf69f6e2ac/ Log: Draft blog about PPC diff --git a/blog/draft/ppc-backend.rst b/blog/draft/ppc-backend.rst new file mode 100644 --- /dev/null +++ b/blog/draft/ppc-backend.rst @@ -0,0 +1,118 @@ +Hi all, + +PyPy's JIT now supports the 64-bit PowerPC architecture! This is the +third architecture supported, in addition to x86 (32 and 64) and ARM +(32-bit only). More precisely, we support the big- and the +little-endian variants of ppc64. Thanks to IBM for funding this work! + +The new JIT backend has been merged into "default". You should be able +to translate PPC versions `as usual`__ directly on the machines. For +the foreseeable future, I will compile and distribute binary versions +corresponding to the official releases (for Fedora), but of course I'd +welcome it if someone else could step in and do it. Also, it is unclear +yet if we will run a buildbot. + +.. __: http://pypy.org/download.html#building-from-source + +To check that the result performs well, I logged in a ppc64le machine +and ran the usual benchmark suite of PyPy (minus sqlitesynth: sqlite +was not installed on that machine). I ran it twice at a difference of +12 hours, as an attempt to reduce risks caused by other users suddenly +using the machine. The machine was overall relatively quiet. 
Of +course, this is scientifically not good enough; it is what I could come +up with given the limited resources. + +Here are the results, where the numbers are speed-up factors between the +non-jit and the jit version of PyPy. The first column is x86-64, for +reference. The second and third columns are the two ppc64le runs. A +few benchmarks are not reported here because the runner doesn't execute +them on non-jit (however, apart from sqlitesynth, they all worked). + +:: + + ai 13.7342 16.1659 14.9091 + bm_chameleon 8.5944 8.5858 8.66 + bm_dulwich_log 5.1256 5.4368 5.5928 + bm_krakatau 5.5201 2.3915 2.3452 + bm_mako 8.4802 6.8937 6.9335 + bm_mdp 2.0315 1.7162 1.9131 + chaos 56.9705 57.2608 56.2374 + sphinx + crypto_pyaes 62.505 80.149 79.7801 + deltablue 3.3403 5.1199 4.7872 + django 28.9829 23.206 23.47 + eparse 2.3164 2.6281 2.589 + fannkuch 9.1242 15.1768 11.3906 + float 13.8145 17.2582 17.2451 + genshi_text 16.4608 13.9398 13.7998 + genshi_xml 8.2782 8.0879 9.2315 + go 6.7458 11.8226 15.4183 + hexiom2 24.3612 34.7991 33.4734 + html5lib 5.4515 5.5186 5.365 + json_bench 28.8774 29.5022 28.8897 + meteor-contest 5.1518 5.6567 5.7514 + nbody_modified 20.6138 22.5466 21.3992 + pidigits 1.0118 1.022 1.0829 + pyflate-fast 9.0684 10.0168 10.3119 + pypy_interp 3.3977 3.9307 3.8798 + raytrace-simple 69.0114 108.8875 127.1518 + richards 94.1863 118.1257 102.1906 + rietveld 3.2421 3.0126 3.1592 + scimark_fft + scimark_lu + scimark_montecarlo + scimark_sor + scimark_sparsematmul + slowspitfire 2.8539 3.3924 3.5541 + spambayes 5.0646 6.3446 6.237 + spectral-norm 41.9148 42.1831 43.2913 + spitfire 3.8788 4.8214 4.701 + spitfire_cstringio 7.606 9.1809 9.1691 + sqlitesynth + sympy_expand 2.9537 2.0705 1.9299 + sympy_integrate 4.3805 4.3467 4.7052 + sympy_str 1.5431 1.6248 1.5825 + sympy_sum 6.2519 6.096 5.6643 + telco 61.2416 54.7187 55.1705 + trans2_annotate + trans2_rtype + trans2_backendopt + trans2_database + trans2_source + twisted_iteration 55.5019 51.5127 63.0592 + 
twisted_names 8.2262 9.0062 10.306 + twisted_pb 12.1134 13.644 12.1177 + twisted_tcp 4.9778 1.934 5.4931 + + GEOMETRIC MEAN 9.31 9.70 10.01 + +The last line reports the geometric mean of each column. We see that +the goal was reached: PyPy's JIT actually improves performance by a +factor of around 9.7 to 10 times on ppc64le. By comparison, it "only" +improves performance by a factor 9.3 on Intel x86-64. I don't know why, +but I'd guess it mostly means that a non-jitted PyPy performs slightly +better on Intel than it does on PowerPC. + +Why is that? Actually, similar numbers are also higher on ARM than on +Intel. We like to guess that on ARM, running the whole interpreter in +PyPy takes up a lot of resources, e.g. the instruction cache, which the +JIT's assembler doesn't need any more after the process is warmed up. +This argument doesn't work for PowerPC, but there are other more subtle +variants of it. Notably, Intel is doing crazy things about branch +prediction, which likely helps a big interpreter---both the non-JITted +PyPy and CPython, and both for the interpreter's main loop itself and +for the numerous indirect branches that depend on the types of the +objects. Moreover, on PowerPC I did notice that gcc itself is not +perfect at optimization: during development of this backend, I often +looked at assembler produced by gcc, and there are a number of +inefficiencies there. All these are factors that slow down the +non-JITted version of PyPy, but don't influence the speed of the +assembler produced just-in-time. + +Anyway, this is just guessing. The fact remains that PyPy can now +be used on PowerPC machines. Have fun! + + +A bientot, + +Armin. 
From noreply at buildbot.pypy.org Fri Oct 16 12:40:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 12:40:18 +0200 (CEST) Subject: [pypy-commit] extradoc extradoc: Mention it's on Linux Message-ID: <20151016104018.0B0011C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5571:df759011beb4 Date: 2015-10-16 12:41 +0200 http://bitbucket.org/pypy/extradoc/changeset/df759011beb4/ Log: Mention it's on Linux diff --git a/blog/draft/ppc-backend.rst b/blog/draft/ppc-backend.rst --- a/blog/draft/ppc-backend.rst +++ b/blog/draft/ppc-backend.rst @@ -2,7 +2,7 @@ PyPy's JIT now supports the 64-bit PowerPC architecture! This is the third architecture supported, in addition to x86 (32 and 64) and ARM -(32-bit only). More precisely, we support the big- and the +(32-bit only). More precisely, we support Linux running the big- and the little-endian variants of ppc64. Thanks to IBM for funding this work! The new JIT backend has been merged into "default". You should be able @@ -24,9 +24,10 @@ Here are the results, where the numbers are speed-up factors between the non-jit and the jit version of PyPy. The first column is x86-64, for -reference. The second and third columns are the two ppc64le runs. A -few benchmarks are not reported here because the runner doesn't execute -them on non-jit (however, apart from sqlitesynth, they all worked). +reference. The second and third columns are the two ppc64le runs. All +are Linux. A few benchmarks are not reported here because the runner +doesn't execute them on non-jit (however, apart from sqlitesynth, they +all worked). 
:: From noreply at buildbot.pypy.org Fri Oct 16 12:52:16 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Oct 2015 12:52:16 +0200 (CEST) Subject: [pypy-commit] pypy default: unroll one more function Message-ID: <20151016105216.9FF5E1C1248@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80269:1a34b3e8080d Date: 2015-10-16 12:50 +0200 http://bitbucket.org/pypy/pypy/changeset/1a34b3e8080d/ Log: unroll one more function diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py --- a/pypy/module/micronumpy/casting.py +++ b/pypy/module/micronumpy/casting.py @@ -91,6 +91,9 @@ NPY.STRINGLTR: 3, NPY.STRINGLTR2: 3, UnicodeType.kind: 3, VoidType.kind: 3, ObjectType.kind: 3} +# this is safe to unroll since it'll only be seen if we look inside +# the find_result_type + at jit.unroll_safe def _use_min_scalar(arrays_w, dtypes_w): """Helper for find_result_type()""" if not arrays_w: From noreply at buildbot.pypy.org Fri Oct 16 12:52:19 2015 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 16 Oct 2015 12:52:19 +0200 (CEST) Subject: [pypy-commit] pypy default: merge Message-ID: <20151016105219.3FF471C1248@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80270:3bc924586ed6 Date: 2015-10-16 12:52 +0200 http://bitbucket.org/pypy/pypy/changeset/3bc924586ed6/ Log: merge diff too long, truncating to 2000 out of 9622 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -76,6 +76,11 @@ if "cppyy" in working_modules: working_modules.remove("cppyy") # depends on ctypes +if sys.platform.startswith("linux"): + _mach = os.popen('uname -m', 'r').read().strip() + if _mach.startswith('ppc'): + working_modules.remove("_continuation") + module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- 
a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -224,6 +224,10 @@ if not for_frame: self._push_all_regs_to_jitframe(mc, [], withfloats, callee_only=True) else: + # NOTE: don't save registers on the jitframe here! It might + # override already-saved values that will be restored + # later... + # # we're possibly called from the slowpath of malloc # save the caller saved registers # assuming we do not collect here diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -41,10 +41,6 @@ from rpython.jit.backend.llsupport.descr import CallDescr -# xxx hack: set a default value for TargetToken._ll_loop_code. If 0, we know -# that it is a LABEL that was not compiled yet. -TargetToken._ll_loop_code = 0 - class TempInt(TempVar): type = INT @@ -1257,18 +1253,6 @@ prepare_op_call_assembler_f = _prepare_op_call_assembler prepare_op_call_assembler_n = _prepare_op_call_assembler - def _prepare_args_for_new_op(self, new_args): - gc_ll_descr = self.cpu.gc_ll_descr - args = gc_ll_descr.args_for_new(new_args) - arglocs = [] - for i in range(len(args)): - arg = args[i] - t = TempInt() - l = self.force_allocate_reg(t, selected_reg=r.all_regs[i]) - self.assembler.load(l, imm(arg)) - arglocs.append(t) - return arglocs - prepare_op_float_add = prepare_two_regs_op prepare_op_float_sub = prepare_two_regs_op prepare_op_float_mul = prepare_two_regs_op diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -26,24 +26,24 @@ # for the individual tests see # ====> ../../test/runner_test.py - add_loop_instructions = ['ldr', 'adds', 'cmp', 'beq', 'b'] - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'cmp', 'bge', - 'push', 'mov', 'mov', 'push', 'mov', 'mov', - 'blx', 'mov', 'mov', 'bx'] + 
add_loop_instructions = 'ldr; adds; cmp; beq; b;$' + bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' + 'push; mov; mov; push; mov; mov; ' + 'blx; mov; mov; bx;$') arch_version = detect_arch_version() if arch_version == 7: - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'cmp', 'bge', - 'push', 'mov', 'mov', 'push', 'mov', 'mov', - 'blx', 'mov', 'mov', 'bx'] + bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' + 'push; mov; mov; push; mov; mov; ' + 'blx; mov; mov; bx;$') else: - bridge_loop_instructions = ['ldr', 'mov', 'nop', 'nop', 'nop', 'cmp', 'bge', - 'push', 'ldr', 'mov', - '*', # inline constant - 'push', 'ldr', 'mov', - '*', # inline constant - 'blx', 'ldr', 'mov', - '*', # inline constant - 'bx'] + bridge_loop_instructions = ('ldr; mov; nop; nop; nop; cmp; bge; ' + 'push; ldr; mov; ' + '[^;]+; ' # inline constant + 'push; ldr; mov; ' + '[^;]+; ' # inline constant + 'blx; ldr; mov; ' + '[^;]+; ' # inline constant + 'bx;$') def get_cpu(self): cpu = CPU(rtyper=None, stats=FakeStats()) diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -59,6 +59,8 @@ 'i86pc': MODEL_X86, # Solaris/Intel 'x86': MODEL_X86, # Apple 'Power Macintosh': MODEL_PPC_64, + 'ppc64': MODEL_PPC_64, + 'ppc64le': MODEL_PPC_64, 'x86_64': MODEL_X86, 'amd64': MODEL_X86, # freebsd 'AMD64': MODEL_X86, # win64 @@ -118,6 +120,8 @@ return "rpython.jit.backend.x86.runner", "CPU_X86_64_SSE4" elif backend_name == MODEL_ARM: return "rpython.jit.backend.arm.runner", "CPU_ARM" + elif backend_name == MODEL_PPC_64: + return "rpython.jit.backend.ppc.runner", "PPC_CPU" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -243,6 +243,23 @@ 
self.mc.get_relative_pos()) def call_assembler(self, op, argloc, vloc, result_loc, tmploc): + """ + * argloc: location of the frame argument that we're passing to + the called assembler (this is the first return value + of locs_for_call_assembler()) + + * vloc: location of the virtualizable (not in a register; + this is the optional second return value of + locs_for_call_assembler(), or imm(0) if none returned) + + * result_loc: location of op.result (which is not be + confused with the next one) + + * tmploc: location where the actual call to the other piece + of assembler will return its jitframe result + (which is always a REF), before the helper may be + called + """ descr = op.getdescr() assert isinstance(descr, JitCellToken) # diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py --- a/rpython/jit/backend/llsupport/llerrno.py +++ b/rpython/jit/backend/llsupport/llerrno.py @@ -1,14 +1,22 @@ +import sys from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.jit.backend.llsupport.symbolic import WORD +if sys.byteorder == 'little' or sys.maxint <= 2**32: + long2int = int2long = lambda x: x +else: + def long2int(x): return x >> 32 + def int2long(x): return x << 32 + + def get_debug_saved_errno(cpu): - return cpu._debug_errno_container[3] + return long2int(cpu._debug_errno_container[3]) def set_debug_saved_errno(cpu, nerrno): assert nerrno >= 0 - cpu._debug_errno_container[3] = nerrno + cpu._debug_errno_container[3] = int2long(nerrno) def get_rpy_errno_offset(cpu): if cpu.translate_support_code: @@ -19,11 +27,11 @@ def get_debug_saved_alterrno(cpu): - return cpu._debug_errno_container[4] + return long2int(cpu._debug_errno_container[4]) def set_debug_saved_alterrno(cpu, nerrno): assert nerrno >= 0 - cpu._debug_errno_container[4] = nerrno + cpu._debug_errno_container[4] = int2long(nerrno) def get_alt_errno_offset(cpu): if 
cpu.translate_support_code: diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -3,7 +3,7 @@ """ import py -import re +import re, sys, struct from rpython.jit.metainterp.history import TargetToken, BasicFinalDescr,\ JitCellToken, BasicFailDescr, AbstractDescr from rpython.jit.backend.llsupport.gc import GcLLDescription, GcLLDescr_boehm,\ @@ -90,6 +90,8 @@ assert nos == [0, 1, 25] elif self.cpu.backend_name.startswith('arm'): assert nos == [0, 1, 47] + elif self.cpu.backend_name.startswith('ppc64'): + assert nos == [0, 1, 33] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] @@ -155,6 +157,8 @@ self.nursery = lltype.malloc(NTP, 64, flavor='raw') for i in range(64): self.nursery[i] = NOT_INITIALIZED + self.nursery_words = rffi.cast(rffi.CArrayPtr(lltype.Signed), + self.nursery) self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 2, flavor='raw') self.addrs[0] = rffi.cast(lltype.Signed, self.nursery) @@ -263,11 +267,11 @@ # slowpath never called assert gc_ll_descr.calls == [] - def test_malloc_nursery_varsize(self): + def test_malloc_nursery_varsize_nonframe(self): self.cpu = self.getcpu(None) A = lltype.GcArray(lltype.Signed) arraydescr = self.cpu.arraydescrof(A) - arraydescr.tid = 15 + arraydescr.tid = 1515 ops = ''' [i0, i1, i2] p0 = call_malloc_nursery_varsize(0, 8, i0, descr=arraydescr) @@ -283,8 +287,8 @@ assert rffi.cast(lltype.Signed, ref(0)) == nurs_adr + 0 assert rffi.cast(lltype.Signed, ref(1)) == nurs_adr + 2*WORD + 8*1 # check the nursery content and state - assert gc_ll_descr.nursery[0] == chr(15) - assert gc_ll_descr.nursery[2 * WORD + 8] == chr(15) + assert gc_ll_descr.nursery_words[0] == 1515 + assert gc_ll_descr.nursery_words[2 + 8 // WORD] == 1515 assert gc_ll_descr.addrs[0] == nurs_adr + (((4 * WORD + 8*1 + 5*2) + 
(WORD - 1)) & ~(WORD - 1)) # slowpath never called assert gc_ll_descr.calls == [] @@ -323,11 +327,11 @@ idx = 1 assert len(frame.jf_gcmap) == expected_size if self.cpu.IS_64_BIT: - assert frame.jf_gcmap[idx] == (1<<29) | (1 << 30) + exp_idx = self.cpu.JITFRAME_FIXED_SIZE + 1 # +1 from i0 else: assert frame.jf_gcmap[idx] exp_idx = self.cpu.JITFRAME_FIXED_SIZE - 32 * idx + 1 # +1 from i0 - assert frame.jf_gcmap[idx] == (1 << (exp_idx + 1)) | (1 << exp_idx) + assert frame.jf_gcmap[idx] == (1 << (exp_idx + 1)) | (1 << exp_idx) self.cpu = self.getcpu(check) ops = ''' @@ -609,7 +613,10 @@ cpu = CPU(None, None) cpu.gc_ll_descr = GCDescrShadowstackDirect() wbd = cpu.gc_ll_descr.write_barrier_descr - wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + if sys.byteorder == 'little': + wbd.jit_wb_if_flag_byteofs = 0 # directly into 'hdr' field + else: + wbd.jit_wb_if_flag_byteofs = struct.calcsize("l") - 1 S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', ('hdr', lltype.Signed), @@ -636,7 +643,9 @@ frames.append(frame) new_frame = JITFRAME.allocate(frame.jf_frame_info) gcmap = unpack_gcmap(frame) - if self.cpu.IS_64_BIT: + if self.cpu.backend_name.startswith('ppc64'): + assert gcmap == [30, 31, 32] + elif self.cpu.IS_64_BIT: assert gcmap == [28, 29, 30] elif self.cpu.backend_name.startswith('arm'): assert gcmap == [44, 45, 46] @@ -647,6 +656,8 @@ new_frame.jf_frame[item] = rffi.cast(lltype.Signed, s) assert cpu.gc_ll_descr.gcrootmap.stack[0] == rffi.cast(lltype.Signed, frame) cpu.gc_ll_descr.gcrootmap.stack[0] = rffi.cast(lltype.Signed, new_frame) + print '"Collecting" moved the frame from %d to %d' % ( + i, cpu.gc_ll_descr.gcrootmap.stack[0]) frames.append(new_frame) def check2(i): diff --git a/rpython/jit/backend/llsupport/test/ztranslation_test.py b/rpython/jit/backend/llsupport/test/ztranslation_test.py --- a/rpython/jit/backend/llsupport/test/ztranslation_test.py +++ b/rpython/jit/backend/llsupport/test/ztranslation_test.py @@ -307,7 +307,7 @@ for 
line in open(str(logfile)): if 'guard_class' in line: guard_class += 1 - # if we get many more guard_classes, it means that we generate + # if we get many more guard_classes (~93), it means that we generate # guards that always fail (the following assert's original purpose # is to catch the following case: each GUARD_CLASS is misgenerated # and always fails with "gcremovetypeptr") diff --git a/rpython/jit/backend/ppc/__init__.py b/rpython/jit/backend/ppc/__init__.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/__init__.py @@ -0,0 +1,1 @@ +# diff --git a/rpython/jit/backend/ppc/arch.py b/rpython/jit/backend/ppc/arch.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/arch.py @@ -0,0 +1,82 @@ +# Constants that depend on whether we are on 32-bit or 64-bit + +import sys +from rpython.jit.backend.ppc import register as r + +import sys +if sys.maxint == (2**31 - 1): + assert False, "the ppc backend only supports PPC-64 for now" + WORD = 4 + #DWORD = 2 * WORD + IS_PPC_32 = True + #BACKCHAIN_SIZE = 2 + #FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * DWORD +else: + WORD = 8 + #DWORD = 2 * WORD + IS_PPC_32 = False + #BACKCHAIN_SIZE = 6 + #FPR_SAVE_AREA = len(NONVOLATILES_FLOAT) * WORD + +IS_PPC_64 = not IS_PPC_32 +MY_COPY_OF_REGS = 0 + +IS_BIG_ENDIAN = sys.byteorder == 'big' +IS_LITTLE_ENDIAN = sys.byteorder == 'little' +assert IS_BIG_ENDIAN ^ IS_LITTLE_ENDIAN + +#FORCE_INDEX = WORD +#GPR_SAVE_AREA = len(NONVOLATILES) * WORD +#FLOAT_INT_CONVERSION = WORD +MAX_REG_PARAMS = 8 +MAX_FREG_PARAMS = 13 +# we need at most 5 instructions to load a constant +# and one instruction to patch the stack pointer +#SIZE_LOAD_IMM_PATCH_SP = 6 + +#FORCE_INDEX_OFS = (len(MANAGED_REGS) + len(MANAGED_FP_REGS)) * WORD + + +# BIG ENDIAN LITTLE ENDIAN +# +# +--------------------+ <- SP + STD_FRAME_SIZE +# | general registers | +# | save area | +# +--------------------+ <- SP + 120 SP + 104 +# | Local vars | +# +--------------------+ <- SP + 112 SP + 96 +# | Parameter 
save | +# | area (8 args max) | +# +--------------------+ <- SP + 48 SP + 32 +# | TOC (unused) | +# +--------------------+ <- SP + 40 SP + 24 +# | link ed. (unused) | +# +--------------------+ <- SP + 32 absent +# | compiler (unused) | +# +--------------------+ <- SP + 24 absent +# | LR save area | +# +--------------------+ <- SP + 16 SP + 16 +# | CR save (unused) | +# +--------------------+ <- SP + 8 SP + 8 +# | SP back chain | +# +--------------------+ <- SP SP + +# The local variables area contains only a copy of the 2nd argument +# passed to the machine code function, which is the ll_threadlocal_addr. +# The 1st argument, i.e. the GC-managed jitframe, is stored in the +# register r31. + + +LR_BC_OFFSET = 16 +_GAP = 0 if IS_BIG_ENDIAN else 16 +PARAM_SAVE_AREA_OFFSET = 48 - _GAP +LOCAL_VARS_OFFSET = 112 - _GAP +THREADLOCAL_ADDR_OFFSET = LOCAL_VARS_OFFSET +GPR_SAVE_AREA_OFFSET = 120 - _GAP + +REGISTERS_SAVED = [r.r25, r.r26, r.r27, r.r28, r.r29, r.r30, r.r31] +assert REGISTERS_SAVED == [_r for _r in r.NONVOLATILES + if _r in r.MANAGED_REGS or _r == r.r31] + +STD_FRAME_SIZE_IN_BYTES = GPR_SAVE_AREA_OFFSET + len(REGISTERS_SAVED) * WORD +assert STD_FRAME_SIZE_IN_BYTES % 16 == 0 diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/callbuilder.py @@ -0,0 +1,278 @@ +from rpython.jit.backend.ppc.arch import IS_PPC_64, WORD, PARAM_SAVE_AREA_OFFSET +from rpython.jit.backend.ppc.arch import THREADLOCAL_ADDR_OFFSET +import rpython.jit.backend.ppc.register as r +from rpython.jit.metainterp.history import INT, FLOAT +from rpython.jit.backend.llsupport.callbuilder import AbstractCallBuilder +from rpython.jit.backend.ppc.jump import remap_frame_layout +from rpython.rlib.objectmodel import we_are_translated +from rpython.jit.backend.llsupport import llerrno +from rpython.rtyper.lltypesystem import rffi + + +def follow_jump(addr): + # xxx implement me + return addr + + +class 
CallBuilder(AbstractCallBuilder): + GPR_ARGS = [r.r3, r.r4, r.r5, r.r6, r.r7, r.r8, r.r9, r.r10] + FPR_ARGS = r.MANAGED_FP_REGS + assert FPR_ARGS == [r.f1, r.f2, r.f3, r.f4, r.f5, r.f6, r.f7, + r.f8, r.f9, r.f10, r.f11, r.f12, r.f13] + RSHADOWPTR = r.RCS1 + RFASTGILPTR = r.RCS2 + RSHADOWOLD = r.RCS3 + + def __init__(self, assembler, fnloc, arglocs, resloc): + AbstractCallBuilder.__init__(self, assembler, fnloc, arglocs, + resloc, restype=INT, ressize=None) + + def prepare_arguments(self): + assert IS_PPC_64 + self.subtracted_to_sp = 0 + + # Prepare arguments. Note that this follows the convention where + # a prototype is in scope, and doesn't take "..." arguments. If + # you were to call a C function with a "..." argument with cffi, + # it would not go there but instead via libffi. If you pretend + # instead that it takes fixed arguments, then it would arrive here + # but the convention is bogus for floating-point arguments. (And, + # to add to the mess, at least CPython's ctypes cannot be used + # to call a "..." function with floating-point arguments. As I + # guess that it's a problem with libffi, it means PyPy inherits + # the same problem.) + arglocs = self.arglocs + num_args = len(arglocs) + + non_float_locs = [] + non_float_regs = [] + float_locs = [] + for i in range(min(num_args, 8)): + if arglocs[i].type != FLOAT: + non_float_locs.append(arglocs[i]) + non_float_regs.append(self.GPR_ARGS[i]) + else: + float_locs.append(arglocs[i]) + # now 'non_float_locs' and 'float_locs' together contain the + # locations of the first 8 arguments + + if num_args > 8: + # We need to make a larger PPC stack frame, as shown on the + # picture in arch.py. It needs to be 48 bytes + 8 * num_args. + # The new SP back chain location should point to the top of + # the whole stack frame, i.e. jumping over both the existing + # fixed-sise part and the new variable-sized part. 
+ base = PARAM_SAVE_AREA_OFFSET + varsize = base + 8 * num_args + varsize = (varsize + 15) & ~15 # align + self.mc.load(r.SCRATCH2.value, r.SP.value, 0) # SP back chain + self.mc.store_update(r.SCRATCH2.value, r.SP.value, -varsize) + self.subtracted_to_sp = varsize + + # In this variable-sized part, only the arguments from the 8th + # one need to be written, starting at SP + 112 + for n in range(8, num_args): + loc = arglocs[n] + if loc.type != FLOAT: + # after the 8th argument, a non-float location is + # always stored in the stack + if loc.is_reg(): + src = loc + else: + src = r.r2 + self.asm.regalloc_mov(loc, src) + self.mc.std(src.value, r.SP.value, base + 8 * n) + else: + # the first 13 floating-point arguments are all passed + # in the registers f1 to f13, independently on their + # index in the complete list of arguments + if len(float_locs) < len(self.FPR_ARGS): + float_locs.append(loc) + else: + if loc.is_fp_reg(): + src = loc + else: + src = r.FP_SCRATCH + self.asm.regalloc_mov(loc, src) + self.mc.stfd(src.value, r.SP.value, base + 8 * n) + + # We must also copy fnloc into FNREG + non_float_locs.append(self.fnloc) + non_float_regs.append(self.mc.RAW_CALL_REG) # r2 or r12 + + if float_locs: + assert len(float_locs) <= len(self.FPR_ARGS) + remap_frame_layout(self.asm, float_locs, + self.FPR_ARGS[:len(float_locs)], + r.FP_SCRATCH) + + remap_frame_layout(self.asm, non_float_locs, non_float_regs, + r.SCRATCH) + + + def push_gcmap(self): + # we push *now* the gcmap, describing the status of GC registers + # after the rearrangements done just before, ignoring the return + # value r3, if necessary + assert not self.is_call_release_gil + noregs = self.asm.cpu.gc_ll_descr.is_shadow_stack() + gcmap = self.asm._regalloc.get_gcmap([r.r3], noregs=noregs) + self.asm.push_gcmap(self.mc, gcmap, store=True) + + def pop_gcmap(self): + ssreg = None + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack and self.is_call_release_gil: + 
# in this mode, RSHADOWOLD happens to contain the shadowstack + # top at this point, so reuse it instead of loading it again + ssreg = self.RSHADOWOLD + self.asm._reload_frame_if_necessary(self.mc, shadowstack_reg=ssreg) + + def emit_raw_call(self): + self.mc.raw_call() + + def restore_stack_pointer(self): + if self.subtracted_to_sp != 0: + self.mc.addi(r.SP.value, r.SP.value, self.subtracted_to_sp) + + def load_result(self): + assert (self.resloc is None or + self.resloc is r.r3 or + self.resloc is r.f1) + + + def call_releasegil_addr_and_move_real_arguments(self, fastgil): + assert self.is_call_release_gil + RSHADOWPTR = self.RSHADOWPTR + RFASTGILPTR = self.RFASTGILPTR + RSHADOWOLD = self.RSHADOWOLD + # + # Save this thread's shadowstack pointer into r29, for later comparison + gcrootmap = self.asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap: + if gcrootmap.is_shadow_stack: + rst = gcrootmap.get_root_stack_top_addr() + self.mc.load_imm(RSHADOWPTR, rst) + self.mc.load(RSHADOWOLD.value, RSHADOWPTR.value, 0) + # + # change 'rpy_fastgil' to 0 (it should be non-zero right now) + self.mc.load_imm(RFASTGILPTR, fastgil) + self.mc.li(r.r0.value, 0) + self.mc.lwsync() + self.mc.std(r.r0.value, RFASTGILPTR.value, 0) + # + if not we_are_translated(): # for testing: we should not access + self.mc.addi(r.SPP.value, r.SPP.value, 1) # r31 any more + + + def move_real_result_and_call_reacqgil_addr(self, fastgil): + from rpython.jit.backend.ppc.codebuilder import OverwritingBuilder + + # try to reacquire the lock. 
The following registers are still + # valid from before the call: + RSHADOWPTR = self.RSHADOWPTR # r30: &root_stack_top + RFASTGILPTR = self.RFASTGILPTR # r29: &fastgil + RSHADOWOLD = self.RSHADOWOLD # r28: previous val of root_stack_top + + # Equivalent of 'r10 = __sync_lock_test_and_set(&rpy_fastgil, 1);' + self.mc.li(r.r9.value, 1) + retry_label = self.mc.currpos() + self.mc.ldarx(r.r10.value, 0, RFASTGILPTR.value) # load the lock value + self.mc.stdcxx(r.r9.value, 0, RFASTGILPTR.value) # try to claim lock + self.mc.bc(6, 2, retry_label - self.mc.currpos()) # retry if failed + self.mc.isync() + + self.mc.cmpdi(0, r.r10.value, 0) + b1_location = self.mc.currpos() + self.mc.trap() # boehm: patched with a BEQ: jump if r10 is zero + # shadowstack: patched with BNE instead + + if self.asm.cpu.gc_ll_descr.gcrootmap: + # When doing a call_release_gil with shadowstack, there + # is the risk that the 'rpy_fastgil' was free but the + # current shadowstack can be the one of a different + # thread. So here we check if the shadowstack pointer + # is still the same as before we released the GIL (saved + # in RSHADOWOLD), and if not, we fall back to 'reacqgil_addr'. + self.mc.load(r.r9.value, RSHADOWPTR.value, 0) + self.mc.cmpdi(0, r.r9.value, RSHADOWOLD.value) + bne_location = b1_location + b1_location = self.mc.currpos() + self.mc.trap() + + # revert the rpy_fastgil acquired above, so that the + # general 'reacqgil_addr' below can acquire it again... + # (here, r10 is conveniently zero) + self.mc.std(r.r10.value, RFASTGILPTR.value, 0) + + pmc = OverwritingBuilder(self.mc, bne_location, 1) + pmc.bne(self.mc.currpos() - bne_location) + pmc.overwrite() + # + # Yes, we need to call the reacqgil() function. 
+ # save the result we just got + RSAVEDRES = RFASTGILPTR # can reuse this reg here + reg = self.resloc + if reg is not None: + if reg.is_core_reg(): + self.mc.mr(RSAVEDRES.value, reg.value) + elif reg.is_fp_reg(): + self.mc.stfd(reg.value, r.SP.value, + PARAM_SAVE_AREA_OFFSET + 7 * WORD) + self.mc.load_imm(self.mc.RAW_CALL_REG, self.asm.reacqgil_addr) + self.mc.raw_call() + if reg is not None: + if reg.is_core_reg(): + self.mc.mr(reg.value, RSAVEDRES.value) + elif reg.is_fp_reg(): + self.mc.lfd(reg.value, r.SP.value, + PARAM_SAVE_AREA_OFFSET + 7 * WORD) + + # replace b1_location with BEQ(here) + pmc = OverwritingBuilder(self.mc, b1_location, 1) + pmc.beq(self.mc.currpos() - b1_location) + pmc.overwrite() + + if not we_are_translated(): # for testing: now we can access + self.mc.addi(r.SPP.value, r.SPP.value, -1) # r31 again + + + def write_real_errno(self, save_err): + if save_err & rffi.RFFI_READSAVED_ERRNO: + # Just before a call, read '*_errno' and write it into the + # real 'errno'. A lot of registers are free here, notably + # r11 and r0. + if save_err & rffi.RFFI_ALT_ERRNO: + rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu) + else: + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r11.value, r.SP.value, + THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) + self.mc.lwz(r.r0.value, r.r11.value, rpy_errno) + self.mc.ld(r.r11.value, r.r11.value, p_errno) + self.mc.stw(r.r0.value, r.r11.value, 0) + elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE: + # Same, but write zero. 
+ p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r11.value, r.SP.value, + THREADLOCAL_ADDR_OFFSET + self.subtracted_to_sp) + self.mc.ld(r.r11.value, r.r11.value, p_errno) + self.mc.li(r.r0.value, 0) + self.mc.stw(r.r0.value, r.r11.value, 0) + + def read_real_errno(self, save_err): + if save_err & rffi.RFFI_SAVE_ERRNO: + # Just after a call, read the real 'errno' and save a copy of + # it inside our thread-local '*_errno'. Registers r4-r10 + # never contain anything after the call. + if save_err & rffi.RFFI_ALT_ERRNO: + rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu) + else: + rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu) + p_errno = llerrno.get_p_errno_offset(self.asm.cpu) + self.mc.ld(r.r9.value, r.SP.value, THREADLOCAL_ADDR_OFFSET) + self.mc.ld(r.r10.value, r.r9.value, p_errno) + self.mc.lwz(r.r10.value, r.r10.value, 0) + self.mc.stw(r.r10.value, r.r9.value, rpy_errno) diff --git a/rpython/jit/backend/ppc/codebuilder.py b/rpython/jit/backend/ppc/codebuilder.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/ppc/codebuilder.py @@ -0,0 +1,1292 @@ +import os +from rpython.jit.backend.ppc.ppc_form import PPCForm as Form +from rpython.jit.backend.ppc.locations import RegisterLocation +from rpython.jit.backend.ppc.ppc_field import ppc_fields +from rpython.jit.backend.ppc.arch import (IS_PPC_32, WORD, IS_PPC_64, + LR_BC_OFFSET, IS_BIG_ENDIAN, IS_LITTLE_ENDIAN) +import rpython.jit.backend.ppc.register as r +import rpython.jit.backend.ppc.condition as c +from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin +from rpython.jit.backend.llsupport.assembler import GuardToken +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.jit.metainterp.resoperation import rop +from rpython.tool.udir import udir +from rpython.rlib.objectmodel import we_are_translated + +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.jit.backend.ppc.rassemblermaker import make_rassembler + + +# 
the following instructions can't accept "r0" as the second argument +# (i.e. the base address): it is recognized as "0" instead, or is +# even invalid (load-with-update, store-with-update). +# +# any load or store instruction +# addi rD, r0, immed +# subi rD, r0, immed +# addis rD, r0, immed +# subis rD, r0, immed + + +A = Form("frD", "frA", "frB", "XO3", "Rc") +A1 = Form("frD", "frB", "XO3", "Rc") +A2 = Form("frD", "frA", "frC", "XO3", "Rc") +A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc") + +I = Form("LI", "AA", "LK") + +B = Form("BO", "BI", "BD", "AA", "LK") + +SC = Form("AA") # fudge + +DD = Form("rD", "rA", "SIMM") +DDO = Form("rD", "rA", "ds", "XO4") +DS = Form("rA", "rS", "UIMM") + +X = Form("XO1") +XS = Form("rA", "rS", "rB", "XO1", "Rc") +XSO = Form("rS", "rA", "rB", "XO1") +XD = Form("rD", "rA", "rB", "XO1") +XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc") +XO0 = Form("rD", "rA", "OE", "XO2", "Rc") +XDB = Form("frD", "frB", "XO1", "Rc") +XS0 = Form("rA", "rS", "XO1", "Rc") +X0 = Form("rA", "rB", "XO1") +XcAB = Form("crfD", "rA", "rB", "XO1") +XN = Form("rD", "rA", "NB", "XO1") +XL = Form("crbD", "crbA", "crbB", "XO1") +XL1 = Form("crfD", "crfS") +XL2 = Form("crbD", "XO1", "Rc") +XFL = Form("FM", "frB", "XO1", "Rc") +XFX = Form("CRM", "rS", "XO1") +XLL = Form("LL", "XO1") + +MI = Form("rA", "rS", "SH", "MB", "ME", "Rc") +MB = Form("rA", "rS", "rB", "MB", "ME", "Rc") +MDI = Form("rA", "rS", "sh", "mbe", "XO5", "Rc") +MDS = Form("rA", "rS", "rB", "mbe", "XO7", "Rc") + +class BasicPPCAssembler(object): + + def disassemble(cls, inst, labels={}, pc=0): + cache = cls.__dict__.get('idesc cache') + if cache is None: + idescs = cls.get_idescs() + cache = {} + for n, i in idescs: + cache.setdefault(i.specializations[ppc_fields['opcode']], + []).append((n,i)) + setattr(cls, 'idesc cache', cache) + matches = [] + idescs = cache[ppc_fields['opcode'].decode(inst)] + for name, idesc in idescs: + m = idesc.match(inst) + if m > 0: + matches.append((m, idesc, name)) + if 
matches: + score, idesc, name = max(matches) + return idesc.disassemble(name, inst, labels, pc) + disassemble = classmethod(disassemble) + + # "basic" means no simplified mnemonics + + # I form + b = I(18, AA=0, LK=0) + ba = I(18, AA=1, LK=0) + bl = I(18, AA=0, LK=1) + bla = I(18, AA=1, LK=1) + + # B form + bc = B(16, AA=0, LK=0) + bcl = B(16, AA=0, LK=1) + bca = B(16, AA=1, LK=0) + bcla = B(16, AA=1, LK=1) + + # SC form + sc = SC(17, AA=1) # it's not really the aa field... + + # D form + addi = DD(14) + addic = DD(12) + addicx = DD(13) + addis = DD(15) + + andix = DS(28) + andisx = DS(29) + + cmpi = Form("crfD", "L", "rA", "SIMM")(11) + cmpi.default(L=0).default(crfD=0) + cmpli = Form("crfD", "L", "rA", "UIMM")(10) + cmpli.default(L=0).default(crfD=0) + + lbz = DD(34) + lbzu = DD(35) + ld = DDO(58, XO4=0) + ldu = DDO(58, XO4=1) + lfd = DD(50) + lfdu = DD(51) + lfs = DD(48) + lfsu = DD(49) + lha = DD(42) + lhau = DD(43) + lhz = DD(40) + lhzu = DD(41) + lmw = DD(46) + lwa = DDO(58, XO4=2) + lwz = DD(32) + lwzu = DD(33) + + mulli = DD(7) + ori = DS(24) + oris = DS(25) + + stb = DD(38) + stbu = DD(39) + std = DDO(62, XO4=0) + stdu = DDO(62, XO4=1) + stfd = DD(54) + stfdu = DD(55) + stfs = DD(52) + stfsu = DD(53) + sth = DD(44) + sthu = DD(45) + stmw = DD(47) + stw = DD(36) + stwu = DD(37) + + subfic = DD(8) + tdi = Form("TO", "rA", "SIMM")(2) + twi = Form("TO", "rA", "SIMM")(3) + xori = DS(26) + xoris = DS(27) + + # X form + + and_ = XS(31, XO1=28, Rc=0) + and_x = XS(31, XO1=28, Rc=1) + + andc_ = XS(31, XO1=60, Rc=0) + andc_x = XS(31, XO1=60, Rc=1) + + # is the L bit for 64 bit compares? 
hmm + cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0) + cmp.default(L=0).default(crfD=0) + cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32) + cmpl.default(L=0).default(crfD=0) + + cntlzd = XS0(31, XO1=58, Rc=0) + cntlzdx = XS0(31, XO1=58, Rc=1) + cntlzw = XS0(31, XO1=26, Rc=0) + cntlzwx = XS0(31, XO1=26, Rc=1) + + dcba = X0(31, XO1=758) + dcbf = X0(31, XO1=86) + dcbi = X0(31, XO1=470) + dcbst = X0(31, XO1=54) + dcbt = X0(31, XO1=278) + dcbtst = X0(31, XO1=246) + dcbz = X0(31, XO1=1014) + + eciwx = XD(31, XO1=310) + ecowx = XS(31, XO1=438, Rc=0) + + eieio = X(31, XO1=854) + + eqv = XS(31, XO1=284, Rc=0) + eqvx = XS(31, XO1=284, Rc=1) + + extsb = XS0(31, XO1=954, Rc=0) + extsbx = XS0(31, XO1=954, Rc=1) + + extsh = XS0(31, XO1=922, Rc=0) + extshx = XS0(31, XO1=922, Rc=1) + + extsw = XS0(31, XO1=986, Rc=0) + extswx = XS0(31, XO1=986, Rc=1) + + fabs = XDB(63, XO1=264, Rc=0) + fabsx = XDB(63, XO1=264, Rc=1) + + fcmpo = XcAB(63, XO1=32) + fcmpu = XcAB(63, XO1=0) + + fcfid = XDB(63, XO1=846, Rc=0) + fcfidx = XDB(63, XO1=846, Rc=1) + + fctid = XDB(63, XO1=814, Rc=0) + fctidx = XDB(63, XO1=814, Rc=1) + + fctidz = XDB(63, XO1=815, Rc=0) + fctidzx = XDB(63, XO1=815, Rc=1) + + fctiw = XDB(63, XO1=14, Rc=0) + fctiwx = XDB(63, XO1=14, Rc=1) + + fctiwz = XDB(63, XO1=15, Rc=0) + fctiwzx = XDB(63, XO1=15, Rc=1) + + fmr = XDB(63, XO1=72, Rc=0) + fmrx = XDB(63, XO1=72, Rc=1) + + fnabs = XDB(63, XO1=136, Rc=0) + fnabsx = XDB(63, XO1=136, Rc=1) + + fneg = XDB(63, XO1=40, Rc=0) + fnegx = XDB(63, XO1=40, Rc=1) + + frsp = XDB(63, XO1=12, Rc=0) + frspx = XDB(63, XO1=12, Rc=1) + + fsqrt = XDB(63, XO1=22, Rc=0) + + mffgpr = XS(31, XO1=607, Rc=0) + mftgpr = XS(31, XO1=735, Rc=0) + + icbi = X0(31, XO1=982) + + lbzux = XD(31, XO1=119) + lbzx = XD(31, XO1=87) + ldarx = XD(31, XO1=84) + ldux = XD(31, XO1=53) + ldx = XD(31, XO1=21) + lfdux = XD(31, XO1=631) + lfdx = XD(31, XO1=599) + lfsux = XD(31, XO1=567) + lfsx = XD(31, XO1=535) + lhaux = XD(31, XO1=375) + lhax = XD(31, XO1=343) + 
lhbrx = XD(31, XO1=790) + lhzux = XD(31, XO1=311) + lhzx = XD(31, XO1=279) + lswi = XD(31, XO1=597) + lswx = XD(31, XO1=533) + lwarx = XD(31, XO1=20) + lwaux = XD(31, XO1=373) + lwax = XD(31, XO1=341) + lwbrx = XD(31, XO1=534) + lwzux = XD(31, XO1=55) + lwzx = XD(31, XO1=23) + + mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64) + mcrxr = Form("crfD", "XO1")(31, XO1=512) + mfcr = Form("rD", "XO1")(31, XO1=19) + mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0) + mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1) + mfmsr = Form("rD", "XO1")(31, XO1=83) + mfsr = Form("rD", "SR", "XO1")(31, XO1=595) + mfsrin = XDB(31, XO1=659, Rc=0) + + add = XO(31, XO2=266, OE=0, Rc=0) + addx = XO(31, XO2=266, OE=0, Rc=1) + addo = XO(31, XO2=266, OE=1, Rc=0) + addox = XO(31, XO2=266, OE=1, Rc=1) + + addc = XO(31, XO2=10, OE=0, Rc=0) + addcx = XO(31, XO2=10, OE=0, Rc=1) + addco = XO(31, XO2=10, OE=1, Rc=0) + addcox = XO(31, XO2=10, OE=1, Rc=1) + + adde = XO(31, XO2=138, OE=0, Rc=0) + addex = XO(31, XO2=138, OE=0, Rc=1) + addeo = XO(31, XO2=138, OE=1, Rc=0) + addeox = XO(31, XO2=138, OE=1, Rc=1) + + addme = XO(31, rB=0, XO2=234, OE=0, Rc=0) + addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1) + addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0) + addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1) + + addze = XO(31, rB=0, XO2=202, OE=0, Rc=0) + addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1) + addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0) + addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1) + + bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0) + bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1) + + bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0) + bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1) + + crand = XL(19, XO1=257) + crandc = XL(19, XO1=129) + creqv = XL(19, XO1=289) + crnand = XL(19, XO1=225) + crnor = XL(19, XO1=33) + cror = XL(19, XO1=449) + crorc = XL(19, XO1=417) + crxor = XL(19, XO1=193) + + divd = XO(31, XO2=489, OE=0, Rc=0) + divdx = XO(31, XO2=489, OE=0, Rc=1) + divdo = XO(31, 
XO2=489, OE=1, Rc=0) + divdox = XO(31, XO2=489, OE=1, Rc=1) + + divdu = XO(31, XO2=457, OE=0, Rc=0) + divdux = XO(31, XO2=457, OE=0, Rc=1) + divduo = XO(31, XO2=457, OE=1, Rc=0) + divduox = XO(31, XO2=457, OE=1, Rc=1) + + divw = XO(31, XO2=491, OE=0, Rc=0) + divwx = XO(31, XO2=491, OE=0, Rc=1) + divwo = XO(31, XO2=491, OE=1, Rc=0) + divwox = XO(31, XO2=491, OE=1, Rc=1) + + divwu = XO(31, XO2=459, OE=0, Rc=0) + divwux = XO(31, XO2=459, OE=0, Rc=1) + divwuo = XO(31, XO2=459, OE=1, Rc=0) + divwuox = XO(31, XO2=459, OE=1, Rc=1) + + fadd = A(63, XO3=21, Rc=0) + faddx = A(63, XO3=21, Rc=1) + fadds = A(59, XO3=21, Rc=0) + faddsx = A(59, XO3=21, Rc=1) + + fdiv = A(63, XO3=18, Rc=0) + fdivx = A(63, XO3=18, Rc=1) + fdivs = A(59, XO3=18, Rc=0) + fdivsx = A(59, XO3=18, Rc=1) + + fmadd = A3(63, XO3=19, Rc=0) + fmaddx = A3(63, XO3=19, Rc=1) + fmadds = A3(59, XO3=19, Rc=0) + fmaddsx = A3(59, XO3=19, Rc=1) + + fmsub = A3(63, XO3=28, Rc=0) + fmsubx = A3(63, XO3=28, Rc=1) + fmsubs = A3(59, XO3=28, Rc=0) + fmsubsx = A3(59, XO3=28, Rc=1) + + fmul = A2(63, XO3=25, Rc=0) + fmulx = A2(63, XO3=25, Rc=1) + fmuls = A2(59, XO3=25, Rc=0) + fmulsx = A2(59, XO3=25, Rc=1) + + fnmadd = A3(63, XO3=31, Rc=0) + fnmaddx = A3(63, XO3=31, Rc=1) + fnmadds = A3(59, XO3=31, Rc=0) + fnmaddsx = A3(59, XO3=31, Rc=1) + + fnmsub = A3(63, XO3=30, Rc=0) + fnmsubx = A3(63, XO3=30, Rc=1) + fnmsubs = A3(59, XO3=30, Rc=0) + fnmsubsx = A3(59, XO3=30, Rc=1) + + fres = A1(59, XO3=24, Rc=0) + fresx = A1(59, XO3=24, Rc=1) + + frsp = A1(63, XO3=12, Rc=0) + frspx = A1(63, XO3=12, Rc=1) + + frsqrte = A1(63, XO3=26, Rc=0) + frsqrtex = A1(63, XO3=26, Rc=1) + + fsel = A3(63, XO3=23, Rc=0) + fselx = A3(63, XO3=23, Rc=1) + + frsqrt = A1(63, XO3=22, Rc=0) + frsqrtx = A1(63, XO3=22, Rc=1) + frsqrts = A1(59, XO3=22, Rc=0) + frsqrtsx = A1(59, XO3=22, Rc=1) + + fsub = A(63, XO3=20, Rc=0) + fsubx = A(63, XO3=20, Rc=1) + fsubs = A(59, XO3=20, Rc=0) + fsubsx = A(59, XO3=20, Rc=1) + + isync = X(19, XO1=150) + + mcrf = XL1(19) + + mfspr = 
Form("rD", "spr", "XO1")(31, XO1=339) + mftb = Form("rD", "spr", "XO1")(31, XO1=371) + + mtcrf = XFX(31, XO1=144) + + mtfsb0 = XL2(63, XO1=70, Rc=0) + mtfsb0x = XL2(63, XO1=70, Rc=1) + mtfsb1 = XL2(63, XO1=38, Rc=0) + mtfsb1x = XL2(63, XO1=38, Rc=1) + + mtfsf = XFL(63, XO1=711, Rc=0) + mtfsfx = XFL(63, XO1=711, Rc=1) + + mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0) + mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1) + + mtmsr = Form("rS", "XO1")(31, XO1=146) + + mtspr = Form("rS", "spr", "XO1")(31, XO1=467) + + mtsr = Form("rS", "SR", "XO1")(31, XO1=210) + mtsrin = Form("rS", "rB", "XO1")(31, XO1=242) + + mulhd = XO(31, OE=0, XO2=73, Rc=0) + mulhdx = XO(31, OE=0, XO2=73, Rc=1) + + mulhdu = XO(31, OE=0, XO2=9, Rc=0) + mulhdux = XO(31, OE=0, XO2=9, Rc=1) + + mulld = XO(31, OE=0, XO2=233, Rc=0) + mulldx = XO(31, OE=0, XO2=233, Rc=1) + mulldo = XO(31, OE=1, XO2=233, Rc=0) + mulldox = XO(31, OE=1, XO2=233, Rc=1) + + mulhw = XO(31, OE=0, XO2=75, Rc=0) + mulhwx = XO(31, OE=0, XO2=75, Rc=1) + + mulhwu = XO(31, OE=0, XO2=11, Rc=0) + mulhwux = XO(31, OE=0, XO2=11, Rc=1) + + mullw = XO(31, OE=0, XO2=235, Rc=0) + mullwx = XO(31, OE=0, XO2=235, Rc=1) + mullwo = XO(31, OE=1, XO2=235, Rc=0) + mullwox = XO(31, OE=1, XO2=235, Rc=1) + + nand = XS(31, XO1=476, Rc=0) + nandx = XS(31, XO1=476, Rc=1) + + neg = XO0(31, OE=0, XO2=104, Rc=0) + negx = XO0(31, OE=0, XO2=104, Rc=1) + nego = XO0(31, OE=1, XO2=104, Rc=0) + negox = XO0(31, OE=1, XO2=104, Rc=1) + + nor = XS(31, XO1=124, Rc=0) + norx = XS(31, XO1=124, Rc=1) + + or_ = XS(31, XO1=444, Rc=0) + or_x = XS(31, XO1=444, Rc=1) + + orc = XS(31, XO1=412, Rc=0) + orcx = XS(31, XO1=412, Rc=1) + + rfi = X(19, XO1=50) + + rfid = X(19, XO1=18) + + rldcl = MDS(30, XO7=8, Rc=0) + rldclx = MDS(30, XO7=8, Rc=1) + rldcr = MDS(30, XO7=9, Rc=0) + rldcrx = MDS(30, XO7=9, Rc=1) + + rldic = MDI(30, XO5=2, Rc=0) + rldicx = MDI(30, XO5=2, Rc=1) + rldicl = MDI(30, XO5=0, Rc=0) + rldiclx = MDI(30, XO5=0, Rc=1) + rldicr = MDI(30, 
XO5=1, Rc=0) + rldicrx = MDI(30, XO5=1, Rc=1) + rldimi = MDI(30, XO5=3, Rc=0) + rldimix = MDI(30, XO5=3, Rc=1) + + rlwimi = MI(20, Rc=0) + rlwimix = MI(20, Rc=1) + + rlwinm = MI(21, Rc=0) + rlwinmx = MI(21, Rc=1) + + rlwnm = MB(23, Rc=0) + rlwnmx = MB(23, Rc=1) + + sld = XS(31, XO1=27, Rc=0) + sldx = XS(31, XO1=27, Rc=1) + + slw = XS(31, XO1=24, Rc=0) + slwx = XS(31, XO1=24, Rc=1) + + srad = XS(31, XO1=794, Rc=0) + sradx = XS(31, XO1=794, Rc=1) + + sradi = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=0) + sradix = Form("rA", "rS", "SH", "XO6", "sh", "Rc")(31, XO6=413, Rc=1) + + sraw = XS(31, XO1=792, Rc=0) + srawx = XS(31, XO1=792, Rc=1) + + srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0) + srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1) + + srd = XS(31, XO1=539, Rc=0) + srdx = XS(31, XO1=539, Rc=1) + + srw = XS(31, XO1=536, Rc=0) + srwx = XS(31, XO1=536, Rc=1) + + stbux = XSO(31, XO1=247) + stbx = XSO(31, XO1=215) + stdcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=214, Rc=1) + stdux = XSO(31, XO1=181) + stdx = XSO(31, XO1=149) + stfdux = XSO(31, XO1=759) + stfdx = XSO(31, XO1=727) + stfiwx = XSO(31, XO1=983) + stfsux = XSO(31, XO1=695) + stfsx = XSO(31, XO1=663) + sthbrx = XSO(31, XO1=918) + sthux = XSO(31, XO1=439) + sthx = XSO(31, XO1=407) + stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725) + stswx = XSO(31, XO1=661) + stwbrx = XSO(31, XO1=662) + stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1) + stwux = XSO(31, XO1=183) + stwx = XSO(31, XO1=151) + + subf = XO(31, XO2=40, OE=0, Rc=0) + subfx = XO(31, XO2=40, OE=0, Rc=1) + subfo = XO(31, XO2=40, OE=1, Rc=0) + subfox = XO(31, XO2=40, OE=1, Rc=1) + + subfc = XO(31, XO2=8, OE=0, Rc=0) + subfcx = XO(31, XO2=8, OE=0, Rc=1) + subfco = XO(31, XO2=8, OE=1, Rc=0) + subfcox = XO(31, XO2=8, OE=1, Rc=1) + + subfe = XO(31, XO2=136, OE=0, Rc=0) + subfex = XO(31, XO2=136, OE=0, Rc=1) + subfeo = XO(31, XO2=136, OE=1, Rc=0) + subfeox = XO(31, XO2=136, OE=1, Rc=1) + + 
subfme = XO0(31, OE=0, XO2=232, Rc=0) + subfmex = XO0(31, OE=0, XO2=232, Rc=1) + subfmeo = XO0(31, OE=1, XO2=232, Rc=0) + subfmeox= XO0(31, OE=1, XO2=232, Rc=1) + + subfze = XO0(31, OE=0, XO2=200, Rc=0) + subfzex = XO0(31, OE=0, XO2=200, Rc=1) + subfzeo = XO0(31, OE=1, XO2=200, Rc=0) + subfzeox= XO0(31, OE=1, XO2=200, Rc=1) + + sync = XLL(31, LL=0, XO1=598) + lwsync = XLL(31, LL=1, XO1=598) + + tlbia = X(31, XO1=370) + tlbie = Form("rB", "XO1")(31, XO1=306) + tlbsync = X(31, XO1=566) + + td = Form("TO", "rA", "rB", "XO1")(31, XO1=68) + tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4) + + xor = XS(31, XO1=316, Rc=0) + xorx = XS(31, XO1=316, Rc=1) + +class PPCAssembler(BasicPPCAssembler): + BA = BasicPPCAssembler + + # awkward mnemonics: + # mftb + # most of the branch mnemonics... + + # F.2 Simplified Mnemonics for Subtract Instructions + + def subi(self, rD, rA, value): + self.addi(rD, rA, -value) + def subis(self, rD, rA, value): + self.addis(rD, rA, -value) + def subic(self, rD, rA, value): + self.addic(rD, rA, -value) + def subicx(self, rD, rA, value): + self.addicx(rD, rA, -value) + + def sub(self, rD, rA, rB): + self.subf(rD, rB, rA) + def subc(self, rD, rA, rB): + self.subfc(rD, rB, rA) + def subx(self, rD, rA, rB): + self.subfx(rD, rB, rA) + def subcx(self, rD, rA, rB): + self.subfcx(rD, rB, rA) + def subo(self, rD, rA, rB): + self.subfo(rD, rB, rA) + def subco(self, rD, rA, rB): + self.subfco(rD, rB, rA) + def subox(self, rD, rA, rB): + self.subfox(rD, rB, rA) + def subcox(self, rD, rA, rB): + self.subfcox(rD, rB, rA) + + # F.3 Simplified Mnemonics for Compare Instructions + + cmpdi = BA.cmpi(L=1) + cmpwi = BA.cmpi(L=0) + cmpldi = BA.cmpli(L=1) + cmplwi = BA.cmpli(L=0) + cmpd = BA.cmp(L=1) + cmpw = BA.cmp(L=0) + cmpld = BA.cmpl(L=1) + cmplw = BA.cmpl(L=0) + + # F.4 Simplified Mnemonics for Rotate and Shift Instructions + + def extlwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b, 0, n-1) + + def extrwi(self, rA, rS, n, b): + self.rlwinm(rA, rS, b+n, 32-n, 31) + 
+ def inslwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-b, b, b + n -1) + + def insrwi(self, rA, rS, n, b): + self.rwlimi(rA, rS, 32-(b+n), b, b + n -1) + + def rotlwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31) + + def rotrwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, 0, 31) + + def rotlw(self, rA, rS, rB): + self.rlwnm(rA, rS, rB, 0, 31) + + def slwi(self, rA, rS, n): + self.rlwinm(rA, rS, n, 0, 31-n) + + def srwi(self, rA, rS, n): + self.rlwinm(rA, rS, 32-n, n, 31) + + def sldi(self, rA, rS, n): + self.rldicr(rA, rS, n, 63-n) + + def srdi(self, rA, rS, n): + self.rldicl(rA, rS, 64-n, n) + + # F.5 Simplified Mnemonics for Branch Instructions + + # there's a lot of these! + bt = BA.bc(BO=12) + bf = BA.bc(BO=4) + bdnz = BA.bc(BO=16, BI=0) + bdnzt = BA.bc(BO=8) + bdnzf = BA.bc(BO=0) + bdz = BA.bc(BO=18, BI=0) + bdzt = BA.bc(BO=10) + bdzf = BA.bc(BO=2) + + bta = BA.bca(BO=12) + bfa = BA.bca(BO=4) + bdnza = BA.bca(BO=16, BI=0) + bdnzta = BA.bca(BO=8) + bdnzfa = BA.bca(BO=0) + bdza = BA.bca(BO=18, BI=0) + bdzta = BA.bca(BO=10) + bdzfa = BA.bca(BO=2) + + btl = BA.bcl(BO=12) + bfl = BA.bcl(BO=4) + bdnzl = BA.bcl(BO=16, BI=0) + bdnztl = BA.bcl(BO=8) + bdnzfl = BA.bcl(BO=0) + bdzl = BA.bcl(BO=18, BI=0) + bdztl = BA.bcl(BO=10) + bdzfl = BA.bcl(BO=2) + + btla = BA.bcla(BO=12) + bfla = BA.bcla(BO=4) + bdnzla = BA.bcla(BO=16, BI=0) + bdnztla = BA.bcla(BO=8) + bdnzfla = BA.bcla(BO=0) + bdzla = BA.bcla(BO=18, BI=0) + bdztla = BA.bcla(BO=10) + bdzfla = BA.bcla(BO=2) + + blr = BA.bclr(BO=20, BI=0) + btlr = BA.bclr(BO=12) + bflr = BA.bclr(BO=4) + bdnzlr = BA.bclr(BO=16, BI=0) + bdnztlr = BA.bclr(BO=8) + bdnzflr = BA.bclr(BO=0) + bdzlr = BA.bclr(BO=18, BI=0) + bdztlr = BA.bclr(BO=10) + bdzflr = BA.bclr(BO=2) + + bctr = BA.bcctr(BO=20, BI=0) + btctr = BA.bcctr(BO=12) + bfctr = BA.bcctr(BO=4) + + blrl = BA.bclrl(BO=20, BI=0) + btlrl = BA.bclrl(BO=12) + bflrl = BA.bclrl(BO=4) + bdnzlrl = BA.bclrl(BO=16, BI=0) + bdnztlrl = BA.bclrl(BO=8) + bdnzflrl = BA.bclrl(BO=0) + bdzlrl 
= BA.bclrl(BO=18, BI=0) + bdztlrl = BA.bclrl(BO=10) + bdzflrl = BA.bclrl(BO=2) + + bctrl = BA.bcctrl(BO=20, BI=0) + btctrl = BA.bcctrl(BO=12) + bfctrl = BA.bcctrl(BO=4) + + # these should/could take a[n optional] crf argument, but it's a + # bit hard to see how to arrange that. + + blt = BA.bc(BO=12, BI=0) + ble = BA.bc(BO=4, BI=1) + beq = BA.bc(BO=12, BI=2) + bge = BA.bc(BO=4, BI=0) + bgt = BA.bc(BO=12, BI=1) + bnl = BA.bc(BO=4, BI=0) + bne = BA.bc(BO=4, BI=2) + bng = BA.bc(BO=4, BI=1) + bso = BA.bc(BO=12, BI=3) + bns = BA.bc(BO=4, BI=3) + bun = BA.bc(BO=12, BI=3) + bnu = BA.bc(BO=4, BI=3) + + blta = BA.bca(BO=12, BI=0) + blea = BA.bca(BO=4, BI=1) + beqa = BA.bca(BO=12, BI=2) + bgea = BA.bca(BO=4, BI=0) + bgta = BA.bca(BO=12, BI=1) + bnla = BA.bca(BO=4, BI=0) + bnea = BA.bca(BO=4, BI=2) + bnga = BA.bca(BO=4, BI=1) + bsoa = BA.bca(BO=12, BI=3) + bnsa = BA.bca(BO=4, BI=3) + buna = BA.bca(BO=12, BI=3) + bnua = BA.bca(BO=4, BI=3) + + bltl = BA.bcl(BO=12, BI=0) + blel = BA.bcl(BO=4, BI=1) + beql = BA.bcl(BO=12, BI=2) + bgel = BA.bcl(BO=4, BI=0) + bgtl = BA.bcl(BO=12, BI=1) + bnll = BA.bcl(BO=4, BI=0) + bnel = BA.bcl(BO=4, BI=2) + bngl = BA.bcl(BO=4, BI=1) + bsol = BA.bcl(BO=12, BI=3) + bnsl = BA.bcl(BO=4, BI=3) + bunl = BA.bcl(BO=12, BI=3) + bnul = BA.bcl(BO=4, BI=3) + + bltla = BA.bcla(BO=12, BI=0) + blela = BA.bcla(BO=4, BI=1) + beqla = BA.bcla(BO=12, BI=2) + bgela = BA.bcla(BO=4, BI=0) + bgtla = BA.bcla(BO=12, BI=1) + bnlla = BA.bcla(BO=4, BI=0) + bnela = BA.bcla(BO=4, BI=2) + bngla = BA.bcla(BO=4, BI=1) + bsola = BA.bcla(BO=12, BI=3) + bnsla = BA.bcla(BO=4, BI=3) + bunla = BA.bcla(BO=12, BI=3) + bnula = BA.bcla(BO=4, BI=3) + + bltlr = BA.bclr(BO=12, BI=0) + blelr = BA.bclr(BO=4, BI=1) + beqlr = BA.bclr(BO=12, BI=2) + bgelr = BA.bclr(BO=4, BI=0) + bgtlr = BA.bclr(BO=12, BI=1) + bnllr = BA.bclr(BO=4, BI=0) + bnelr = BA.bclr(BO=4, BI=2) + bnglr = BA.bclr(BO=4, BI=1) + bsolr = BA.bclr(BO=12, BI=3) + bnslr = BA.bclr(BO=4, BI=3) + bunlr = BA.bclr(BO=12, BI=3) + bnulr = 
BA.bclr(BO=4, BI=3) + + bltctr = BA.bcctr(BO=12, BI=0) + blectr = BA.bcctr(BO=4, BI=1) + beqctr = BA.bcctr(BO=12, BI=2) + bgectr = BA.bcctr(BO=4, BI=0) + bgtctr = BA.bcctr(BO=12, BI=1) + bnlctr = BA.bcctr(BO=4, BI=0) + bnectr = BA.bcctr(BO=4, BI=2) + bngctr = BA.bcctr(BO=4, BI=1) + bsoctr = BA.bcctr(BO=12, BI=3) + bnsctr = BA.bcctr(BO=4, BI=3) + bunctr = BA.bcctr(BO=12, BI=3) + bnuctr = BA.bcctr(BO=4, BI=3) + + bltlrl = BA.bclrl(BO=12, BI=0) + blelrl = BA.bclrl(BO=4, BI=1) + beqlrl = BA.bclrl(BO=12, BI=2) + bgelrl = BA.bclrl(BO=4, BI=0) + bgtlrl = BA.bclrl(BO=12, BI=1) + bnllrl = BA.bclrl(BO=4, BI=0) + bnelrl = BA.bclrl(BO=4, BI=2) + bnglrl = BA.bclrl(BO=4, BI=1) + bsolrl = BA.bclrl(BO=12, BI=3) + bnslrl = BA.bclrl(BO=4, BI=3) + bunlrl = BA.bclrl(BO=12, BI=3) + bnulrl = BA.bclrl(BO=4, BI=3) + + bltctrl = BA.bcctrl(BO=12, BI=0) + blectrl = BA.bcctrl(BO=4, BI=1) + beqctrl = BA.bcctrl(BO=12, BI=2) + bgectrl = BA.bcctrl(BO=4, BI=0) + bgtctrl = BA.bcctrl(BO=12, BI=1) + bnlctrl = BA.bcctrl(BO=4, BI=0) + bnectrl = BA.bcctrl(BO=4, BI=2) + bngctrl = BA.bcctrl(BO=4, BI=1) + bsoctrl = BA.bcctrl(BO=12, BI=3) + bnsctrl = BA.bcctrl(BO=4, BI=3) + bunctrl = BA.bcctrl(BO=12, BI=3) + bnuctrl = BA.bcctrl(BO=4, BI=3) + + # whew! and we haven't even begun the predicted versions... 
+ + # F.6 Simplified Mnemonics for Condition Register + # Logical Instructions + + crset = BA.creqv(crbA="crbD", crbB="crbD") + crclr = BA.crxor(crbA="crbD", crbB="crbD") + crmove = BA.cror(crbA="crbB") + crnot = BA.crnor(crbA="crbB") + + # F.7 Simplified Mnemonics for Trap Instructions + + trap = BA.tw(TO=31, rA=0, rB=0) + twlt = BA.tw(TO=16) + twle = BA.tw(TO=20) + tweq = BA.tw(TO=4) + twge = BA.tw(TO=12) + twgt = BA.tw(TO=8) + twnl = BA.tw(TO=12) + twng = BA.tw(TO=24) + twllt = BA.tw(TO=2) + twlle = BA.tw(TO=6) + twlge = BA.tw(TO=5) + twlgt = BA.tw(TO=1) + twlnl = BA.tw(TO=5) + twlng = BA.tw(TO=6) + + twlti = BA.twi(TO=16) + twlei = BA.twi(TO=20) + tweqi = BA.twi(TO=4) + twgei = BA.twi(TO=12) + twgti = BA.twi(TO=8) + twnli = BA.twi(TO=12) + twnei = BA.twi(TO=24) + twngi = BA.twi(TO=20) + twllti = BA.twi(TO=2) + twllei = BA.twi(TO=6) + twlgei = BA.twi(TO=5) + twlgti = BA.twi(TO=1) + twlnli = BA.twi(TO=5) + twlngi = BA.twi(TO=6) + + # F.8 Simplified Mnemonics for Special-Purpose + # Registers + + mfctr = BA.mfspr(spr=9) + mflr = BA.mfspr(spr=8) + mftbl = BA.mftb(spr=268) + mftbu = BA.mftb(spr=269) + mfxer = BA.mfspr(spr=1) + + mtctr = BA.mtspr(spr=9) + mtlr = BA.mtspr(spr=8) + mtxer = BA.mtspr(spr=1) + + # F.9 Recommended Simplified Mnemonics + + nop = BA.ori(rS=0, rA=0, UIMM=0) + + li = BA.addi(rA=0) + lis = BA.addis(rA=0) + + mr = BA.or_(rB="rS") + mrx = BA.or_x(rB="rS") + + not_ = BA.nor(rB="rS") + not_x = BA.norx(rB="rS") + + mtcr = BA.mtcrf(CRM=0xFF) + +PPCAssembler = make_rassembler(PPCAssembler) + +def hi(w): + return w >> 16 + +def ha(w): + if (w >> 15) & 1: + return (w >> 16) + 1 + else: + return w >> 16 + +def lo(w): + return w & 0x0000FFFF + +def la(w): + v = w & 0x0000FFFF + if v & 0x8000: + return -((v ^ 0xFFFF) + 1) # "sign extend" to 32 bits + return v + +def highest(w): + return w >> 48 + +def higher(w): + return (w >> 32) & 0x0000FFFF + +def high(w): + return (w >> 16) & 0x0000FFFF + +_eci = ExternalCompilationInfo(post_include_bits=[ + '#define 
rpython_flush_icache() asm("isync":::"memory")\n' + ]) +flush_icache = rffi.llexternal( + "rpython_flush_icache", + [], + lltype.Void, + compilation_info=_eci, + _nowrapper=True, + sandboxsafe=True) + + +class PPCGuardToken(GuardToken): + def __init__(self, cpu, gcmap, descr, failargs, faillocs, + guard_opnum, frame_depth, fcond=c.cond_none): + GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, + guard_opnum, frame_depth) + self.fcond = fcond + + +class OverwritingBuilder(PPCAssembler): + def __init__(self, mc, start, num_insts=0): + PPCAssembler.__init__(self) + self.mc = mc + self.index = start + + def currpos(self): + assert 0, "not implemented" + + def write32(self, word): + index = self.index + if IS_BIG_ENDIAN: + self.mc.overwrite(index, chr((word >> 24) & 0xff)) + self.mc.overwrite(index + 1, chr((word >> 16) & 0xff)) + self.mc.overwrite(index + 2, chr((word >> 8) & 0xff)) + self.mc.overwrite(index + 3, chr(word & 0xff)) + elif IS_LITTLE_ENDIAN: + self.mc.overwrite(index , chr(word & 0xff)) + self.mc.overwrite(index + 1, chr((word >> 8) & 0xff)) + self.mc.overwrite(index + 2, chr((word >> 16) & 0xff)) + self.mc.overwrite(index + 3, chr((word >> 24) & 0xff)) + self.index = index + 4 + + def overwrite(self): + pass + +class PPCBuilder(BlockBuilderMixin, PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + self.ops_offset = {} + + def mark_op(self, op): + pos = self.get_relative_pos() + self.ops_offset[op] = pos + + def check(self, desc, v, *args): + desc.__get__(self)(*args) + ins = self.insts.pop() + expected = ins.assemble() + if expected < 0: + expected += 1<<32 + assert v == expected + + def load_imm(self, dest_reg, word): + rD = dest_reg.value + if word <= 32767 and word >= -32768: + self.li(rD, word) + elif IS_PPC_32 or (word <= 2147483647 and word >= -2147483648): + self.lis(rD, hi(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + else: + self.load_imm(dest_reg, word>>32) + 
self.sldi(rD, rD, 32) + if word & 0xFFFF0000 != 0: + self.oris(rD, rD, high(word)) + if word & 0xFFFF != 0: + self.ori(rD, rD, lo(word)) + + def load_imm_plus(self, dest_reg, word): + """Like load_imm(), but with one instruction less, and + leaves the loaded value off by some signed 16-bit difference. + Returns that difference.""" + diff = rffi.cast(lltype.Signed, rffi.cast(rffi.SHORT, word)) + word -= diff + assert word & 0xFFFF == 0 + self.load_imm(dest_reg, word) + return diff + + def load_from_addr(self, rD, addr): + assert rD is not r.r0 + diff = self.load_imm_plus(rD, addr) + if IS_PPC_32: + self.lwz(rD.value, rD.value, diff) + else: + self.ld(rD.value, rD.value, diff) + + def b_offset(self, target): + curpos = self.currpos() + offset = target - curpos + assert offset < (1 << 24) + self.b(offset) + + def b_cond_offset(self, offset, condition): + assert condition != c.cond_none + BI, BO = c.encoding[condition] + + pos = self.currpos() + target_ofs = offset - pos + self.bc(BO, BI, target_ofs) + + def b_cond_abs(self, addr, condition): + assert condition != c.cond_none + BI, BO = c.encoding[condition] + + with scratch_reg(self): + self.load_imm(r.SCRATCH, addr) + self.mtctr(r.SCRATCH.value) + self.bcctr(BO, BI) + + def b_abs(self, address, trap=False): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + if trap: + self.trap() + self.bctr() + + def bl_abs(self, address): + with scratch_reg(self): + self.load_imm(r.SCRATCH, address) + self.mtctr(r.SCRATCH.value) + self.bctrl() + + if IS_BIG_ENDIAN: + RAW_CALL_REG = r.r2 + else: + RAW_CALL_REG = r.r12 + + def raw_call(self, call_reg=RAW_CALL_REG): + """Emit a call to the address stored in the register 'call_reg', + which must be either RAW_CALL_REG or r12. This is a regular C + function pointer, which means on big-endian that it is actually + the address of a three-words descriptor. 
+ """ + if IS_BIG_ENDIAN: + # Load the function descriptor (currently in r2) from memory: + # [r2 + 0] -> ctr + # [r2 + 16] -> r11 + # [r2 + 8] -> r2 (= TOC) + assert self.RAW_CALL_REG is r.r2 + assert call_reg is r.r2 or call_reg is r.r12 + self.ld(r.SCRATCH.value, call_reg.value, 0) + self.ld(r.r11.value, call_reg.value, 16) + self.mtctr(r.SCRATCH.value) + self.ld(r.TOC.value, call_reg.value, 8) # must be last: TOC is r2 + elif IS_LITTLE_ENDIAN: + assert self.RAW_CALL_REG is r.r12 # 'r12' is fixed by this ABI + assert call_reg is r.r12 + self.mtctr(r.r12.value) + # Call the function + self.bctrl() + + + def load(self, target_reg, base_reg, offset): + if IS_PPC_32: + self.lwz(target_reg, base_reg, offset) + else: + self.ld(target_reg, base_reg, offset) + + def loadx(self, target_reg, base_reg, offset_reg): + if IS_PPC_32: + self.lwzx(target_reg, base_reg, offset_reg) + else: + self.ldx(target_reg, base_reg, offset_reg) + + def store(self, from_reg, base_reg, offset): + if IS_PPC_32: + self.stw(from_reg, base_reg, offset) + else: + self.std(from_reg, base_reg, offset) + + def storex(self, from_reg, base_reg, offset_reg): + if IS_PPC_32: + self.stwx(from_reg, base_reg, offset_reg) + else: + self.stdx(from_reg, base_reg, offset_reg) + + def store_update(self, target_reg, from_reg, offset): + if IS_PPC_32: + self.stwu(target_reg, from_reg, offset) + else: + self.stdu(target_reg, from_reg, offset) + + def srli_op(self, target_reg, from_reg, numbits): + if IS_PPC_32: + self.srwi(target_reg, from_reg, numbits) + else: + self.srdi(target_reg, from_reg, numbits) + + def sl_op(self, target_reg, from_reg, numbit_reg): + if IS_PPC_32: + self.slw(target_reg, from_reg, numbit_reg) + else: + self.sld(target_reg, from_reg, numbit_reg) + + def _dump_trace(self, addr, name, formatter=-1): + if not we_are_translated(): + if formatter != -1: + name = name % formatter + dir = udir.ensure('asm', dir=True) + f = dir.join(name).open('wb') + data = rffi.cast(rffi.CCHARP, addr) + for i in 
range(self.currpos()): + f.write(data[i]) + f.close() + + def write32(self, word): + if IS_BIG_ENDIAN: + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + elif IS_LITTLE_ENDIAN: + self.writechar(chr(word & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + + def write64(self, word): + if IS_BIG_ENDIAN: + self.writechar(chr((word >> 56) & 0xFF)) + self.writechar(chr((word >> 48) & 0xFF)) + self.writechar(chr((word >> 40) & 0xFF)) + self.writechar(chr((word >> 32) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) + elif IS_LITTLE_ENDIAN: + self.writechar(chr(word & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 32) & 0xFF)) + self.writechar(chr((word >> 40) & 0xFF)) + self.writechar(chr((word >> 48) & 0xFF)) + self.writechar(chr((word >> 56) & 0xFF)) + + def currpos(self): + return self.get_relative_pos() + + def copy_to_raw_memory(self, addr): + self._copy_to_raw_memory(addr) + if we_are_translated(): + flush_icache() + self._dump(addr, "jit-backend-dump", 'ppc') + + def cmp_op(self, block, a, b, imm=False, signed=True, fp=False): + if fp == True: + self.fcmpu(block, a, b) + elif IS_PPC_32: + if signed: + if imm: + # 32 bit immediate signed + self.cmpwi(block, a, b) + else: + # 32 bit signed + self.cmpw(block, a, b) + else: + if imm: + # 32 bit immediate unsigned + self.cmplwi(block, a, b) + else: + # 32 bit unsigned + self.cmplw(block, a, b) + else: + if signed: + if imm: + # 64 bit immediate signed + self.cmpdi(block, a, b) + else: + # 64 bit signed + self.cmpd(block, a, b) + else: + if imm: + # 64 bit immediate unsigned + 
self.cmpldi(block, a, b) + else: + # 64 bit unsigned + self.cmpld(block, a, b) + + def alloc_scratch_reg(self): + pass + #assert not self.r0_in_use + #self.r0_in_use = True + + def free_scratch_reg(self): + pass + #assert self.r0_in_use + #self.r0_in_use = False + + def get_assembler_function(self): + "NOT_RPYTHON: tests only" + from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager + class FakeCPU: + HAS_CODEMAP = False + asmmemmgr = AsmMemoryManager() + addr = self.materialize(FakeCPU(), []) + if IS_BIG_ENDIAN: + mc = PPCBuilder() + mc.write64(addr) # the 3-words descriptor + mc.write64(0) + mc.write64(0) + addr = mc.materialize(FakeCPU(), []) + return rffi.cast(lltype.Ptr(lltype.FuncType([], lltype.Signed)), addr) + + +class scratch_reg(object): + def __init__(self, mc): + self.mc = mc + + def __enter__(self): + self.mc.alloc_scratch_reg() + + def __exit__(self, *args): + self.mc.free_scratch_reg() + +class BranchUpdater(PPCAssembler): + def __init__(self): + PPCAssembler.__init__(self) + self.init_block_builder() + + def write_to_mem(self, addr): + self.assemble() + self.copy_to_raw_memory(addr) + + def assemble(self, dump=os.environ.has_key('PYPY_DEBUG')): + insns = self.assemble0(dump) + for i in insns: + self.emit(i) + +def b(n): + r = [] + for i in range(32): + r.append(n&1) + n >>= 1 + r.reverse() + return ''.join(map(str, r)) + +def make_operations(): + def not_implemented(builder, trace_op, cpu, *rest_args): + import pdb; pdb.set_trace() + + oplist = [None] * (rop._LAST + 1) + for key, val in rop.__dict__.items(): + if key.startswith("_"): + continue + opname = key.lower() + methname = "emit_%s" % opname + if hasattr(PPCBuilder, methname): + oplist[val] = getattr(PPCBuilder, methname).im_func + else: + oplist[val] = not_implemented + return oplist + +PPCBuilder.operations = make_operations() diff --git a/rpython/jit/backend/ppc/condition.py b/rpython/jit/backend/ppc/condition.py new file mode 100644 --- /dev/null +++ 
b/rpython/jit/backend/ppc/condition.py @@ -0,0 +1,32 @@ +EQ = 0 +NE = 1 +LE = 2 +GT = 3 +LT = 4 +GE = 5 +SO = 6 +NS = 7 +cond_none = -1 # invalid + +def negate(cond): + return cond ^ 1 + +assert negate(EQ) == NE From noreply at buildbot.pypy.org Fri Oct 16 13:40:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 13:40:19 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: tweaks Message-ID: <20151016114019.396B71C01DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80271:5bfafb68e4e1 Date: 2015-10-16 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/5bfafb68e4e1/ Log: tweaks diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst --- a/pypy/doc/discussion/rawrefcount.rst +++ b/pypy/doc/discussion/rawrefcount.rst @@ -39,9 +39,8 @@ rawrefcount.from_obj(p) - If there is a link from object 'p', and 'p' is not a - W_CPyExtPlaceHolderObject, returns the corresponding 'ob'. - Otherwise, returns NULL. + If there is a link from object 'p' made with create_link_pypy(), + returns the corresponding 'ob'. Otherwise, returns NULL. rawrefcount.to_obj(Class, ob) @@ -141,10 +140,13 @@ PyPy side. In this case, the PyTupleObject needs to hold real references to the PyObject items, and we use create_link_pypy()/ REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects -that we can return from PySequence_Fast_ITEMS. +that we can directly return from PySequence_Fast_ITEMS, PyTuple_ITEMS, +PyTuple_GetItem, and so on. For objects coming from PyPy, we can use a cpyext list strategy. The list turns into a PyListObject, as if it had been allocated from C in the first place. The special strategy can hold -(only) a direct reference to the PyListObject, and we can use -create_link_pyobj(). PySequence_Fast_ITEMS then works for lists too. +(only) a direct reference to the PyListObject, and we can use either +create_link_pyobj() or create_link_pypy() (to be decided). 
+PySequence_Fast_ITEMS then works for lists too, and PyList_GetItem +can return a borrowed reference, and so on. From noreply at buildbot.pypy.org Fri Oct 16 15:42:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 16 Oct 2015 15:42:11 +0200 (CEST) Subject: [pypy-commit] pypy default: vectorization is disabled by default, also remove slightly inaccurate connection between optresult-unroll and minor JIT slowdown Message-ID: <20151016134211.6E1AB1C1230@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80272:bd3de357fc95 Date: 2015-10-16 16:42 +0300 http://bitbucket.org/pypy/pypy/changeset/bd3de357fc95/ Log: vectorization is disabled by default, also remove slightly inaccurate connection between optresult-unroll and minor JIT slowdown diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -5,7 +5,8 @@ We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy python2.7.10 compatible interpreter with a Just In Time compiler. We have improved `warmup time and memory overhead used for tracing`_, added -`vectorization`_ for numpy and general loops where possible on x86 hardware, +`vectorization`_ for numpy and general loops where possible on x86 hardware +(disabled by default), refactored rough edges in rpython, and increased functionality of numpy. You can download the PyPy 15.11 release here: @@ -35,22 +36,26 @@ Availability of SIMD hardware is detected at run time, without needing to precompile various code paths into the executable. +The first version of the vectorization has been merged in this release, since +it is so new it is off by default. 
To enable the vectorization in built-in JIT +drivers (like numpy ufuncs), add `--jit vec=1`, to enable all implemented +vectorization add `--jit vec_all=1` + Internal Refactoring and Warmup Time Improvement ================================================ Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, -leading to a warmup time improvement of 20% or so at the cost of a minor -regression in jitted code speed. +leading to a warmup time improvement of 20% or so. Numpy ===== -Our implementation of numpy continues to improve. ndarray and the numeric dtypes +Our implementation of `numpy`_ continues to improve. ndarray and the numeric dtypes are very close to feature-complete; record, string and unicode dtypes are mostly supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 modules that call out to the same underlying libraries that upstream numpy uses. -Please try it out, especially using the new vectorization (via --jit vec=1 on the +Please try it out, especially using the new vectorization (via `--jit vec=1` on the command line) and let us know what is missing for your code. CFFI @@ -64,12 +69,12 @@ .. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html - .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy What is PyPy? 
============= From noreply at buildbot.pypy.org Fri Oct 16 16:12:56 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 16:12:56 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: work in progress commit, added argument types to provide correct input for the test cases Message-ID: <20151016141256.6E4161C01DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80273:caf09586d9e8 Date: 2015-10-16 15:56 +0200 http://bitbucket.org/pypy/pypy/changeset/caf09586d9e8/ Log: work in progress commit, added argument types to provide correct input for the test cases diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -27,24 +27,31 @@ class Operand(object): pass -def arguments(args_str): - """ - Available names: - r - register - i4 - immediate 4 bits (signed) - u4 - immediate 4 bits (unsigend) - bd - base displacement - l4db - length base displacement (4 bit) - l8db - length base displacement (8 bit) - """ - def impl(func): - func._arguments_ = args_str.split(',') - return func - return impl +class builder(object): + """ NOT_RPYTHON """ + @staticmethod + def arguments(args_str): + """ NOT_RPYTHON """ + """ + Available names: + r - register + r/m - register or mask + iX - immediate X bits (signed) + uX - immediate X bits (unsigend) + bd - base displacement + ibd - index base displacement + l4bd - length base displacement (4 bit) + l8bd - length base displacement (8 bit) + """ + def impl(func): + func._arguments_ = args_str.split(',') + return func + return impl BIT_MASK_4 = 0xF BIT_MASK_12 = 0xFFF BIT_MASK_20 = 0xFFFFF +BIT_MASK_32 = 0xFFFFFFFF @always_inline def encode_base_displace(mc, base_displace): @@ -55,6 +62,7 @@ mc.writechar(chr(displace & 0xff)) def build_rr(mnemonic, (opcode,)): + @builder.arguments('r,r') def encode_rr(self, reg1, reg2): self.writechar(opcode) 
operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) @@ -63,6 +71,7 @@ def build_rre(mnemonic, (opcode,)): opcode1,opcode2 = opcode + @builder.arguments('r,r') def encode_rr(self, reg1, reg2): self.writechar(opcode1) self.writechar(opcode2) @@ -72,6 +81,7 @@ return encode_rr def build_rx(mnemonic, (opcode,)): + @builder.arguments('r/m,ibd') def encode_rx(self, reg_or_mask, idxbasedisp): self.writechar(opcode) index = idxbasedisp.index @@ -85,6 +95,7 @@ return encode_rx def build_rxy(mnemonic, (opcode1,opcode2)): + @builder.arguments('r/m,ibdl') def encode_rxy(self, reg_or_mask, idxbasedisp): self.writechar(opcode1) index = idxbasedisp.index @@ -101,6 +112,7 @@ return encode_rxy def build_ri(mnemonic, (opcode,halfopcode)): + @builder.arguments('r/m,i16') def encode_ri(self, reg_or_mask, imm16): self.writechar(opcode) byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) @@ -109,7 +121,18 @@ self.writechar(chr(imm16 & 0xff)) return encode_ri +def build_ril(mnemonic, (opcode,halfopcode)): + @builder.arguments('r/m,a32') + def encode_ri(self, reg_or_mask, imm32): + self.writechar(opcode) + byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) + self.writechar(chr(byte)) + self.write_s32(imm32) + return encode_ri + + def build_si(mnemonic, (opcode,)): + @builder.arguments('bd,u8') def encode_si(self, base_displace, uimm8): self.writechar(opcode) self.writechar(chr(uimm8)) @@ -117,6 +140,7 @@ return encode_si def build_siy(mnemonic, (opcode1,opcode2)): + @builder.arguments('bd,u8') def encode_siy(self, base_displace, uimm8): self.writechar(opcode1) self.writechar(chr(uimm8)) @@ -127,6 +151,7 @@ return encode_siy def build_ssa(mnemonic, (opcode1,)): + @builder.arguments('l8bd,bd') def encode_ssa(self, len_base_disp, base_displace): self.writechar(opcode1) self.writechar(chr(len_base_disp.length & 0xff)) @@ -135,6 +160,7 @@ return encode_ssa def build_ssb(mnemonic, (opcode1,)): + @builder.arguments('l8bd,l8bd') def encode_ssb(self, len_base_disp1, len_base_disp2): 
self.writechar(opcode1) byte = (len_base_disp1.length & 0xf) << 4 | len_base_disp2.length & 0xf @@ -144,8 +170,8 @@ return encode_ssb def build_ssc(mnemonic, (opcode1,)): - @arguments('lbp,lbp,u4') - def encode_ssc(self, len_base_disp1, len_base_disp2, uimm4): + @builder.arguments('u4,l4bd,l4bd') + def encode_ssc(self, uimm4, len_base_disp1, len_base_disp2): self.writechar(opcode1) byte = (len_base_disp1.length & 0xf) << 4 | uimm4 & 0xf self.writechar(chr(byte)) @@ -154,7 +180,7 @@ return encode_ssc def build_ssd(mnemonic, (opcode,)): - @arguments('rbd,bd,r') + @builder.arguments('ibd,bd,r') def encode_ssd(self, index_base_disp, base_disp, reg): self.writechar(opcode) byte = (index_base_disp.index & 0xf) << 4 | reg & 0xf @@ -164,7 +190,7 @@ return encode_ssd def build_sse(mnemonic, (opcode,)): - @arguments('r,bd,r,bd') + @builder.arguments('r,r,bd,bd') def encode_sse(self, reg1, reg3, base_disp2, base_disp4): self.writechar(opcode) byte = (reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4 @@ -174,6 +200,7 @@ return encode_sse def build_ssf(mnemonic, (opcode,)): + @builder.arguments('bd,l8bd') def encode_ssf(self, base_disp, len_base_disp): self.writechar(opcode) self.writechar(chr(len_base_disp.length & 0xff)) @@ -198,25 +225,21 @@ 'MVCK': (build_ssd, ['\xD9']), 'LMD': (build_sse, ['\xEF']), 'PKA': (build_ssf, ['\xE9']), + 'BRASL': (build_ril, ['\xC0','\x05']), } def build_instr_codes(clazz): - for mnemonic, (builder, args) in _mnemonic_codes.items(): func = builder(mnemonic, args) name = mnemonic + "_" + builder.__name__.split("_")[1] setattr(clazz, name, func) class AbstractZARCHBuilder(object): - def write32(self, word): + def write_s32(self, word): + self.writechar(chr((word >> 24) & 0xFF)) + self.writechar(chr((word >> 16) & 0xFF)) + self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) - self.writechar(chr((word >> 8) & 0xFF)) - self.writechar(chr((word >> 16) & 0xFF)) - self.writechar(chr((word >> 24) & 0xFF)) - - def AR_rr(self, reg1, reg2): - 
self.writechar(chr(0x1A)) - self.writechar(encode_rr(reg1, reg2)) build_instr_codes(AbstractZARCHBuilder) @@ -262,8 +285,6 @@ def currpos(self): return self.get_relative_pos() -#define_instructions(AbstractARMBuilder) - _classes = (AbstractZARCHBuilder,) # Used to build the MachineCodeBlockWrapper diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -27,14 +27,11 @@ if (char == self.accept_unnecessary_prefix and self.index == self.instrindex): return # ignore the extra character '\x40' - print self.op - post = self.expected[self.index+1:self.index+1+15] + post = self.expected[self.index+1:self.index+15] generated = "\x09from codebuilder.py: " + hexdump(self.expected[self.instrindex:self.index]) + "!" + \ - hexdump([char])+ "!" +hexdump(post) - print generated + hexdump([char])+ "!" +hexdump(post) + "..." expected = "\x09from gnu as: " + hexdump(self.expected[self.instrindex:self.index+15])+"..." 
- print expected - raise Exception("Differs:\n" + generated + "\n" + expected) + raise Exception("Asm line:" + self.op + "\n" + generated + "\n" + expected) self.index += 1 def done(self): @@ -105,12 +102,15 @@ __repr__ = __str__ -def test_range(bits, signed=False, count=24): +def test_range(bits, signed=False, count=24, alignment=0): if isinstance(bits, tuple): bits, signed = bits if signed: bits -= 1 maximum = 2**bits + if alignment == 16: + # TODO + return [-32,-16,0,16,32] return [-maximum,-1,0,1,maximum-1] + [random.randrange(-maximum,maximum) for i in range(count)] maximum = 2**bits return [0,1,maximum-1] + [random.randrange(0,maximum) for i in range(count)] @@ -126,97 +126,52 @@ break return results +REGS = range(15+1) +REGNAMES = ['%%r%d' % i for i in REGS] +TEST_CASE_GENERATE = { + 'r': REGS, + 'r/m': REGS, + 'i4': test_range(4, signed=True), + 'i8': test_range(8, signed=True), + 'i16': test_range(16, signed=True), + 'i32': test_range(32, signed=True), + 'a32': test_range(32, signed=True, alignment=16), + 'i64': test_range(64, signed=True), + 'u4': test_range(4), + 'u8': test_range(8), + 'u16': test_range(16), + 'u32': test_range(32), + 'u64': test_range(64), + 'bd': build_fake(FakeBaseDisplace,4,12), + 'ibd': build_fake(FakeIndexBaseDisplace,4,4,12), + 'ibdl': build_fake(FakeIndexBaseDisplace,4,4,(20,True)), + 'l8bd': build_fake(FakeLengthBaseDisplace,8,4,12), + 'l4bd': build_fake(FakeLengthBaseDisplace,4,4,12), +} + class TestZARCH(object): WORD = 8 TESTDIR = 'zarch' - REGS = range(15+1) - REGNAMES = ['%%r%d' % i for i in REGS] accept_unnecessary_prefix = None methname = '?' 
- BASE_DISPLACE = build_fake(FakeBaseDisplace,4,12) - BASE_DISPLACE_LONG = build_fake(FakeBaseDisplace,4,(20,True)) - INDEX_BASE_DISPLACE = build_fake(FakeIndexBaseDisplace,4,4,12) - INDEX_BASE_DISPLACE_LONG = build_fake(FakeIndexBaseDisplace,4,4,(20,True)) - LENGTH4_BASE_DISPLACE = build_fake(FakeLengthBaseDisplace,4,4,12) - LENGTH8_BASE_DISPLACE = build_fake(FakeLengthBaseDisplace,8,4,12) - def reg_tests(self): - return self.REGS - - def stack_bp_tests(self, count=COUNT1): - return ([0, 4, -4, 124, 128, -128, -132] + - [random.randrange(-0x20000000, 0x20000000) * 4 - for i in range(count)]) - - def stack_sp_tests(self, count=COUNT1): - return ([0, 4, 124, 128] + - [random.randrange(0, 0x20000000) * 4 - for i in range(count)]) - - def memory_tests(self): - return [(reg, ofs) - for reg in self.NONSPECREGS - for ofs in self.stack_bp_tests(5) - ] - - def array_tests(self): - return [(reg1, reg2, scaleshift, ofs) - for reg1 in self.NONSPECREGS - for reg2 in self.NONSPECREGS - for scaleshift in [0, 1, 2, 3] - for ofs in self.stack_bp_tests(1) - ] - - def imm_tests(self, name, modes, index): + def get_func_arg_types(self, methodname): from rpython.jit.backend.zarch.codebuilder import AbstractZARCHBuilder import inspect - func = getattr(AbstractZARCHBuilder, name) - args = inspect.getargspec(func).args - # 1 off, self is first arg - match = re.compile("(u?imm\d+)").match(args[index+1]) - assert match - return getattr(self, match.group(1) + "_tests")() - - def uimm16_tests(self): return test_range(16) - def imm16_tests(self): return test_range(16,signed=True) - def imm8_tests(self): return test_range(8,signed=True) - def uimm8_tests(self): return test_range(8) - def uimm4_tests(self): return test_range(4) - def imm32_tests(self): return test_range(32, signed=True) - - def relative_tests(self): - py.test.skip("explicit test required for %r" % (self.methname,)) + func = getattr(AbstractZARCHBuilder, methodname) + return func._arguments_ def assembler_operand_reg(self, 
regnum): - return self.REGNAMES[regnum] + return REGNAMES[regnum] - def get_mapping_asm_to_str(self): - return { + def operand_combinations(self, methodname, modes, arguments): + mapping = { 'r': self.assembler_operand_reg, - 's': lambda x: str(x), - 'x': lambda x: str(x), - 'y': lambda x: str(x), - 'i': lambda x: str(x), - 'l': lambda x: str(x), - 'L': lambda x: str(x), + 'r/m': self.assembler_operand_reg, } - - def operand_combinations(self, modes, arguments): - remap = { - 'rre': 'rr', - 'rxy': 'rx', - 'siy': 'si', - 'ssa': 'Ls', - 'ssb': 'll', - 'ssc': 'lsi', - 'ssd': 'xsr', - 'sse': 'rrss', - 'ssf': 'sL', - } - mapping = self.get_mapping_asm_to_str() - modes = remap.get(modes, modes) - for mode, args in zip(modes, arguments): - yield mapping[mode](args) + arg_types = self.get_func_arg_types(methodname) + for mode, args in zip(arg_types, arguments): + yield mapping.get(mode, lambda x: str(x))(args) def run_test(self, methname, instrname, argmodes, args_lists, instr_suffix=None): @@ -234,7 +189,7 @@ if instr_suffix is not None: suffix = instr_suffix # overwrite # - ops = self.operand_combinations(argmodes, args) + ops = self.operand_combinations(methname, argmodes, args) op = '\t%s%s %s' % (instrname.lower(), suffix, ', '.join(ops)) g.write('%s\n' % op) @@ -272,34 +227,12 @@ return mode def make_all_tests(self, methname, modes, args=[]): - tests = { - 'r': lambda i: self.REGS, - 'x': lambda i: self.INDEX_BASE_DISPLACE, - 'y': lambda i: self.INDEX_BASE_DISPLACE_LONG, - 'i': lambda i: self.imm_tests(methname, modes, i), - 's': lambda i: self.BASE_DISPLACE, - 'L': lambda i: self.LENGTH8_BASE_DISPLACE, - 'l': lambda i: self.LENGTH4_BASE_DISPLACE, - } - tests_all = { - 'rxy': (tests['r'], tests['y']), - 'siy': (lambda i: self.BASE_DISPLACE_LONG, tests['i']), - 'rre': (tests['r'], tests['r']), - 'ssa': (tests['L'], tests['s']), - 'ssb': (tests['l'], tests['l']), - 'ssc': (tests['l'], tests['s'], tests['i']), - 'ssd': (tests['x'], tests['s'], tests['r']), - 'sse': 
(tests['r'], tests['r'], tests['s'], tests['s']), - 'ssf': (tests['s'], tests['L']), - } - if modes in tests_all: - combinations = [f(i) for i,f in enumerate(tests_all[modes])] - else: - combinations = [] - for i,m in enumerate(modes): - elems = tests[m](i) - random.shuffle(elems) - combinations.append(elems) + arg_types = self.get_func_arg_types(methname) + combinations = [] + for i,m in enumerate(arg_types): + elems = TEST_CASE_GENERATE[m] + random.shuffle(elems) + combinations.append(elems) results = [] for args in itertools.product(*combinations): results.append(args) From noreply at buildbot.pypy.org Fri Oct 16 16:12:58 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 16:12:58 +0200 (CEST) Subject: [pypy-commit] pypy default: accidentally omitted the first instruction when entering vectorization (in the test suite this removed only a debug merge point) Message-ID: <20151016141258.955E21C01DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: Changeset: r80274:4ac4bedc5ad0 Date: 2015-10-16 16:12 +0200 http://bitbucket.org/pypy/pypy/changeset/4ac4bedc5ad0/ Log: accidentally omitted the first instruction when entering vectorization (in the test suite this removed only a debug merge point) diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -103,7 +103,8 @@ user_code = not jitdriver_sd.vec and warmstate.vec_all e = len(loop_ops)-1 assert e > 0 - loop = VectorLoop(loop_info.label_op, loop_ops[1:e], loop_ops[-1]) + assert loop_ops[e].is_final() + loop = VectorLoop(loop_info.label_op, loop_ops[:e], loop_ops[-1]) if user_code and user_loop_bail_fast_path(loop, warmstate): return loop_info, loop_ops # the original loop (output of optimize_unroll) From noreply at buildbot.pypy.org Fri Oct 16 16:13:00 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 
16:13:00 +0200 (CEST) Subject: [pypy-commit] pypy default: merged Message-ID: <20151016141300.C6EF91C01DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: Changeset: r80275:7a10abc047fb Date: 2015-10-16 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/7a10abc047fb/ Log: merged diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-15.11.0.rst @@ -5,7 +5,8 @@ We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy python2.7.10 compatible interpreter with a Just In Time compiler. We have improved `warmup time and memory overhead used for tracing`_, added -`vectorization`_ for numpy and general loops where possible on x86 hardware, +`vectorization`_ for numpy and general loops where possible on x86 hardware +(disabled by default), refactored rough edges in rpython, and increased functionality of numpy. You can download the PyPy 15.11 release here: @@ -35,22 +36,26 @@ Availability of SIMD hardware is detected at run time, without needing to precompile various code paths into the executable. +The first version of the vectorization has been merged in this release, since +it is so new it is off by default. To enable the vectorization in built-in JIT +drivers (like numpy ufuncs), add `--jit vec=1`, to enable all implemented +vectorization add `--jit vec_all=1` + Internal Refactoring and Warmup Time Improvement ================================================ Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, -leading to a warmup time improvement of 20% or so at the cost of a minor -regression in jitted code speed. +leading to a warmup time improvement of 20% or so. Numpy ===== -Our implementation of numpy continues to improve. ndarray and the numeric dtypes +Our implementation of `numpy`_ continues to improve. 
ndarray and the numeric dtypes are very close to feature-complete; record, string and unicode dtypes are mostly supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 modules that call out to the same underlying libraries that upstream numpy uses. -Please try it out, especially using the new vectorization (via --jit vec=1 on the +Please try it out, especially using the new vectorization (via `--jit vec=1` on the command line) and let us know what is missing for your code. CFFI @@ -64,12 +69,12 @@ .. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html - .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy What is PyPy? 
============= From noreply at buildbot.pypy.org Fri Oct 16 16:50:07 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 16:50:07 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: address of branch relative and save long is no correctly encoded (half word addressed) Message-ID: <20151016145007.358D91C1230@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80276:4b25446959ac Date: 2015-10-16 16:50 +0200 http://bitbucket.org/pypy/pypy/changeset/4b25446959ac/ Log: address of branch relative and save long is no correctly encoded (half word addressed) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -127,7 +127,8 @@ self.writechar(opcode) byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) self.writechar(chr(byte)) - self.write_s32(imm32) + # half word boundary, addressing bytes + self.write_s32(imm32 >> 1 & BIT_MASK_32) return encode_ri @@ -170,13 +171,13 @@ return encode_ssb def build_ssc(mnemonic, (opcode1,)): - @builder.arguments('u4,l4bd,l4bd') - def encode_ssc(self, uimm4, len_base_disp1, len_base_disp2): + @builder.arguments('l4bd,bd,u4') + def encode_ssc(self, len_base_disp, base_disp, uimm4): self.writechar(opcode1) - byte = (len_base_disp1.length & 0xf) << 4 | uimm4 & 0xf + byte = (len_base_disp.length & 0xf) << 4 | uimm4 & 0xf self.writechar(chr(byte)) - encode_base_displace(self, len_base_disp1) - encode_base_displace(self, len_base_disp2) + encode_base_displace(self, len_base_disp) + encode_base_displace(self, base_disp) return encode_ssc def build_ssd(mnemonic, (opcode,)): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -108,9 +108,6 @@ if signed: bits 
-= 1 maximum = 2**bits - if alignment == 16: - # TODO - return [-32,-16,0,16,32] return [-maximum,-1,0,1,maximum-1] + [random.randrange(-maximum,maximum) for i in range(count)] maximum = 2**bits return [0,1,maximum-1] + [random.randrange(0,maximum) for i in range(count)] @@ -150,7 +147,7 @@ } class TestZARCH(object): - WORD = 8 + WORD = 4 TESTDIR = 'zarch' accept_unnecessary_prefix = None methname = '?' @@ -195,7 +192,7 @@ g.write('%s\n' % op) oplist.append(op) g.write('\t.string "%s"\n' % END_TAG) - proc = subprocess.Popen(['as', '-m' + str(self.WORD*8), '-mzarch', + proc = subprocess.Popen(['as', '-m64', '-mzarch', inputname, '-o', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -231,7 +228,7 @@ combinations = [] for i,m in enumerate(arg_types): elems = TEST_CASE_GENERATE[m] - random.shuffle(elems) + #random.shuffle(elems) combinations.append(elems) results = [] for args in itertools.product(*combinations): From noreply at buildbot.pypy.org Fri Oct 16 16:52:58 2015 From: noreply at buildbot.pypy.org (sbauman) Date: Fri, 16 Oct 2015 16:52:58 +0200 (CEST) Subject: [pypy-commit] pypy remove-getfield-pure: Merge changes Message-ID: <20151016145258.C8F6F1C1248@cobra.cs.uni-duesseldorf.de> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r80277:95c98ba9cf43 Date: 2015-10-09 18:16 -0400 http://bitbucket.org/pypy/pypy/changeset/95c98ba9cf43/ Log: Merge changes diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -1,3 +1,4 @@ +from rpython.rtyper.rclass import IR_QUASIIMMUTABLE from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED from rpython.jit.metainterp.resoperation import rop, OpHelpers, AbstractResOp,\ ResOperation @@ -192,6 +193,22 @@ return self.emit_operation(op) + def optimize_GETFIELD_GC_PURE_I(self, op): + from rpython.rlib.objectmodel import we_are_translated 
+ # check that the descr is pure + # XXX quasi immutable descrs, are they pure or not? + if not we_are_translated(): + descr = op.getdescr() + # Kind of weird that this returns a boolean or one of the IR_* + # family + assert descr.is_always_pure() in (True, IR_QUASIIMMUTABLE) + return self.optimize_default(op) + optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I + optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I + optimize_GETARRAYITEM_GC_PURE_I = optimize_GETFIELD_GC_PURE_I + optimize_GETARRAYITEM_GC_PURE_R = optimize_GETFIELD_GC_PURE_I + optimize_GETARRAYITEM_GC_PURE_F = optimize_GETFIELD_GC_PURE_I + def flush(self): assert self.postponed_op is None diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1089,9 +1089,9 @@ def test_getfield_gc_pure_1(self): ops = """ [i] - p1 = new_with_vtable(descr=nodesize) - setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + p1 = new_with_vtable(descr=nodesize3) + setfield_gc(p1, i, descr=valuedescr3) + i1 = getfield_gc_pure_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -1103,14 +1103,13 @@ def test_getfield_gc_pure_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ [i] - jump(5) - """ - self.node.value = 5 + jump(7) + """ self.optimize_loop(ops, expected) def test_getfield_gc_nonpure_2(self): @@ -1532,7 +1531,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, 
descr=arraydescr) @@ -1544,7 +1543,7 @@ expected = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -1786,7 +1785,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -1797,7 +1796,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -5869,15 +5868,15 @@ def test_getarrayitem_gc_pure_not_invalidated(self): ops = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) - i2 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i2 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(i2) jump(p0) """ expected = """ [p0] - i1 = getarrayitem_gc_pure_i(p0, 1, descr=arraydescr) + i1 = getarrayitem_gc_pure_i(p0, 1, descr=arrayimmutdescr) escape_n(p0) escape_n(i1) jump(p0) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1423,7 +1423,7 @@ def test_getfield_gc_pure_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i1 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1437,16 +1437,16 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_pure_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr) + p3 = getfield_gc_pure_r(p1, descr=nextdescr3) escape_n(p3) jump() """ 
expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_pure_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ -2317,7 +2317,7 @@ setfield_gc(p1, i1, descr=valuedescr) # # some operations on which the above setfield_gc cannot have effect - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) setarrayitem_gc(p3, 0, i5, descr=arraydescr) @@ -2330,7 +2330,7 @@ preamble = """ [p1, i1, i2, p3] # - i3 = getarrayitem_gc_pure_i(p3, 1, descr=arraydescr) + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2338,11 +2338,12 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - jump(p1, i1, i2, p3, i3) - """ - expected = """ - [p1, i1, i2, p3, i3] + jump(p1, i1, i2, p3) + """ + expected = """ + [p1, i1, i2, p3] # + i3 = getarrayitem_gc_i(p3, 1, descr=arraydescr) i4 = getarrayitem_gc_i(p3, i3, descr=arraydescr) i5 = int_add(i3, i4) # @@ -2350,8 +2351,7 @@ setfield_gc(p1, i4, descr=nextdescr) setarrayitem_gc(p3, 0, i5, descr=arraydescr) escape_n() - ifoo = arraylen_gc(p3, descr=arraydescr) # killed by the backend - jump(p1, i1, i2, p3, i3) + jump(p1, i1, i2, p3) """ self.optimize_loop(ops, expected, preamble) @@ -2667,7 +2667,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr), descr=valuedescr) + i4 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -2678,7 +2678,7 @@ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) - escape_n(5) + escape_n(7) escape_n(p3) jump(p1, p2) """ @@ -3303,8 +3303,8 @@ [p8, p11, i24] p26 = new(descr=ssize) setfield_gc(p26, i24, descr=adescr) - i34 = getfield_gc_pure_i(p11, descr=abisdescr) - i35 = 
getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=abisdescr) + i35 = getfield_gc_i(p26, descr=adescr) i36 = int_add_ovf(i34, i35) guard_no_overflow() [] jump(p8, p11, i35) @@ -4879,11 +4879,11 @@ def test_add_sub_ovf_virtual_unroll(self): ops = """ [p15] - i886 = getfield_gc_pure_i(p15, descr=valuedescr) + i886 = getfield_gc_i(p15, descr=valuedescr) i888 = int_sub_ovf(i886, 1) guard_no_overflow() [] escape_n(i888) - i4360 = getfield_gc_pure_i(p15, descr=valuedescr) + i4360 = getfield_gc_i(p15, descr=valuedescr) i4362 = int_add_ovf(i4360, 1) guard_no_overflow() [] i4360p = int_sub_ovf(i4362, 1) @@ -4973,8 +4973,8 @@ def test_pure(self): ops = """ [p42] - p53 = getfield_gc_r(ConstPtr(myptr), descr=nextdescr) - p59 = getfield_gc_pure_r(p53, descr=valuedescr) + p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) + p59 = getfield_gc_pure_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ @@ -5003,7 +5003,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_pure_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5011,7 +5011,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_pure_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5022,7 +5022,7 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) p3 = escape_r() setfield_gc(p3, p1, descr=nextdescr) jump() @@ -7496,7 +7496,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr) + p1 = getfield_gc_pure_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7780,14 +7780,14 @@ def test_loopinvariant_getarrayitem_gc_pure(self): ops = """ [p9, i1] - i843 = getarrayitem_gc_pure_i(p9, i1, descr=arraydescr) + i843 = getarrayitem_gc_pure_i(p9, i1, 
descr=arrayimmutdescr) call_n(i843, descr=nonwritedescr) jump(p9, i1) """ expected = """ [p9, i1, i843] call_n(i843, descr=nonwritedescr) - ifoo = arraylen_gc(p9, descr=arraydescr) + ifoo = arraylen_gc(p9, descr=arrayimmutdescr) jump(p9, i1, i843) """ self.optimize_loop(ops, expected) @@ -7796,7 +7796,7 @@ ops = """ [p0] p1 = getfield_gc_r(p0, descr=nextdescr) - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) call_n(p2, descr=nonwritedescr) jump(p0) """ @@ -7811,14 +7811,14 @@ i1 = arraylen_gc(p1, descr=gcarraydescr) i2 = int_ge(i1, 8) guard_true(i2) [] - p2 = getarrayitem_gc_pure_r(p1, 7, descr=gcarraydescr) - jump(p2, p1) - """ - expected = """ - [p0, p2, p1] + p2 = getarrayitem_gc_r(p1, 7, descr=gcarraydescr) + jump(p1, p2) + """ + expected = """ + [p0, p1, p2] call_n(p2, descr=nonwritedescr) i3 = arraylen_gc(p1, descr=gcarraydescr) # Should be killed by backend - jump(p0, p2, p1) + jump(p0, p1, p2) """ self.optimize_loop(ops, expected, expected_short=short) @@ -7993,7 +7993,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr) + i9 = getfield_gc_pure_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8129,14 +8129,14 @@ ops = """ [p0] p10 = getfield_gc_r(ConstPtr(myptr), descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(ConstPtr(myptr), ConstPtr(myptr2), descr=otherdescr) + setfield_gc(ConstPtr(myptr), ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8160,14 +8160,14 @@ ops = """ [p0] p10 = getfield_gc_r(p0, descr=otherdescr) - guard_value(p10, ConstPtr(myptr2)) [] + guard_value(p10, ConstPtr(myptrb)) [] call_n(p10, descr=nonwritedescr) - setfield_gc(p0, 
ConstPtr(myptr2), descr=otherdescr) + setfield_gc(p0, ConstPtr(myptrb), descr=otherdescr) jump(p0) """ expected = """ [p0] - call_n(ConstPtr(myptr2), descr=nonwritedescr) + call_n(ConstPtr(myptrb), descr=nonwritedescr) jump(p0) """ self.optimize_loop(ops, expected) @@ -8552,7 +8552,7 @@ [p10] p52 = getfield_gc_r(p10, descr=nextdescr) # inst_storage p54 = getarrayitem_gc_r(p52, 0, descr=arraydescr) - p69 = getfield_gc_pure_r(p54, descr=otherdescr) # inst_w_function + p69 = getfield_gc_r(p54, descr=otherdescr) # inst_w_function quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] @@ -8562,7 +8562,7 @@ p106 = new_with_vtable(descr=nodesize) p108 = new_array(3, descr=arraydescr) p110 = new_with_vtable(descr=nodesize) - setfield_gc(p110, ConstPtr(myptr2), descr=otherdescr) # inst_w_function + setfield_gc(p110, ConstPtr(myptrb), descr=otherdescr) # inst_w_function setarrayitem_gc(p108, 0, p110, descr=arraydescr) setfield_gc(p106, p108, descr=nextdescr) # inst_storage jump(p106) @@ -8780,13 +8780,13 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=bdescr) + p1 = getfield_gc_pure_r(p0, descr=valuedescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) - ix = getarrayitem_gc_pure_i(p2, 0, descr=arraydescr) + ix = getarrayitem_gc_pure_i(p2, 0, descr=arrayimmutdescr) pfoo = getfield_gc_r(ptemp, descr=nextdescr) - guard_value(pfoo, ConstPtr(myarray)) [] + guard_value(pfoo, ConstPtr(immutarray)) [] ifoo = int_add(ix, 13) escape_n(ix) jump(p0) @@ -8816,13 +8816,13 @@ def test_constant_float_pure(self): ops = """ [p0] - f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarraydescr) + f0 = getarrayitem_gc_pure_f(p0, 3, descr=floatarrayimmutdescr) guard_value(f0, 1.03) [] jump(p0) """ expected = """ [p0] - ifoo = arraylen_gc(p0, descr=floatarraydescr) + ifoo = arraylen_gc(p0, descr=floatarrayimmutdescr) jump(p0) """ self.optimize_loop(ops, expected) 
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -220,10 +220,10 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr) + pc = getfield_gc_pure_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches - p1 = getfield_gc_r(pc, descr=nextdescr) - i0 = getfield_gc_i(p1, descr=valuedescr) + p1 = getfield_gc_r(pc, descr=nextdescr3) + i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -132,13 +132,21 @@ node2.parent.parent.typeptr = node_vtable2 node2addr = lltype.cast_opaque_ptr(llmemory.GCREF, node2) myptr = lltype.cast_opaque_ptr(llmemory.GCREF, node) - mynode2 = lltype.malloc(NODE) + mynodeb = lltype.malloc(NODE) myarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(lltype.GcArray(lltype.Signed), 13, zero=True)) - mynode2.parent.typeptr = node_vtable - myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode2) - mynode3 = lltype.malloc(NODE2) - mynode3.parent.parent.typeptr = node_vtable2 + mynodeb.parent.typeptr = node_vtable + myptrb = lltype.cast_opaque_ptr(llmemory.GCREF, mynodeb) + myptr2 = lltype.malloc(NODE2) + myptr2.parent.parent.typeptr = node_vtable2 + myptr2 = lltype.cast_opaque_ptr(llmemory.GCREF, myptr2) + nullptr = lltype.nullptr(llmemory.GCREF.TO) + + mynode3 = lltype.malloc(NODE3) + mynode3.parent.typeptr = node_vtable3 + mynode3.value = 7 + mynode3.next = mynode3 myptr3 = lltype.cast_opaque_ptr(llmemory.GCREF, mynode3) + nullptr = lltype.nullptr(llmemory.GCREF.TO) #nodebox2 = 
InputArgRef(lltype.cast_opaque_ptr(llmemory.GCREF, node2)) nodesize = cpu.sizeof(NODE, node_vtable) @@ -197,7 +205,6 @@ immut_ptrval = cpu.fielddescrof(PTROBJ_IMMUT, 'ptrval') arraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed)) - floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) arraydescr_tid = arraydescr.get_type_id() array = lltype.malloc(lltype.GcArray(lltype.Signed), 15, zero=True) arrayref = lltype.cast_opaque_ptr(llmemory.GCREF, array) @@ -207,6 +214,11 @@ gcarraydescr_tid = gcarraydescr.get_type_id() floatarraydescr = cpu.arraydescrof(lltype.GcArray(lltype.Float)) + arrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Signed, hints={"immutable": True})) + immutarray = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(arrayimmutdescr.A, 13, zero=True)) + gcarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(llmemory.GCREF, hints={"immutable": True})) + floatarrayimmutdescr = cpu.arraydescrof(lltype.GcArray(lltype.Float, hints={"immutable": True})) + # a GcStruct not inheriting from OBJECT tpl = lltype.malloc(S, zero=True) tupleaddr = lltype.cast_opaque_ptr(llmemory.GCREF, tpl) @@ -238,7 +250,7 @@ tsize = cpu.sizeof(T, None) cdescr = cpu.fielddescrof(T, 'c') ddescr = cpu.fielddescrof(T, 'd') - arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE))) + arraydescr3 = cpu.arraydescrof(lltype.GcArray(lltype.Ptr(NODE3))) U = lltype.GcStruct('U', ('parent', OBJECT), diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -1103,8 +1103,8 @@ jump(p0) """ self.optimize_bridge(loops, bridge, loops[0], 'Loop0', [self.myptr]) - self.optimize_bridge(loops, bridge, loops[1], 'Loop1', [self.myptr3]) - self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr3]) + self.optimize_bridge(loops, bridge, loops[1], 'Loop1', 
[self.myptr2]) + self.optimize_bridge(loops[0], bridge, 'RETRACE', [self.myptr2]) self.optimize_bridge(loops, loops[0], loops[0], 'Loop0', [self.nullptr]) self.optimize_bridge(loops, loops[1], loops[1], 'Loop1', [self.nullptr]) From noreply at buildbot.pypy.org Fri Oct 16 16:53:01 2015 From: noreply at buildbot.pypy.org (sbauman) Date: Fri, 16 Oct 2015 16:53:01 +0200 (CEST) Subject: [pypy-commit] pypy remove-getfield-pure: Remove GETFIELD_PURE_* operations from the JIT Message-ID: <20151016145301.456771C1248@cobra.cs.uni-duesseldorf.de> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r80278:92efa3b835f1 Date: 2015-10-16 10:52 -0400 http://bitbucket.org/pypy/pypy/changeset/92efa3b835f1/ Log: Remove GETFIELD_PURE_* operations from the JIT - That information is now entirely encoded in the field descriptor diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -675,9 +675,6 @@ emit_op_getfield_gc_i = _genop_getfield emit_op_getfield_gc_r = _genop_getfield emit_op_getfield_gc_f = _genop_getfield - emit_op_getfield_gc_pure_i = _genop_getfield - emit_op_getfield_gc_pure_r = _genop_getfield - emit_op_getfield_gc_pure_f = _genop_getfield emit_op_getfield_raw_i = _genop_getfield emit_op_getfield_raw_f = _genop_getfield emit_op_getfield_raw_pure_i = _genop_getfield diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -853,9 +853,6 @@ prepare_op_getfield_raw_f = _prepare_op_getfield prepare_op_getfield_raw_pure_i = _prepare_op_getfield prepare_op_getfield_raw_pure_f = _prepare_op_getfield - prepare_op_getfield_gc_pure_i = _prepare_op_getfield - prepare_op_getfield_gc_pure_r = _prepare_op_getfield - prepare_op_getfield_gc_pure_f = _prepare_op_getfield def prepare_op_increment_debug_counter(self, op, fcond): boxes = 
op.getarglist() diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -578,9 +578,6 @@ p = support.cast_arg(lltype.Ptr(descr.S), p) return support.cast_result(descr.FIELD, getattr(p, descr.fieldname)) - bh_getfield_gc_pure_i = bh_getfield_gc - bh_getfield_gc_pure_r = bh_getfield_gc - bh_getfield_gc_pure_f = bh_getfield_gc bh_getfield_gc_i = bh_getfield_gc bh_getfield_gc_r = bh_getfield_gc bh_getfield_gc_f = bh_getfield_gc diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1378,9 +1378,6 @@ genop_getfield_raw_f = _genop_getfield genop_getfield_raw_pure_i = _genop_getfield genop_getfield_raw_pure_f = _genop_getfield - genop_getfield_gc_pure_i = _genop_getfield - genop_getfield_gc_pure_r = _genop_getfield - genop_getfield_gc_pure_f = _genop_getfield def _genop_getarrayitem(self, op, arglocs, resloc): base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1096,9 +1096,6 @@ consider_getfield_raw_f = _consider_getfield consider_getfield_raw_pure_i = _consider_getfield consider_getfield_raw_pure_f = _consider_getfield - consider_getfield_gc_pure_i = _consider_getfield - consider_getfield_gc_pure_r = _consider_getfield - consider_getfield_gc_pure_f = _consider_getfield def consider_increment_debug_counter(self, op): base_loc = self.loc(op.getarg(0)) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -168,9 +168,6 @@ elif (opnum != rop.GETFIELD_GC_R and opnum != rop.GETFIELD_GC_I and opnum != rop.GETFIELD_GC_F and - opnum != 
rop.GETFIELD_GC_PURE_R and - opnum != rop.GETFIELD_GC_PURE_I and - opnum != rop.GETFIELD_GC_PURE_F and opnum != rop.PTR_EQ and opnum != rop.PTR_NE and opnum != rop.INSTANCE_PTR_EQ and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -789,9 +789,6 @@ if 'getfield_gc' in check: assert check.pop('getfield_gc') == 0 check['getfield_gc_i'] = check['getfield_gc_r'] = check['getfield_gc_f'] = 0 - if 'getfield_gc_pure' in check: - assert check.pop('getfield_gc_pure') == 0 - check['getfield_gc_pure_i'] = check['getfield_gc_pure_r'] = check['getfield_gc_pure_f'] = 0 if 'getarrayitem_gc_pure' in check: assert check.pop('getarrayitem_gc_pure') == 0 check['getarrayitem_gc_pure_i'] = check['getarrayitem_gc_pure_r'] = check['getarrayitem_gc_pure_f'] = 0 diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -507,19 +507,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - def optimize_GETFIELD_GC_PURE_I(self, op): - structinfo = self.ensure_ptr_info_arg0(op) - cf = self.field_cache(op.getdescr()) - field = cf.getfield_from_cache(self, structinfo, op.getdescr()) - if field is not None: - self.make_equal_to(op, field) - return - # default case: produce the operation - self.make_nonnull(op.getarg(0)) - self.emit_operation(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def optimize_SETFIELD_GC(self, op): self.setfield(op) #opnum = OpHelpers.getfield_pure_for_descr(op.getdescr()) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -1,4 +1,3 @@ -from 
rpython.rtyper.rclass import IR_QUASIIMMUTABLE from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED from rpython.jit.metainterp.resoperation import rop, OpHelpers, AbstractResOp,\ ResOperation @@ -66,7 +65,7 @@ class OptPure(Optimization): def __init__(self): self.postponed_op = None - self._pure_operations = [None] * (rop._ALWAYS_PURE_LAST - + self._pure_operations = [None] * (rop._NOSIDEEFFECT_LAST - rop._ALWAYS_PURE_FIRST) self.call_pure_positions = [] self.extra_call_pure = [] @@ -193,22 +192,6 @@ return self.emit_operation(op) - def optimize_GETFIELD_GC_PURE_I(self, op): - from rpython.rlib.objectmodel import we_are_translated - # check that the descr is pure - # XXX quasi immutable descrs, are they pure or not? - if not we_are_translated(): - descr = op.getdescr() - # Kind of weird that this returns a boolean or one of the IR_* - # family - assert descr.is_always_pure() in (True, IR_QUASIIMMUTABLE) - return self.optimize_default(op) - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - optimize_GETARRAYITEM_GC_PURE_I = optimize_GETFIELD_GC_PURE_I - optimize_GETARRAYITEM_GC_PURE_R = optimize_GETFIELD_GC_PURE_I - optimize_GETARRAYITEM_GC_PURE_F = optimize_GETFIELD_GC_PURE_I - def flush(self): assert self.postponed_op is None diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -1086,12 +1086,12 @@ """ self.optimize_loop(ops, expected) - def test_getfield_gc_pure_1(self): + def test_getfield_gc_1(self): ops = """ [i] p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, i, descr=valuedescr3) - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) jump(i1) """ expected = """ @@ -1100,10 +1100,10 @@ """ 
self.optimize_loop(ops, expected) - def test_getfield_gc_pure_2(self): + def test_getfield_gc_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1785,7 +1785,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -5482,7 +5482,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i0 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i0 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i1 = call_pure_i(123, i0, descr=nonwritedescr) finish(i1) """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -1411,7 +1411,7 @@ [i] p1 = new_with_vtable(descr=nodesize) setfield_gc(p1, i, descr=valuedescr) - i1 = getfield_gc_pure_i(p1, descr=valuedescr) + i1 = getfield_gc_i(p1, descr=valuedescr) jump(i1) """ expected = """ @@ -1423,7 +1423,7 @@ def test_getfield_gc_pure_2(self): ops = """ [i] - i1 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) + i1 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) jump(i1) """ expected = """ @@ -1437,16 +1437,16 @@ ops = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr3) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) - p3 = getfield_gc_pure_r(p1, descr=nextdescr3) + p3 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p3) jump() """ expected = """ [] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr3) + p2 = getfield_gc_r(p1, descr=nextdescr3) escape_n(p2) escape_n(p2) jump() @@ 
-2667,7 +2667,7 @@ ops = """ [p1, p2] p3 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) - i4 = getfield_gc_pure_i(ConstPtr(myptr3), descr=valuedescr3) + i4 = getfield_gc_i(ConstPtr(myptr3), descr=valuedescr3) p5 = getarrayitem_gc_r(p1, 0, descr=arraydescr2) escape_n(p3) escape_n(i4) @@ -3331,8 +3331,8 @@ setfield_gc(p26, i24, descr=adescr) i28 = int_add(i17, 1) setfield_gc(p8, i28, descr=valuedescr) - i34 = getfield_gc_pure_i(p11, descr=valuedescr3) - i35 = getfield_gc_pure_i(p26, descr=adescr) + i34 = getfield_gc_i(p11, descr=valuedescr3) + i35 = getfield_gc_i(p26, descr=adescr) guard_nonnull(p12) [] i36 = int_add_ovf(i34, i35) guard_no_overflow() [] @@ -3523,14 +3523,14 @@ def test_residual_call_does_not_invalidate_immutable_caches(self): ops = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) - i3 = getfield_gc_pure_i(p1, descr=valuedescr3) + i3 = getfield_gc_i(p1, descr=valuedescr3) jump(p1) """ expected_preamble = """ [p1] - i1 = getfield_gc_pure_i(p1, descr=valuedescr3) + i1 = getfield_gc_i(p1, descr=valuedescr3) i2 = call_i(i1, descr=writevalue3descr) jump(p1, i1) """ @@ -4974,17 +4974,15 @@ ops = """ [p42] p53 = getfield_gc_r(ConstPtr(myptr3), descr=nextdescr3) - p59 = getfield_gc_pure_r(p53, descr=valuedescr3) + p59 = getfield_gc_r(p53, descr=valuedescr3) i61 = call_i(1, p59, descr=nonwritedescr) jump(p42) """ expected = """ - [p42, p59] - i61 = call_i(1, p59, descr=nonwritedescr) - jump(p42, p59) - - """ - self.node.value = 5 + [p42] + i61 = call_i(1, 7, descr=nonwritedescr) + jump(p42) + """ self.optimize_loop(ops, expected) def test_complains_getfieldpure_setfield(self): @@ -4993,7 +4991,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr) + p2 = getfield_gc_r(p1, descr=nextdescr) setfield_gc(p1, p3, descr=nextdescr) jump(p3) """ @@ -5003,7 +5001,7 @@ ops = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr3) + p2 
= getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -5011,7 +5009,7 @@ expected = """ [p3] p1 = escape_r() - p2 = getfield_gc_pure_r(p1, descr=nextdescr3) + p2 = getfield_gc_r(p1, descr=nextdescr3) setfield_gc(p1, p3, descr=otherdescr) escape_n(p2) jump(p3) @@ -6168,14 +6166,14 @@ def test_bug_unroll_with_immutables(self): ops = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) p1 = new_with_vtable(descr=immut_descr) setfield_gc(p1, 1242, descr=immut_intval) jump(p1) """ preamble = """ [p0] - i2 = getfield_gc_pure_i(p0, descr=immut_intval) + i2 = getfield_gc_i(p0, descr=immut_intval) jump() """ expected = """ @@ -7157,13 +7155,13 @@ [p0, p1, i0] quasiimmut_field(p0, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ expected = """ [p0, p1, i0] - i1 = getfield_gc_pure_i(p0, descr=quasifielddescr) + i1 = getfield_gc_i(p0, descr=quasifielddescr) escape_n(i1) jump(p1, p0, i1) """ @@ -7174,7 +7172,7 @@ [] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) escape_n(i1) jump() """ @@ -7226,11 +7224,11 @@ [i0a, i0b] quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i1 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(ConstPtr(quasiptr), descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(ConstPtr(quasiptr), descr=quasifielddescr) + i2 = getfield_gc_i(ConstPtr(quasiptr), descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7253,11 +7251,11 @@ 
setfield_gc(p, 421, descr=quasifielddescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i1 = getfield_gc_pure_i(p, descr=quasifielddescr) + i1 = getfield_gc_i(p, descr=quasifielddescr) call_may_force_n(i0b, descr=mayforcevirtdescr) quasiimmut_field(p, descr=quasiimmutdescr) guard_not_invalidated() [] - i2 = getfield_gc_pure_i(p, descr=quasifielddescr) + i2 = getfield_gc_i(p, descr=quasifielddescr) i3 = escape_i(i1) i4 = escape_i(i2) jump(i3, i4) @@ -7496,7 +7494,7 @@ def test_forced_virtual_pure_getfield(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr3) + p1 = getfield_gc_r(p0, descr=valuedescr3) jump(p1) """ self.optimize_loop(ops, ops) @@ -7506,7 +7504,7 @@ p1 = new_with_vtable(descr=nodesize3) setfield_gc(p1, p0, descr=valuedescr3) escape_n(p1) - p2 = getfield_gc_pure_r(p1, descr=valuedescr3) + p2 = getfield_gc_r(p1, descr=valuedescr3) escape_n(p2) jump(p0) """ @@ -7993,7 +7991,7 @@ def test_dont_mixup_equal_boxes(self): ops = """ [p8] - i9 = getfield_gc_pure_i(p8, descr=valuedescr3) + i9 = getfield_gc_i(p8, descr=valuedescr3) i10 = int_gt(i9, 0) guard_true(i10) [] i29 = int_lshift(i9, 1) @@ -8088,9 +8086,9 @@ py.test.skip("would be fixed by make heap optimizer aware of virtual setfields") ops = """ [p5, p8] - i9 = getfield_gc_pure_i(p5, descr=valuedescr) + i9 = getfield_gc_i(p5, descr=valuedescr) call_n(i9, descr=nonwritedescr) - i11 = getfield_gc_pure_i(p8, descr=valuedescr) + i11 = getfield_gc_i(p8, descr=valuedescr) i13 = int_add_ovf(i11, 1) guard_no_overflow() [] p22 = new_with_vtable(descr=nodesize) @@ -8556,7 +8554,7 @@ quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] p106 = new_with_vtable(descr=nodesize) @@ -8578,7 +8576,7 @@ [p69] quasiimmut_field(p69, descr=quasiimmutdescr) guard_not_invalidated() [] - p71 = 
getfield_gc_pure_r(p69, descr=quasifielddescr) # inst_code + p71 = getfield_gc_r(p69, descr=quasifielddescr) # inst_code guard_value(p71, -4247) [] jump(ConstPtr(myptr)) """ @@ -8780,7 +8778,7 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_pure_r(p0, descr=valuedescr3) + p1 = getfield_gc_r(p0, descr=valuedescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -220,16 +220,16 @@ def test_double_getfield_plus_pure(self): loop = """ [p0] - pc = getfield_gc_pure_r(p0, descr=nextdescr3) + pc = getfield_gc_r(p0, descr=nextdescr3) escape_n(p0) # that should flush the caches p1 = getfield_gc_r(pc, descr=nextdescr3) i0 = getfield_gc_i(p1, descr=valuedescr3) jump(p0) """ es, loop, preamble = self.optimize(loop) - assert len(es.short_boxes) == 4 + assert len(es.short_boxes) == 7 # both getfields are available as - # well as getfield_gc_pure + # well as getfield_gc def test_p123_anti_nested(self): loop = """ diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py --- a/rpython/jit/metainterp/optimizeopt/virtualize.py +++ b/rpython/jit/metainterp/optimizeopt/virtualize.py @@ -182,12 +182,6 @@ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I - # note: the following line does not mean that the two operations are - # completely equivalent, because GETFIELD_GC_PURE is_always_pure(). 
- optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I - optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I - def optimize_SETFIELD_GC(self, op): struct = op.getarg(0) opinfo = self.getptrinfo(struct) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -12,7 +12,7 @@ from rpython.jit.metainterp.jitprof import EmptyProfiler from rpython.jit.metainterp.logger import Logger from rpython.jit.metainterp.optimizeopt.util import args_dict -from rpython.jit.metainterp.resoperation import rop, OpHelpers, GuardResOp +from rpython.jit.metainterp.resoperation import rop, OpHelpers, GuardResOp, is_pure_getfield from rpython.rlib import nonconst, rstack from rpython.rlib.debug import debug_start, debug_stop, debug_print from rpython.rlib.debug import have_debug_prints, make_sure_not_resized @@ -661,48 +661,52 @@ rop.INT_ADD, None, indexbox, lenbox) return indexbox + # @arguments("box", "descr") + # def opimpl_getfield_gc_i(self, box, fielddescr): + # return self._opimpl_getfield_gc_any_pureornot( + # rop.GETFIELD_GC_I, box, fielddescr, 'i') + # @arguments("box", "descr") + # def opimpl_getfield_gc_r(self, box, fielddescr): + # return self._opimpl_getfield_gc_any_pureornot( + # rop.GETFIELD_GC_R, box, fielddescr, 'r') + # @arguments("box", "descr") + # def opimpl_getfield_gc_f(self, box, fielddescr): + # return self._opimpl_getfield_gc_any_pureornot( + # rop.GETFIELD_GC_F, box, fielddescr, 'f') + @arguments("box", "descr") def opimpl_getfield_gc_i(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_I, fielddescr, box) + return ConstInt(resbox) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_I, box, fielddescr, 
'i') + + @arguments("box", "descr") + def opimpl_getfield_gc_f(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + resvalue = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_F, fielddescr, box) + return ConstFloat(resvalue) + return self._opimpl_getfield_gc_any_pureornot( + rop.GETFIELD_GC_F, box, fielddescr, 'f') + @arguments("box", "descr") def opimpl_getfield_gc_r(self, box, fielddescr): + if fielddescr.is_always_pure() and isinstance(box, ConstPtr): + # if 'box' is directly a ConstPtr, bypass the heapcache completely + val = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC_R, fielddescr, box) + return ConstPtr(val) return self._opimpl_getfield_gc_any_pureornot( rop.GETFIELD_GC_R, box, fielddescr, 'r') - @arguments("box", "descr") - def opimpl_getfield_gc_f(self, box, fielddescr): - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_i_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resbox = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_I, fielddescr, box) - return ConstInt(resbox) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_I, box, fielddescr, 'i') - - @arguments("box", "descr") - def opimpl_getfield_gc_f_pure(self, box, fielddescr): - if isinstance(box, ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - resvalue = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_F, fielddescr, box) - return ConstFloat(resvalue) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_F, box, fielddescr, 'f') - - @arguments("box", "descr") - def opimpl_getfield_gc_r_pure(self, box, fielddescr): - if isinstance(box, 
ConstPtr): - # if 'box' is directly a ConstPtr, bypass the heapcache completely - val = executor.execute(self.metainterp.cpu, self.metainterp, - rop.GETFIELD_GC_PURE_R, fielddescr, box) - return ConstPtr(val) - return self._opimpl_getfield_gc_any_pureornot( - rop.GETFIELD_GC_PURE_R, box, fielddescr, 'r') + + opimpl_getfield_gc_i_pure = opimpl_getfield_gc_i + opimpl_getfield_gc_r_pure = opimpl_getfield_gc_r + opimpl_getfield_gc_f_pure = opimpl_getfield_gc_f @arguments("box", "box", "descr") def opimpl_getinteriorfield_gc_i(self, array, index, descr): @@ -743,7 +747,7 @@ @arguments("box", "descr", "orgpc") def _opimpl_getfield_gc_greenfield_any(self, box, fielddescr, pc): ginfo = self.metainterp.jitdriver_sd.greenfield_info - opnum = OpHelpers.getfield_pure_for_descr(fielddescr) + opnum = OpHelpers.getfield_for_descr(fielddescr) if (ginfo is not None and fielddescr in ginfo.green_field_descrs and not self._nonstandard_virtualizable(pc, box, fielddescr)): # fetch the result, but consider it as a Const box and don't @@ -2095,6 +2099,9 @@ resvalue = executor.execute(self.cpu, self, opnum, descr, *argboxes) if rop._ALWAYS_PURE_FIRST <= opnum <= rop._ALWAYS_PURE_LAST: return self._record_helper_pure(opnum, resvalue, descr, *argboxes) + if is_pure_getfield(opnum, descr): + # TODO Don't base purity of an operation solely on opnum + return self._record_helper_pure(opnum, resvalue, descr, *argboxes) if rop._OVF_FIRST <= opnum <= rop._OVF_LAST: return self._record_helper_ovf(opnum, resvalue, descr, *argboxes) return self._record_helper_nonpure_varargs(opnum, resvalue, descr, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -84,6 +84,10 @@ def get_forwarded(self): return self._forwarded +def is_pure_getfield(opnum, descr): + if opnum not in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R): + return False + return descr is not None and 
descr.is_always_pure() != False class AbstractResOp(AbstractResOpOrInputArg): """The central ResOperation class, representing one operation.""" @@ -267,9 +271,7 @@ return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) def is_getarrayitem(self): return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, @@ -340,6 +342,11 @@ _descr = None + def is_always_pure(self): + if self.is_getfield(): + return self._descr.is_always_pure() != False + return AbstractResOp.is_always_pure(self) + def getdescr(self): return self._descr @@ -770,7 +777,6 @@ 'ARRAYLEN_GC/1d/i', 'STRLEN/1/i', 'STRGETITEM/2/i', - 'GETFIELD_GC_PURE/1d/rfi', 'GETFIELD_RAW_PURE/1d/rfi', 'GETARRAYITEM_GC_PURE/2d/rfi', 'GETARRAYITEM_RAW_PURE/2d/fi', @@ -1110,14 +1116,6 @@ return rop.CALL_LOOPINVARIANT_N @staticmethod - def getfield_pure_for_descr(descr): - if descr.is_pointer_field(): - return rop.GETFIELD_GC_PURE_R - elif descr.is_float_field(): - return rop.GETFIELD_GC_PURE_F - return rop.GETFIELD_GC_PURE_I - - @staticmethod def getfield_for_descr(descr): if descr.is_pointer_field(): return rop.GETFIELD_GC_R diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -320,7 +320,7 @@ assert res == 252 self.check_trace_count(1) self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2, - 'getfield_gc_pure_i': 1, 'int_mul': 1, + 'getfield_gc_i': 1, 'int_mul': 1, 'guard_true': 2, 'int_sub': 2}) def test_loops_are_transient(self): @@ -1405,7 +1405,7 @@ return tup[1] res = self.interp_operations(f, [3, 5]) assert res == 5 - self.check_operations_history(setfield_gc=2, getfield_gc_pure_i=0) + 
self.check_operations_history(setfield_gc=2, getfield_gc_i=0) def test_oosend_look_inside_only_one(self): class A: @@ -2522,7 +2522,7 @@ if counter > 10: return 7 assert self.meta_interp(build, []) == 7 - self.check_resops(getfield_gc_pure_r=2) + self.check_resops(getfield_gc_r=2) def test_args_becomming_equal(self): myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a', 'b']) diff --git a/rpython/jit/metainterp/test/test_immutable.py b/rpython/jit/metainterp/test/test_immutable.py --- a/rpython/jit/metainterp/test/test_immutable.py +++ b/rpython/jit/metainterp/test/test_immutable.py @@ -19,7 +19,7 @@ return y.x + 5 res = self.interp_operations(f, [23]) assert res == 28 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=1, int_add=1) + self.check_operations_history(getfield_gc_i=1, int_add=1) def test_fields_subclass(self): class X(object): @@ -41,8 +41,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def f(x, y): # this time, the field 'x' only shows up on subclass 'Y' @@ -50,8 +49,7 @@ return z.x + z.y + 5 res = self.interp_operations(f, [23, 11]) assert res == 39 - self.check_operations_history(getfield_gc_i=0, getfield_gc_pure_i=2, - int_add=2) + self.check_operations_history(getfield_gc_i=2, int_add=2) def test_array(self): class X(object): @@ -66,8 +64,7 @@ return a.y[index] res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_index_error(self): class X(object): @@ -89,8 +86,7 @@ return a.get(index) res = self.interp_operations(f, [2], listops=True) assert res == 30 - self.check_operations_history(getfield_gc_r=0, 
getfield_gc_pure_r=1, - getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) + self.check_operations_history(getfield_gc_r=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1) def test_array_in_immutable(self): class X(object): @@ -106,8 +102,7 @@ return y.lst[index] + y.y + 5 res = self.interp_operations(f, [23, 0], listops=True) assert res == 23 + 24 + 5 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=1, - getfield_gc_pure_i=1, + self.check_operations_history(getfield_gc_r=1, getfield_gc_i=1, getarrayitem_gc_i=0, getarrayitem_gc_pure_i=1, int_add=3) diff --git a/rpython/jit/metainterp/test/test_quasiimmut.py b/rpython/jit/metainterp/test/test_quasiimmut.py --- a/rpython/jit/metainterp/test/test_quasiimmut.py +++ b/rpython/jit/metainterp/test/test_quasiimmut.py @@ -74,7 +74,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -101,7 +101,7 @@ res = self.meta_interp(f, [100, 7], enable_opts="") assert res == 700 # there should be no getfields, even though optimizations are turned off - self.check_resops(guard_not_invalidated=1, getfield_gc=0) + self.check_resops(guard_not_invalidated=1) def test_nonopt_1(self): myjitdriver = JitDriver(greens=[], reds=['x', 'total', 'lst']) @@ -124,8 +124,7 @@ assert f(100, 7) == 721 res = self.meta_interp(f, [100, 7]) assert res == 721 - self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, - getfield_gc_pure_i=2) + self.check_resops(guard_not_invalidated=0, getfield_gc_r=1, getfield_gc_i=2) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -156,7 +155,7 @@ # res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=2, getfield_gc=0) + self.check_resops(guard_not_invalidated=2) def test_change_during_tracing_1(self): myjitdriver = 
JitDriver(greens=['foo'], reds=['x', 'total']) @@ -208,7 +207,7 @@ assert f(100, 7) == 700 res = self.meta_interp(f, [100, 7]) assert res == 700 - self.check_resops(guard_not_invalidated=0, getfield_gc=0) + self.check_resops(guard_not_invalidated=0) def test_change_invalidate_reentering(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -234,7 +233,7 @@ assert g(100, 7) == 700707 res = self.meta_interp(g, [100, 7]) assert res == 700707 - self.check_resops(guard_not_invalidated=4, getfield_gc=0) + self.check_resops(guard_not_invalidated=4) def test_invalidate_while_running(self): jitdriver = JitDriver(greens=['foo'], reds=['i', 'total']) @@ -348,7 +347,7 @@ res = self.meta_interp(f, [100, 30]) assert res == 6019 self.check_resops(guard_not_invalidated=8, guard_not_forced=0, - call_may_force=0, getfield_gc=0) + call_may_force=0) def test_list_simple_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) @@ -374,8 +373,7 @@ getarrayitem_gc_pure_r=0, getarrayitem_gc_i=0, getarrayitem_gc_r=0, - getfield_gc_i=0, getfield_gc_pure_i=0, - getfield_gc_r=0, getfield_gC_pure_r=0) + getfield_gc_i=0, getfield_gc_r=0) # from rpython.jit.metainterp.warmspot import get_stats loops = get_stats().loops @@ -405,9 +403,7 @@ assert res == 700 # operations must have been removed by the frontend self.check_resops(getarrayitem_gc_pure_i=0, guard_not_invalidated=1, - getarrayitem_gc_i=0, - getfield_gc=0, getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + getarrayitem_gc_i=0, getfield_gc_i=0, getfield_gc_r=0) def test_list_length_1(self): myjitdriver = JitDriver(greens=['foo'], reds=['x', 'total']) diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -436,10 +436,10 @@ return p.x[0] + p.x[1] res = self.interp_operations(fn, [7]) assert res == 7 + 7 + 1 - 
self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == -7 - 7 + 1 - self.check_operations_history(getfield_gc_r=0, getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_r=0) def test_heap_caching_and_elidable_function(self): class A: @@ -517,12 +517,12 @@ return a1[0] + a2[0] + gn(a1, a2) res = self.interp_operations(fn, [7]) assert res == 2 * 7 + 2 * 6 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) res = self.interp_operations(fn, [-7]) assert res == 2 * -7 + 2 * -8 - self.check_operations_history(getfield_gc_pure_i=0, - getfield_gc_pure_r=0) + self.check_operations_history(getfield_gc_i=0, + getfield_gc_r=0) def test_heap_caching_multiple_arrays(self): class Gbl(object): From noreply at buildbot.pypy.org Fri Oct 16 17:18:16 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 17:18:16 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: rs encoding and rsy (extended version of rs) Message-ID: <20151016151816.A213C1C0165@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80279:bfd63406a3e9 Date: 2015-10-16 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/bfd63406a3e9/ Log: rs encoding and rsy (extended version of rs) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -38,10 +38,13 @@ r/m - register or mask iX - immediate X bits (signed) uX - immediate X bits (unsigend) - bd - base displacement + bd - base displacement (12 bit) + bdl - base displacement long (20 bit) ibd - index base displacement l4bd - length base displacement (4 bit) l8bd - length base displacement (8 bit) + + note that a suffix 'l' means long, and a prefix length """ 
def impl(func): func._arguments_ = args_str.split(',') @@ -55,12 +58,32 @@ @always_inline def encode_base_displace(mc, base_displace): - displace = base_displace.displace # & 0x3ff + """ + +---------------------------------+ + | ... | base | length[0:11] | ... | + +---------------------------------+ + """ + displace = base_displace.displace base = base_displace.base & 0xf byte = (displace >> 8 & 0xf) | base << 4 mc.writechar(chr(byte)) mc.writechar(chr(displace & 0xff)) + at always_inline +def encode_base_displace_long(mc, basedisp): + """ + +-------------------------------------------------+ + | ... | base | length[0:11] | length[12:20] | ... | + +-------------------------------------------------+ + """ + displace = basedisp.displace & 0xfffff + base = basedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + mc.writechar(chr(byte)) + mc.writechar(chr(displace & 0xff)) + byte = displace >> 12 & 0xff + mc.writechar(chr(byte)) + def build_rr(mnemonic, (opcode,)): @builder.arguments('r,r') def encode_rr(self, reg1, reg2): @@ -101,13 +124,7 @@ index = idxbasedisp.index byte = (reg_or_mask & 0x0f) << 4 | index & 0xf self.writechar(chr(byte)) - displace = idxbasedisp.displace & 0xfffff - base = idxbasedisp.base & 0xf - byte = displace >> 8 & 0xf | base << 4 - self.writechar(chr(byte)) - self.writechar(chr(displace & 0xff)) - byte = displace >> 12 & 0xff - self.writechar(chr(byte)) + encode_base_displace_long(self, idxbasedisp) self.writechar(opcode2) return encode_rxy @@ -122,7 +139,7 @@ return encode_ri def build_ril(mnemonic, (opcode,halfopcode)): - @builder.arguments('r/m,a32') + @builder.arguments('r/m,i32') def encode_ri(self, reg_or_mask, imm32): self.writechar(opcode) byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) @@ -209,6 +226,23 @@ encode_base_displace(self, len_base_disp) return encode_ssf +def build_rs(mnemonic, (opcode,)): + @builder.arguments('r,r,bd') + def encode_rs(self, reg1, reg3, base_displace): + self.writechar(opcode) + 
self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) + encode_base_displace(self, base_displace) + return encode_rs + +def build_rsy(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r,bdl') + def encode_ssa(self, reg1, reg3, base_displace): + self.writechar(opcode1) + self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) + encode_base_displace_long(self, base_displace) + self.writechar(opcode2) + return encode_ssa + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -227,6 +261,8 @@ 'LMD': (build_sse, ['\xEF']), 'PKA': (build_ssf, ['\xE9']), 'BRASL': (build_ril, ['\xC0','\x05']), + 'BXH': (build_rs, ['\x86']), + 'BXHG': (build_rsy, ['\xEB','\x44']), } def build_instr_codes(clazz): diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -102,7 +102,7 @@ __repr__ = __str__ -def test_range(bits, signed=False, count=24, alignment=0): +def test_range(bits, signed=False, count=24): if isinstance(bits, tuple): bits, signed = bits if signed: @@ -132,7 +132,6 @@ 'i8': test_range(8, signed=True), 'i16': test_range(16, signed=True), 'i32': test_range(32, signed=True), - 'a32': test_range(32, signed=True, alignment=16), 'i64': test_range(64, signed=True), 'u4': test_range(4), 'u8': test_range(8), @@ -140,6 +139,7 @@ 'u32': test_range(32), 'u64': test_range(64), 'bd': build_fake(FakeBaseDisplace,4,12), + 'bdl': build_fake(FakeBaseDisplace,4,19), 'ibd': build_fake(FakeIndexBaseDisplace,4,4,12), 'ibdl': build_fake(FakeIndexBaseDisplace,4,4,(20,True)), 'l8bd': build_fake(FakeLengthBaseDisplace,8,4,12), From noreply at buildbot.pypy.org Fri Oct 16 17:32:30 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 17:32:30 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: rsi encoding e.g. 
branch relative long on index high Message-ID: <20151016153230.755EB1C12DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80280:59735521f42f Date: 2015-10-16 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/59735521f42f/ Log: rsi encoding e.g. branch relative long on index high diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -53,6 +53,7 @@ BIT_MASK_4 = 0xF BIT_MASK_12 = 0xFFF +BIT_MASK_16 = 0xFFFF BIT_MASK_20 = 0xFFFFF BIT_MASK_32 = 0xFFFFFFFF @@ -145,7 +146,7 @@ byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) self.writechar(chr(byte)) # half word boundary, addressing bytes - self.write_s32(imm32 >> 1 & BIT_MASK_32) + self.write_i32(imm32 >> 1 & BIT_MASK_32) return encode_ri @@ -243,6 +244,15 @@ self.writechar(opcode2) return encode_ssa +def build_rsi(mnemonic, (opcode,)): + @builder.arguments('r,r,i16') + def encode_ri(self, reg1, reg2, imm16): + self.writechar(opcode) + byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) + self.writechar(chr(byte)) + self.write_i16(imm16 >> 1 & BIT_MASK_16) + return encode_ri + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -263,6 +273,7 @@ 'BRASL': (build_ril, ['\xC0','\x05']), 'BXH': (build_rs, ['\x86']), 'BXHG': (build_rsy, ['\xEB','\x44']), + 'BRXH': (build_rsi, ['\x84']), } def build_instr_codes(clazz): @@ -272,11 +283,14 @@ setattr(clazz, name, func) class AbstractZARCHBuilder(object): - def write_s32(self, word): + def write_i32(self, word): self.writechar(chr((word >> 24) & 0xFF)) self.writechar(chr((word >> 16) & 0xFF)) self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) + def write_i16(self, word): + self.writechar(chr((word >> 8) & 0xFF)) + self.writechar(chr(word & 0xFF)) build_instr_codes(AbstractZARCHBuilder) From noreply at buildbot.pypy.org Fri Oct 16 
17:35:50 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Fri, 16 Oct 2015 17:35:50 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: rie extended version of rsi Message-ID: <20151016153550.264971C12DC@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80281:656f0a91ab03 Date: 2015-10-16 17:36 +0200 http://bitbucket.org/pypy/pypy/changeset/656f0a91ab03/ Log: rie extended version of rsi diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -253,6 +253,17 @@ self.write_i16(imm16 >> 1 & BIT_MASK_16) return encode_ri +def build_rie(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r,i16') + def encode_ri(self, reg1, reg2, imm16): + self.writechar(opcode1) + byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) + self.writechar(chr(byte)) + self.write_i16(imm16 >> 1 & BIT_MASK_16) + self.writechar(chr(0x0)) + self.writechar(opcode2) + return encode_ri + _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), 'AGR': (build_rre, ['\xB9\x08']), @@ -274,6 +285,7 @@ 'BXH': (build_rs, ['\x86']), 'BXHG': (build_rsy, ['\xEB','\x44']), 'BRXH': (build_rsi, ['\x84']), + 'BRXLG': (build_rie, ['\xEC','\x45']), } def build_instr_codes(clazz): From noreply at buildbot.pypy.org Fri Oct 16 17:47:10 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 Oct 2015 17:47:10 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: Use get_param() in maybe_return_immutable_list() Message-ID: <20151016154710.3FB561C1248@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80282:6faf42db48c9 Date: 2015-10-16 16:07 +0100 http://bitbucket.org/pypy/pypy/changeset/6faf42db48c9/ Log: Use get_param() in maybe_return_immutable_list() diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ 
b/rpython/annotator/classdesc.py @@ -769,9 +769,9 @@ search2 = '%s?[*]' % (attr,) cdesc = self while cdesc is not None: - if '_immutable_fields_' in cdesc.classdict: - if (search1 in cdesc.classdict['_immutable_fields_'].value or - search2 in cdesc.classdict['_immutable_fields_'].value): + immutable_fields = cdesc.get_param('_immutable_fields_', inherit=False) + if immutable_fields is not None: + if (search1 in immutable_fields or search2 in immutable_fields): s_result.listdef.never_resize() s_copy = s_result.listdef.offspring() s_copy.listdef.mark_as_immutable() From noreply at buildbot.pypy.org Fri Oct 16 17:47:12 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Fri, 16 Oct 2015 17:47:12 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: read stuff directly from the actual class dict in get_param() Message-ID: <20151016154712.708171C1248@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80283:36d40d759dfc Date: 2015-10-16 16:47 +0100 http://bitbucket.org/pypy/pypy/changeset/36d40d759dfc/ Log: read stuff directly from the actual class dict in get_param() diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py --- a/rpython/annotator/classdesc.py +++ b/rpython/annotator/classdesc.py @@ -689,16 +689,11 @@ return cdesc def get_param(self, name, default=None, inherit=True): + cls = self.pyobj if inherit: - try: - return self.read_attribute(name).value - except AttributeError: - return default + return getattr(cls, name, default) else: - try: - return self.classdict[name].value - except KeyError: - return default + return cls.__dict__.get(name, default) def read_attribute(self, name, default=NODEFAULT): cdesc = self.lookup(name) From noreply at buildbot.pypy.org Fri Oct 16 17:51:05 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 17:51:05 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Adapt to the new version of rawrefcount.rst Message-ID: 
<20151016155105.9DB7D1C1248@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80284:d6b9e7532feb Date: 2015-10-16 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/d6b9e7532feb/ Log: Adapt to the new version of rawrefcount.rst diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -1,7 +1,7 @@ # # See documentation in pypy/doc/discussion/rawrefcount.rst # -import weakref +import sys, weakref from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.extregistry import ExtRegistryEntry @@ -9,14 +9,14 @@ from rpython.rlib import rgc -REFCNT_FROM_PYPY_OBJECT = 80 # == 0x50 +REFCNT_FROM_PYPY = 80 +REFCNT_FROM_PYPY_DIRECT = REFCNT_FROM_PYPY + (sys.maxint//2+1) def _reset_state(): - global _p_list, _o_list, _s_list, _adr2pypy, _pypy2ob + global _p_list, _o_list, _adr2pypy, _pypy2ob _p_list = [] # not rpython _o_list = [] # not rpython - _s_list = [] # not rpython _adr2pypy = [None] # not rpython _pypy2ob = {} # not rpython _reset_state() @@ -32,7 +32,6 @@ assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) - ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT _pypy2ob[p] = ob _p_list.append(ob) @@ -42,18 +41,8 @@ assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) - ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT _o_list.append(ob) -def create_link_shared(p, ob): - """NOT_RPYTHON: a link where both p and ob contain some data. - from_obj() will not work on this 'p'.""" - assert p not in _pypy2ob - assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = _build_pypy_link(p) - ob.c_ob_refcnt += REFCNT_FROM_PYPY_OBJECT - _s_list.append(ob) - def from_obj(OB_PTR_TYPE, p): "NOT_RPYTHON" ob = _pypy2ob.get(p) @@ -81,7 +70,7 @@ from the O list. 
""" def detach(ob, wr_list): - assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY assert ob.c_ob_pypy_link p = _adr2pypy[ob.c_ob_pypy_link] assert p is not None @@ -93,7 +82,7 @@ wr_p_list = [] new_p_list = [] for ob in _p_list: - if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: + if ob.c_ob_refcnt not in (REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_DIRECT): new_p_list.append(ob) else: p = detach(ob, wr_p_list) @@ -102,16 +91,6 @@ ob = None _p_list = Ellipsis - wr_s_list = [] - new_s_list = [] - for ob in _s_list: - if ob.c_ob_refcnt > REFCNT_FROM_PYPY_OBJECT: - new_s_list.append(ob) - else: - detach(ob, wr_s_list) - ob = None - _s_list = Ellipsis - wr_o_list = [] for ob in _o_list: detach(ob, wr_o_list) @@ -122,7 +101,7 @@ rgc.collect() def attach(ob, wr, final_list): - assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY_OBJECT + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY p = wr() if p is not None: assert ob.c_ob_pypy_link @@ -130,23 +109,23 @@ final_list.append(ob) return p else: - ob.c_ob_refcnt -= REFCNT_FROM_PYPY_OBJECT ob.c_ob_pypy_link = 0 - if ob.c_ob_refcnt == 0 and dealloc is not None: - dealloc.append(ob) + if ob.c_ob_refcnt == REFCNT_FROM_PYPY_DIRECT: + pass # freed + elif ob.c_ob_refcnt > REFCNT_FROM_PYPY_DIRECT: + ob.c_ob_refcnt -= REFCNT_FROM_PYPY_DIRECT + else: + ob.c_ob_refcnt -= REFCNT_FROM_PYPY + if ob.c_ob_refcnt == 0: + dealloc.append(ob) return None + dealloc = [] _p_list = new_p_list - dealloc = None for ob, wr in wr_p_list: p = attach(ob, wr, _p_list) if p: _pypy2ob[p] = ob - # - dealloc = [] - _s_list = new_s_list - for ob, wr in wr_s_list: - attach(ob, wr, _s_list) _o_list = [] for ob, wr in wr_o_list: attach(ob, wr, _o_list) diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -1,5 +1,6 @@ import weakref from rpython.rlib import rawrefcount +from rpython.rlib.rawrefcount import 
REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_DIRECT from rpython.rtyper.lltypesystem import lltype, llmemory class W_Root(object): @@ -24,6 +25,8 @@ assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pypy(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount.from_obj(PyObject, p) == ob assert rawrefcount.to_obj(W_Root, ob) == p @@ -34,16 +37,8 @@ assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pyobj(p, ob) - assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) - assert rawrefcount.to_obj(W_Root, ob) == p - - def test_create_link_shared(self): - p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) - assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) - assert rawrefcount.to_obj(W_Root, ob) == None - rawrefcount.create_link_shared(p, ob) + assert ob.c_ob_refcnt == 0 + ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p @@ -52,6 +47,7 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -66,6 +62,7 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -84,6 +81,7 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains @@ 
-99,6 +97,7 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -116,6 +115,7 @@ track_allocation=False) p.pyobj = ob rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) @@ -135,6 +135,7 @@ track_allocation=False) p.pyobj = ob rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains @@ -150,8 +151,9 @@ p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) - rawrefcount.create_link_shared(p, ob) - assert rawrefcount._s_list == [ob] + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) del ob, p @@ -159,7 +161,7 @@ ob = wr_ob() assert ob is not None assert dealloc == [ob] - assert rawrefcount._s_list == [] + assert rawrefcount._p_list == [] assert wr_p() is None def test_collect_s_keepalive_pyobject(self): @@ -167,8 +169,9 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) p.pyobj = ob - rawrefcount.create_link_shared(p, ob) - assert rawrefcount._s_list == [ob] + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) ob.c_ob_refcnt += 1 # <= @@ -177,7 +180,7 @@ ob = wr_ob() p = wr_p() assert ob is not None and p is not None - assert rawrefcount._s_list == [ob] + assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p def test_collect_s_keepalive_w_root(self): @@ -185,13 +188,14 @@ ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, track_allocation=False) p.pyobj = ob - 
rawrefcount.create_link_shared(p, ob) - assert rawrefcount._s_list == [ob] + rawrefcount.create_link_pypy(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains dealloc = rawrefcount._collect() assert dealloc == [] ob = wr_ob() assert ob is not None - assert rawrefcount._s_list == [ob] + assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p From noreply at buildbot.pypy.org Fri Oct 16 17:51:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 17:51:07 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Start implementing rawrefcount in the GC Message-ID: <20151016155107.CE37E1C1248@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80285:62d0fd30e60a Date: 2015-10-16 17:51 +0200 http://bitbucket.org/pypy/pypy/changeset/62d0fd30e60a/ Log: Start implementing rawrefcount in the GC diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1080,35 +1080,19 @@ "odd-valued (i.e. tagged) pointer unexpected here") return self.nursery <= addr < self.nursery + self.nursery_size - def appears_to_be_young(self, addr): - # "is a valid addr to a young object?" - # but it's ok to occasionally return True accidentally. - # Maybe the best implementation would be a bloom filter - # of some kind instead of the dictionary lookup that is - # sometimes done below. But the expected common answer - # is "Yes" because addr points to the nursery, so it may - # not be useful to optimize the other case too much. - # - # First, if 'addr' appears to be a pointer to some place within - # the nursery, return True - if not self.translated_to_c: - # When non-translated, filter out tagged pointers explicitly. - # When translated, it may occasionally give a wrong answer - # of True if 'addr' is a tagged pointer with just the wrong value. 
- if not self.is_valid_gc_object(addr): - return False - + def is_young_object(self, addr): + # Check if the object at 'addr' is young. + if not self.is_valid_gc_object(addr): + return False # filter out tagged pointers explicitly. if self.nursery <= addr < self.nursery_top: return True # addr is in the nursery - # # Else, it may be in the set 'young_rawmalloced_objects' return (bool(self.young_rawmalloced_objects) and self.young_rawmalloced_objects.contains(addr)) - appears_to_be_young._always_inline_ = True def debug_is_old_object(self, addr): return (self.is_valid_gc_object(addr) - and not self.appears_to_be_young(addr)) + and not self.is_young_object(addr)) def is_forwarded(self, obj): """Returns True if the nursery obj is marked as forwarded. @@ -2745,3 +2729,41 @@ (obj + offset).address[0] = llmemory.NULL self.old_objects_with_weakrefs.delete() self.old_objects_with_weakrefs = new_with_weakref + + + # ---------- + # RawRefCount + + OB_REFCNT = 0 + OB_PYPY_LINK = 1 + + rrc_enabled = False + + def rawrefcount_init(self): + # see pypy/doc/discussion/rawrefcount.rst + if not self.rrc_enabled: + self.rrc_p_list_young = self.AddressStack() + self.rrc_p_list_old = self.AddressStack() + self.rrc_o_list_young = self.AddressStack() + self.rrc_o_list_old = self.AddressStack() + self.rrc_dict = self.AddressDict() + self.rrc_enabled = True + + def rawrefcount_create_link_pypy(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_young_object(obj): + self.rrc_p_list_young.append(obj) + else: + self.rrc_p_list_old.append(obj) + objint = llmemory.cast_adr_to_int(obj, mode="symbolic") + pyobject.signed[self.OB_PYPY_LINK] = objint + self.rrc_dict.setitem(obj, pyobject) + + def rawrefcount_from_obj(self, gcobj): + obj = llmemory.cast_ptr_to_adr(gcobj) + return self.rrc_dict.get(obj) + + def rawrefcount_to_obj(self, pyobject): + obj = llmemory.cast_int_to_adr(pyobject.signed[self.OB_PYPY_LINK]) + 
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py new file mode 100644 --- /dev/null +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -0,0 +1,115 @@ +from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.memory.gc.incminimark import IncrementalMiniMarkGC +from rpython.memory.gc.test.test_direct import BaseDirectGCTest +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT + +PYOBJ_HDR = lltype.Array(lltype.Signed, hints={'nolength': True}) +PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) + +OB_REFCNT = IncrementalMiniMarkGC.OB_REFCNT +OB_PYPY_LINK = IncrementalMiniMarkGC.OB_PYPY_LINK + +S = lltype.GcForwardReference() +S.become(lltype.GcStruct('S', + ('x', lltype.Signed), + ('prev', lltype.Ptr(S)), + ('next', lltype.Ptr(S)))) + + +class TestRawRefCount(BaseDirectGCTest): + GCClass = IncrementalMiniMarkGC + + def _rawrefcount_pair(self, intval, is_direct=False, is_pyobj=False): + if is_direct: + rc = REFCNT_FROM_PYPY_DIRECT + else: + rc = REFCNT_FROM_PYPY + # + p1 = self.malloc(S) + p1.x = intval + p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) + r1 = lltype.malloc(PYOBJ_HDR, 3, flavor='raw') + r1[OB_REFCNT] = rc + r1[OB_PYPY_LINK] = 0 + r1addr = llmemory.cast_ptr_to_adr(r1) + self.gc.rawrefcount_init() + if is_pyobj: + assert not is_direct + self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) + else: + self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) + assert r1[OB_REFCNT] == rc + assert r1[OB_PYPY_LINK] != 0 + return p1, p1ref, r1, r1addr + + def test_rawrefcount_objects_basic(self): + p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + p2 = self.malloc(S) + p2.x = 84 + p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) + r2 = lltype.malloc(PYOBJ_HDR, 3, flavor='raw') + r2[0] = 1 + r2[1] = 0 + r2addr = llmemory.cast_ptr_to_adr(r2) + # p2 and r2 are not linked + 
assert r1[1] != 0 + assert r2[1] == 0 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL + assert self.gc.rawrefcount_to_obj(r1addr) == p1ref + assert self.gc.rawrefcount_to_obj(r2addr) == lltype.nullptr( + llmemory.GCREF.TO) + lltype.free(r1, flavor='raw') + lltype.free(r2, flavor='raw') + + def test_rawrefcount_objects_collection_survives_from_raw(self): + for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: + p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + assert r1.c_ob_refcnt == REFCNT_FROM_PYPY_DIRECT + r1.ob_refcnt += 1 + do_collect() + assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT + 1 + assert r1.ob_pypy_link != llmemory.NULL + p1ref = self.gc.rawrefcount_to_obj(r1addr) + assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + + def test_rawrefcount_objects_collection_survives_from_obj(self): + for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: + p1, p1ref, r1, r1addr = self._rawrefcount_pair(42) + assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT + self.stackroots.append(p1) + do_collect() + assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT + assert r1.ob_pypy_link != llmemory.NULL + p1ref = self.gc.rawrefcount_to_obj(r1addr) + assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + + def test_rawrefcount_objects_collection_dies(self): + p1, p1ref, r1, r1addr = self._rawrefcount_pair(43) + seen = [] + self.gc.rawrefcount_set_callback(seen.append) + self.gc.minor_collection() + assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT + assert r1.ob_pypy_link != llmemory.NULL + p1ref = self.gc.rawrefcount_to_obj(r1addr) + assert seen == [p1ref] + assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 43 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + # + del seen[:] + self.gc.minor_collection() + assert seen == [] + 
self.gc.collect() + assert seen == [p1ref] + assert r1.ob_pypy_link == llmemory.cast_ptr_to_adr(p1ref) + + def test_rawrefcount_objects_detach(self): + p1, p1ref, r1, r1addr = self._rawrefcount_pair(43) + self.gc.rawrefcount_detach(r1addr) + assert r1.ob_pypy_link == llmemory.NULL + assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL + assert self.gc.rawrefcount_to_obj(r1addr) == lltype.nullptr( + llmemory.GCREF.TO) diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -5,7 +5,6 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.extregistry import ExtRegistryEntry -from rpython.rtyper import annlowlevel from rpython.rlib import rgc @@ -13,22 +12,22 @@ REFCNT_FROM_PYPY_DIRECT = REFCNT_FROM_PYPY + (sys.maxint//2+1) -def _reset_state(): - global _p_list, _o_list, _adr2pypy, _pypy2ob - _p_list = [] # not rpython - _o_list = [] # not rpython - _adr2pypy = [None] # not rpython - _pypy2ob = {} # not rpython -_reset_state() - def _build_pypy_link(p): res = len(_adr2pypy) _adr2pypy.append(p) return res +def init(): + "NOT_RPYTHON: set up rawrefcount with the GC" + global _p_list, _o_list, _adr2pypy, _pypy2ob + _p_list = [] + _o_list = [] + _adr2pypy = [None] + _pypy2ob = {} + def create_link_pypy(p, ob): - "NOT_RPYTHON: a link where the PyPy object contains all the data" + "NOT_RPYTHON: a link where the PyPy object contains some or all the data" assert p not in _pypy2ob assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) @@ -51,18 +50,14 @@ assert lltype.typeOf(ob) == OB_PTR_TYPE return ob - at specialize.arg(0) def to_obj(Class, ob): + "NOT_RPYTHON" link = ob.c_ob_pypy_link - if we_are_translated(): - pypy_gcref = lltype.cast_int_to_ptr(llmemory.GCREF, link) - return annlowlevel.cast_gcref_to_instance(Class, pypy_gcref) - else: - if link == 0: - return None - p = 
_adr2pypy[link] - assert isinstance(p, Class) - return p + if link == 0: + return None + p = _adr2pypy[link] + assert isinstance(p, Class) + return p def _collect(): """NOT_RPYTHON: for tests only. Emulates a GC collection. @@ -78,7 +73,7 @@ wr_list.append((ob, weakref.ref(p))) return p - global _p_list, _o_list, _s_list + global _p_list, _o_list wr_p_list = [] new_p_list = [] for ob in _p_list: @@ -133,5 +128,60 @@ # ____________________________________________________________ -## class Entry(ExtRegistryEntry): -## _about_ = create_link_from_pypy +class Entry(ExtRegistryEntry): + _about_ = init + + def compute_result_annotation(self): + pass + + def specialize_call(self, hop): + hop.exception_cannot_occur() + hop.genop('gc_rawrefcount_init', []) + +class Entry(ExtRegistryEntry): + _about_ = (create_link_pypy, create_link_pyobj) + + def compute_result_annotation(self, s_p, s_ob): + pass + + def specialize_call(self, hop): + if self.instance is create_link_pypy: + name = 'gc_rawrefcount_create_link_pypy' + elif self.instance is create_link_pyobj: + name = 'gc_rawrefcount_create_link_pyobj' + hop.exception_cannot_occur() + hop.genop(name, hop.args_v) + +class Entry(ExtRegistryEntry): + _about_ = from_obj + + def compute_result_annotation(self, s_OB_PTR_TYPE, s_p): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert (isinstance(s_p, annmodel.SomeInstance) or + annmodel.s_None.contains(s_p)) + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + [v_p] = hop.inputargs(hop.args_r[1].lowleveltype) + return hop.genop('gc_rawrefcount_from_obj', [v_p], + resulttype = hop.r_result.lowleveltype) + +class Entry(ExtRegistryEntry): + _about_ = to_obj + + def compute_result_annotation(self, s_Class, s_ob): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr + 
assert isinstance(s_ob, SomePtr) + assert s_Class.is_constant() + classdef = self.bookkeeper.getuniqueclassdef(s_Class.const) + return annmodel.SomeInstance(classdef, can_be_None=True) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.inputargs(hop.args_r[1].lowleveltype) + return hop.genop('gc_rawrefcount_to_obj', [v_ob], + resulttype = hop.r_result.lowleveltype) diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -16,7 +16,7 @@ class TestRawRefCount: def setup_method(self, meth): - rawrefcount._reset_state() + rawrefcount.init() def test_create_link_pypy(self): p = W_Root(42) diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py --- a/rpython/rtyper/lltypesystem/lloperation.py +++ b/rpython/rtyper/lltypesystem/lloperation.py @@ -502,6 +502,12 @@ 'gc_gcflag_extra' : LLOp(), 'gc_add_memory_pressure': LLOp(), + 'gc_rawrefcount_init': LLOp(), + 'gc_rawrefcount_create_link_pypy': LLOp(), + 'gc_rawrefcount_create_link_pyobj': LLOp(), + 'gc_rawrefcount_from_obj': LLOp(sideeffects=False), + 'gc_rawrefcount_to_obj': LLOp(sideeffects=False), + # ------- JIT & GC interaction, only for some GCs ---------- 'gc_adr_of_nursery_free' : LLOp(), From noreply at buildbot.pypy.org Fri Oct 16 19:08:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 19:08:58 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Pass one test and a half Message-ID: <20151016170858.F35451C12D6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80286:937683219fcd Date: 2015-10-16 19:09 +0200 http://bitbucket.org/pypy/pypy/changeset/937683219fcd/ Log: Pass one test and a half diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ 
-1602,6 +1602,10 @@ self._visit_old_objects_pointing_to_pinned, None) current_old_objects_pointing_to_pinned.delete() # + # visit the P list from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_trace() + # while True: # If we are using card marking, do a partial trace of the arrays # that are flagged with GCFLAG_CARDS_SET. @@ -1650,6 +1654,10 @@ if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() # + # visit the P and O lists from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_free() + # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery @@ -2734,10 +2742,16 @@ # ---------- # RawRefCount - OB_REFCNT = 0 - OB_PYPY_LINK = 1 + rrc_enabled = False - rrc_enabled = False + _ADDRARRAY = lltype.Array(llmemory.Address, hints={'nolength': True}) + PYOBJ_HDR = lltype.Struct('GCHdr_PyObject', + ('ob_refcnt', lltype.Signed), + ('ob_pypy_link', lltype.Signed)) + PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) + + def _pyobj(self, pyobjaddr): + return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) def rawrefcount_init(self): # see pypy/doc/discussion/rawrefcount.rst @@ -2747,17 +2761,20 @@ self.rrc_o_list_young = self.AddressStack() self.rrc_o_list_old = self.AddressStack() self.rrc_dict = self.AddressDict() + p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', + track_allocation=False) + self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) self.rrc_enabled = True def rawrefcount_create_link_pypy(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") obj = llmemory.cast_ptr_to_adr(gcobj) if self.is_young_object(obj): - self.rrc_p_list_young.append(obj) + self.rrc_p_list_young.append(pyobject) else: - self.rrc_p_list_old.append(obj) + self.rrc_p_list_old.append(pyobject) objint = llmemory.cast_adr_to_int(obj, mode="symbolic") 
- pyobject.signed[self.OB_PYPY_LINK] = objint + self._pyobj(pyobject).ob_pypy_link = objint self.rrc_dict.setitem(obj, pyobject) def rawrefcount_from_obj(self, gcobj): @@ -2765,5 +2782,67 @@ return self.rrc_dict.get(obj) def rawrefcount_to_obj(self, pyobject): - obj = llmemory.cast_int_to_adr(pyobject.signed[self.OB_PYPY_LINK]) + obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) + + + def rrc_minor_collection_trace(self): + self.rrc_p_list_young.foreach(self._rrc_minor_trace, + self.rrc_singleaddr) + + def _rrc_minor_trace(self, pyobject, singleaddr): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_DIRECT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + singleaddr.address[0] = llmemory.cast_int_to_adr(intobj) + self._trace_drag_out(singleaddr, llmemory.NULL) + + def rrc_minor_collection_free(self): + lst = self.rrc_p_list_young + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_p_list_old) + + def _rrc_minor_free(self, pyobject, add_into_list): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT + # + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.is_in_nursery(obj): + if self.is_forwarded(obj): + # Common case: survives and moves + obj = self.get_forwarding_address(obj) + intobj = llmemory.cast_adr_to_int(obj, mode="symbolic") + self._pyobj(pyobject).ob_pypy_link = intobj + self.rrc_dict.setitem(obj, pyobject) + surviving = True + else: + surviving = False + elif (bool(self.young_rawmalloced_objects) and + self.young_rawmalloced_objects.contains(pointing_to)): + # young weakref to a young raw-malloced 
object + if self.header(pointing_to).tid & GCFLAG_VISITED_RMY: + surviving = True # survives, but does not move + else: + surviving = False + # + if surviving: + add_into_list.append(obj) + else: + rc = self._pyobj(pyobject).ob_refcnt + if rc == self.REFCNT_FROM_PYPY_DIRECT: + llmemory.raw_free(pyobject) + else: + if rc > self.REFCNT_FROM_PYPY_DIRECT: + rc -= self.REFCNT_FROM_PYPY_DIRECT + else: + rc -= self.REFCNT_FROM_PYPY + if rc == 0: + xxx # _Py_Dealloc(pyobject) + self._pyobj(pyobject).ob_refcnt = rc diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -4,11 +4,8 @@ from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT -PYOBJ_HDR = lltype.Array(lltype.Signed, hints={'nolength': True}) -PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) - -OB_REFCNT = IncrementalMiniMarkGC.OB_REFCNT -OB_PYPY_LINK = IncrementalMiniMarkGC.OB_PYPY_LINK +PYOBJ_HDR = IncrementalMiniMarkGC.PYOBJ_HDR +PYOBJ_HDR_PTR = IncrementalMiniMarkGC.PYOBJ_HDR_PTR S = lltype.GcForwardReference() S.become(lltype.GcStruct('S', @@ -29,9 +26,9 @@ p1 = self.malloc(S) p1.x = intval p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) - r1 = lltype.malloc(PYOBJ_HDR, 3, flavor='raw') - r1[OB_REFCNT] = rc - r1[OB_PYPY_LINK] = 0 + r1 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r1.ob_refcnt = rc + r1.ob_pypy_link = 0 r1addr = llmemory.cast_ptr_to_adr(r1) self.gc.rawrefcount_init() if is_pyobj: @@ -39,8 +36,8 @@ self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) else: self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) - assert r1[OB_REFCNT] == rc - assert r1[OB_PYPY_LINK] != 0 + assert r1.ob_refcnt == rc + assert r1.ob_pypy_link != 0 return p1, p1ref, r1, r1addr def test_rawrefcount_objects_basic(self): @@ -48,13 +45,13 @@ p2 = self.malloc(S) p2.x = 84 p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) - r2 = 
lltype.malloc(PYOBJ_HDR, 3, flavor='raw') - r2[0] = 1 - r2[1] = 0 + r2 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r2.ob_refcnt = 1 + r2.ob_pypy_link = 0 r2addr = llmemory.cast_ptr_to_adr(r2) # p2 and r2 are not linked - assert r1[1] != 0 - assert r2[1] == 0 + assert r1.ob_pypy_link != 0 + assert r2.ob_pypy_link == 0 assert self.gc.rawrefcount_from_obj(p1ref) == r1addr assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL assert self.gc.rawrefcount_to_obj(r1addr) == p1ref @@ -66,11 +63,11 @@ def test_rawrefcount_objects_collection_survives_from_raw(self): for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) - assert r1.c_ob_refcnt == REFCNT_FROM_PYPY_DIRECT + assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT r1.ob_refcnt += 1 do_collect() - assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT + 1 - assert r1.ob_pypy_link != llmemory.NULL + assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + 1 + assert r1.ob_pypy_link != 0 p1ref = self.gc.rawrefcount_to_obj(r1addr) assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 assert self.gc.rawrefcount_from_obj(p1ref) == r1addr From noreply at buildbot.pypy.org Fri Oct 16 19:41:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 19:41:17 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Second half of the test Message-ID: <20151016174117.B84011C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80287:15ef951bf126 Date: 2015-10-16 19:41 +0200 http://bitbucket.org/pypy/pypy/changeset/15ef951bf126/ Log: Second half of the test diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2170,9 +2170,13 @@ # finalizers/weak references are rare and short which means that # they do not need a separate state and do not need to be # made incremental. 
+ # For now, the same applies to rawrefcount'ed objects. if (not self.objects_to_trace.non_empty() and not self.more_objects_to_trace.non_empty()): # + if self.rrc_enabled: + self.rrc_major_collection_trace() + # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() elif self.old_objects_with_weakrefs.non_empty(): @@ -2207,6 +2211,10 @@ self.old_objects_pointing_to_pinned = \ new_old_objects_pointing_to_pinned self.updated_old_objects_pointing_to_pinned = True + # + if self.rrc_enabled: + self.rrc_major_collection_free() + # self.gc_state = STATE_SWEEPING #END MARKING elif self.gc_state == STATE_SWEEPING: @@ -2760,7 +2768,7 @@ self.rrc_p_list_old = self.AddressStack() self.rrc_o_list_young = self.AddressStack() self.rrc_o_list_old = self.AddressStack() - self.rrc_dict = self.AddressDict() + self.rrc_p_dict = self.AddressDict() p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', track_allocation=False) self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) @@ -2775,11 +2783,11 @@ self.rrc_p_list_old.append(pyobject) objint = llmemory.cast_adr_to_int(obj, mode="symbolic") self._pyobj(pyobject).ob_pypy_link = objint - self.rrc_dict.setitem(obj, pyobject) + self.rrc_p_dict.setitem(obj, pyobject) def rawrefcount_from_obj(self, gcobj): obj = llmemory.cast_ptr_to_adr(gcobj) - return self.rrc_dict.get(obj) + return self.rrc_p_dict.get(obj) def rawrefcount_to_obj(self, pyobject): obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) @@ -2808,10 +2816,7 @@ while lst.non_empty(): self._rrc_minor_free(lst.pop(), self.rrc_p_list_old) - def _rrc_minor_free(self, pyobject, add_into_list): - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT - # + def _rrc_minor_free(self, pyobject, surviving_list): intobj = self._pyobj(pyobject).ob_pypy_link obj = llmemory.cast_int_to_adr(intobj) if self.is_in_nursery(obj): @@ -2820,7 +2825,7 @@ obj = self.get_forwarding_address(obj) intobj = 
llmemory.cast_adr_to_int(obj, mode="symbolic") self._pyobj(pyobject).ob_pypy_link = intobj - self.rrc_dict.setitem(obj, pyobject) + self.rrc_p_dict.setitem(obj, pyobject) surviving = True else: surviving = False @@ -2833,16 +2838,65 @@ surviving = False # if surviving: - add_into_list.append(obj) + surviving_list.append(pyobject) else: - rc = self._pyobj(pyobject).ob_refcnt - if rc == self.REFCNT_FROM_PYPY_DIRECT: - llmemory.raw_free(pyobject) + self._rrc_free(pyobject) + + def _rrc_free(self, pyobject): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY_DIRECT: + lltype.free(self._pyobj(pyobject), flavor='raw') + else: + if rc > REFCNT_FROM_PYPY_DIRECT: + rc -= REFCNT_FROM_PYPY_DIRECT else: - if rc > self.REFCNT_FROM_PYPY_DIRECT: - rc -= self.REFCNT_FROM_PYPY_DIRECT - else: - rc -= self.REFCNT_FROM_PYPY - if rc == 0: - xxx # _Py_Dealloc(pyobject) - self._pyobj(pyobject).ob_refcnt = rc + rc -= REFCNT_FROM_PYPY + if rc == 0: + xxx # _Py_Dealloc(pyobject) + self._pyobj(pyobject).ob_refcnt = rc + _rrc_free._always_inline_ = True + + def rrc_major_collection_trace(self): + self.rrc_p_list_old.foreach(self._rrc_major_trace, None) + + def _rrc_major_trace(self, pyobject, ignore): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT + # + rc = self._pyobj(pyobject).ob_refcnt + if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_DIRECT: + pass # the corresponding object may die + else: + # force the corresponding object to be alive + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + self.objects_to_trace.append(obj) + self.visit_all_objects() + + def rrc_major_collection_free(self): + new_p_list = self.AddressStack() + if self.rrc_p_dict.length() > self.rrc_p_list_old.length() * 2 + 30: + new_p_dict = self.AddressDict() + else: + 
new_p_dict = self.null_address_dict() + while self.rrc_p_list_old.non_empty(): + self._rrc_major_free(self.rrc_p_list_old.pop(), new_p_list, + new_p_dict) + self.rrc_p_list_old.delete() + self.rrc_p_list_old = new_p_list + if new_p_dict: + self.rrc_p_dict.delete() + self.rrc_p_dict = new_p_dict + + def _rrc_major_free(self, pyobject, surviving_list, surviving_dict): + intobj = self._pyobj(pyobject).ob_pypy_link + obj = llmemory.cast_int_to_adr(intobj) + if self.header(obj).tid & GCFLAG_VISITED: + surviving_list.append(pyobject) + if surviving_dict: + surviving_dict.setitem(obj, pyobject) + else: + self._rrc_free(pyobject) diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -1,3 +1,4 @@ +import py from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.memory.gc.incminimark import IncrementalMiniMarkGC from rpython.memory.gc.test.test_direct import BaseDirectGCTest @@ -61,16 +62,24 @@ lltype.free(r2, flavor='raw') def test_rawrefcount_objects_collection_survives_from_raw(self): - for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: - p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) - assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT - r1.ob_refcnt += 1 - do_collect() - assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + 1 + def check_alive(extra_refcount): + assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + extra_refcount assert r1.ob_pypy_link != 0 p1ref = self.gc.rawrefcount_to_obj(r1addr) assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + r1.ob_refcnt += 1 + self.gc.minor_collection() + check_alive(+1) + self.gc.collect() + check_alive(+1) + r1.ob_refcnt -= 1 + self.gc.minor_collection() + 
check_alive(0) + self.gc.collect() + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead def test_rawrefcount_objects_collection_survives_from_obj(self): for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: From noreply at buildbot.pypy.org Fri Oct 16 19:50:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 19:50:53 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Add asserts Message-ID: <20151016175053.A9AD01C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80288:8d47155c7ac8 Date: 2015-10-16 19:46 +0200 http://bitbucket.org/pypy/pypy/changeset/8d47155c7ac8/ Log: Add asserts diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2850,9 +2850,12 @@ if rc == REFCNT_FROM_PYPY_DIRECT: lltype.free(self._pyobj(pyobject), flavor='raw') else: + ll_assert(rc < int(REFCNT_FROM_PYPY_DIRECT * 0.99), + "refcount underflow from REFCNT_FROM_PYPY_DIRECT?") if rc > REFCNT_FROM_PYPY_DIRECT: rc -= REFCNT_FROM_PYPY_DIRECT else: + ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") rc -= REFCNT_FROM_PYPY if rc == 0: xxx # _Py_Dealloc(pyobject) From noreply at buildbot.pypy.org Fri Oct 16 19:50:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 19:50:55 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Adapt the 3rd test Message-ID: <20151016175055.E09951C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80289:0e4355b1faf4 Date: 2015-10-16 19:51 +0200 http://bitbucket.org/pypy/pypy/changeset/0e4355b1faf4/ Log: Adapt the 3rd test diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2774,6 +2774,13 @@ self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) self.rrc_enabled = True + def 
check_no_more_rawrefcount_state(self): + "NOT_RPYTHON: for tests" + assert self.rrc_p_list_young.length() == 0 + assert self.rrc_p_list_old .length() == 0 + assert self.rrc_o_list_young.length() == 0 + assert self.rrc_o_list_old .length() == 0 + def rawrefcount_create_link_pypy(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") obj = llmemory.cast_ptr_to_adr(gcobj) diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -69,7 +69,7 @@ assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 assert self.gc.rawrefcount_from_obj(p1ref) == r1addr p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) - assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + check_alive(0) r1.ob_refcnt += 1 self.gc.minor_collection() check_alive(+1) @@ -80,42 +80,25 @@ check_alive(0) self.gc.collect() py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + self.gc.check_no_more_rawrefcount_state() def test_rawrefcount_objects_collection_survives_from_obj(self): - for do_collect in [self.gc.minor_collection, self.gc.collect] * 2: - p1, p1ref, r1, r1addr = self._rawrefcount_pair(42) - assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT - self.stackroots.append(p1) - do_collect() - assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT - assert r1.ob_pypy_link != llmemory.NULL + def check_alive(extra_refcount): + assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + extra_refcount + assert r1.ob_pypy_link != 0 p1ref = self.gc.rawrefcount_to_obj(r1addr) assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 assert self.gc.rawrefcount_from_obj(p1ref) == r1addr - - def test_rawrefcount_objects_collection_dies(self): - p1, p1ref, r1, r1addr = self._rawrefcount_pair(43) - seen = [] - self.gc.rawrefcount_set_callback(seen.append) + p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + check_alive(0) + 
self.stackroots.append(p1) self.gc.minor_collection() - assert r1.ob_refcnt == REFCNT_FROM_PYPY_OBJECT - assert r1.ob_pypy_link != llmemory.NULL - p1ref = self.gc.rawrefcount_to_obj(r1addr) - assert seen == [p1ref] - assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 43 - assert self.gc.rawrefcount_from_obj(p1ref) == r1addr - # - del seen[:] + check_alive(0) + self.gc.collect() + check_alive(0) + self.stackroots.pop() self.gc.minor_collection() - assert seen == [] + check_alive(0) self.gc.collect() - assert seen == [p1ref] - assert r1.ob_pypy_link == llmemory.cast_ptr_to_adr(p1ref) - - def test_rawrefcount_objects_detach(self): - p1, p1ref, r1, r1addr = self._rawrefcount_pair(43) - self.gc.rawrefcount_detach(r1addr) - assert r1.ob_pypy_link == llmemory.NULL - assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL - assert self.gc.rawrefcount_to_obj(r1addr) == lltype.nullptr( - llmemory.GCREF.TO) + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + self.gc.check_no_more_rawrefcount_state() From noreply at buildbot.pypy.org Fri Oct 16 19:56:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 19:56:57 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Tweak the tests Message-ID: <20151016175657.6535F1C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80290:b67cad237592 Date: 2015-10-16 19:57 +0200 http://bitbucket.org/pypy/pypy/changeset/b67cad237592/ Log: Tweak the tests diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -39,10 +39,20 @@ self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) assert r1.ob_refcnt == rc assert r1.ob_pypy_link != 0 - return p1, p1ref, r1, r1addr + + def check_alive(extra_refcount): + assert r1.ob_refcnt == rc + extra_refcount + assert r1.ob_pypy_link != 0 + p1ref = self.gc.rawrefcount_to_obj(r1addr) + p1 = 
lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref) + assert p1.x == 42 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + return p1 + return p1, p1ref, r1, r1addr, check_alive def test_rawrefcount_objects_basic(self): - p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=True)) p2 = self.malloc(S) p2.x = 84 p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) @@ -62,13 +72,8 @@ lltype.free(r2, flavor='raw') def test_rawrefcount_objects_collection_survives_from_raw(self): - def check_alive(extra_refcount): - assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + extra_refcount - assert r1.ob_pypy_link != 0 - p1ref = self.gc.rawrefcount_to_obj(r1addr) - assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 - assert self.gc.rawrefcount_from_obj(p1ref) == r1addr - p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=True)) check_alive(0) r1.ob_refcnt += 1 self.gc.minor_collection() @@ -77,28 +82,26 @@ check_alive(+1) r1.ob_refcnt -= 1 self.gc.minor_collection() - check_alive(0) + p1 = check_alive(0) self.gc.collect() py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() def test_rawrefcount_objects_collection_survives_from_obj(self): - def check_alive(extra_refcount): - assert r1.ob_refcnt == REFCNT_FROM_PYPY_DIRECT + extra_refcount - assert r1.ob_pypy_link != 0 - p1ref = self.gc.rawrefcount_to_obj(r1addr) - assert lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref).x == 42 - assert self.gc.rawrefcount_from_obj(p1ref) == r1addr - p1, p1ref, r1, r1addr = self._rawrefcount_pair(42, is_direct=True) + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=True)) check_alive(0) self.stackroots.append(p1) self.gc.minor_collection() check_alive(0) self.gc.collect() check_alive(0) - 
self.stackroots.pop() + p1 = self.stackroots.pop() self.gc.minor_collection() check_alive(0) + assert p1.x == 42 self.gc.collect() py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() From noreply at buildbot.pypy.org Fri Oct 16 20:10:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 20:10:24 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: test create_link_pypy() with REFCNT_FROM_PYPY Message-ID: <20151016181024.E5DEB1C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80291:97943215d17a Date: 2015-10-16 20:10 +0200 http://bitbucket.org/pypy/pypy/changeset/97943215d17a/ Log: test create_link_pypy() with REFCNT_FROM_PYPY diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2761,7 +2761,7 @@ def _pyobj(self, pyobjaddr): return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) - def rawrefcount_init(self): + def rawrefcount_init(self, dealloc_callback): # see pypy/doc/discussion/rawrefcount.rst if not self.rrc_enabled: self.rrc_p_list_young = self.AddressStack() @@ -2772,6 +2772,7 @@ p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', track_allocation=False) self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) + self.rrc_dealloc_callback = dealloc_callback self.rrc_enabled = True def check_no_more_rawrefcount_state(self): @@ -2854,19 +2855,19 @@ from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_DIRECT # rc = self._pyobj(pyobject).ob_refcnt - if rc == REFCNT_FROM_PYPY_DIRECT: + if rc >= REFCNT_FROM_PYPY_DIRECT: + ll_assert(rc == REFCNT_FROM_PYPY_DIRECT, + "cpyext: rrc_trace() should have marked the pypy obj alive") lltype.free(self._pyobj(pyobject), flavor='raw') else: + ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") ll_assert(rc < int(REFCNT_FROM_PYPY_DIRECT * 0.99), "refcount 
underflow from REFCNT_FROM_PYPY_DIRECT?") - if rc > REFCNT_FROM_PYPY_DIRECT: - rc -= REFCNT_FROM_PYPY_DIRECT - else: - ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") - rc -= REFCNT_FROM_PYPY - if rc == 0: - xxx # _Py_Dealloc(pyobject) + rc -= REFCNT_FROM_PYPY self._pyobj(pyobject).ob_refcnt = rc + self._pyobj(pyobject).ob_pypy_link = 0 + if rc == 0: + self.rrc_dealloc_callback(pyobject) _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -31,7 +31,8 @@ r1.ob_refcnt = rc r1.ob_pypy_link = 0 r1addr = llmemory.cast_ptr_to_adr(r1) - self.gc.rawrefcount_init() + self.dealloc = [] + self.gc.rawrefcount_init(self.dealloc.append) if is_pyobj: assert not is_direct self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) @@ -87,6 +88,7 @@ py.test.raises(RuntimeError, "r1.ob_refcnt") # dead py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() + assert self.dealloc == [] def test_rawrefcount_objects_collection_survives_from_obj(self): p1, p1ref, r1, r1addr, check_alive = ( @@ -105,3 +107,47 @@ py.test.raises(RuntimeError, "r1.ob_refcnt") # dead py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() + assert self.dealloc == [] + + def test_pypy_nondirect_survives_from_raw(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=False)) + check_alive(0) + r1.ob_refcnt += 1 + self.gc.minor_collection() + check_alive(+1) + self.gc.collect() + check_alive(+1) + r1.ob_refcnt -= 1 + self.gc.minor_collection() + p1 = check_alive(0) + assert self.dealloc == [] + self.gc.collect() + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.dealloc == [r1addr] + self.gc.check_no_more_rawrefcount_state() + 
lltype.free(r1, flavor='raw') + + def test_pypy_nondirect_survives_from_obj(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=False)) + check_alive(0) + self.stackroots.append(p1) + self.gc.minor_collection() + check_alive(0) + self.gc.collect() + check_alive(0) + p1 = self.stackroots.pop() + self.gc.minor_collection() + check_alive(0) + assert p1.x == 42 + assert self.dealloc == [] + self.gc.collect() + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.dealloc == [r1addr] + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') From noreply at buildbot.pypy.org Fri Oct 16 20:17:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 20:17:40 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Extra tests for the cases where the object pair dies before the first Message-ID: <20151016181740.73C281C0165@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80292:c82b950212fd Date: 2015-10-16 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/c82b950212fd/ Log: Extra tests for the cases where the object pair dies before the first minor collection. The rules are that create_link_pypy() always frees the PyPy object in this case; if tp_dealloc is called later, it will be on a detached PyObject. 
diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -90,6 +90,16 @@ self.gc.check_no_more_rawrefcount_state() assert self.dealloc == [] + def test_rawrefcount_dies_quickly(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=True)) + check_alive(0) + self.gc.minor_collection() + py.test.raises(RuntimeError, "r1.ob_refcnt") # dead + py.test.raises(RuntimeError, "p1.x") # dead + self.gc.check_no_more_rawrefcount_state() + assert self.dealloc == [] + def test_rawrefcount_objects_collection_survives_from_obj(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_direct=True)) @@ -151,3 +161,15 @@ assert self.dealloc == [r1addr] self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') + + def test_pypy_nondirect_dies_quickly(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_direct=False)) + check_alive(0) + self.gc.minor_collection() + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.dealloc == [r1addr] + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') From noreply at buildbot.pypy.org Fri Oct 16 20:38:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 20:38:15 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: the O list Message-ID: <20151016183815.3A96B1C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80293:b1e55d190a36 Date: 2015-10-16 20:38 +0200 http://bitbucket.org/pypy/pypy/changeset/b1e55d190a36/ Log: the O list diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2793,6 +2793,17 @@ self._pyobj(pyobject).ob_pypy_link = objint 
self.rrc_p_dict.setitem(obj, pyobject) + def rawrefcount_create_link_pyobj(self, gcobj, pyobject): + ll_assert(self.rrc_enabled, "rawrefcount.init not called") + obj = llmemory.cast_ptr_to_adr(gcobj) + if self.is_young_object(obj): + self.rrc_o_list_young.append(pyobject) + else: + self.rrc_o_list_old.append(pyobject) + objint = llmemory.cast_adr_to_int(obj, mode="symbolic") + self._pyobj(pyobject).ob_pypy_link = objint + # there is no rrc_o_dict + def rawrefcount_from_obj(self, gcobj): obj = llmemory.cast_ptr_to_adr(gcobj) return self.rrc_p_dict.get(obj) @@ -2822,9 +2833,15 @@ def rrc_minor_collection_free(self): lst = self.rrc_p_list_young while lst.non_empty(): - self._rrc_minor_free(lst.pop(), self.rrc_p_list_old) + self._rrc_minor_free(lst.pop(), self.rrc_p_list_old, + self.rrc_p_dict) + lst = self.rrc_o_list_young + no_o_dict = self.null_address_dict() + while lst.non_empty(): + self._rrc_minor_free(lst.pop(), self.rrc_o_list_old, + no_o_dict) - def _rrc_minor_free(self, pyobject, surviving_list): + def _rrc_minor_free(self, pyobject, surviving_list, surviving_dict): intobj = self._pyobj(pyobject).ob_pypy_link obj = llmemory.cast_int_to_adr(intobj) if self.is_in_nursery(obj): @@ -2833,7 +2850,8 @@ obj = self.get_forwarding_address(obj) intobj = llmemory.cast_adr_to_int(obj, mode="symbolic") self._pyobj(pyobject).ob_pypy_link = intobj - self.rrc_p_dict.setitem(obj, pyobject) + if surviving_dict: + surviving_dict.setitem(obj, pyobject) surviving = True else: surviving = False @@ -2901,6 +2919,14 @@ if new_p_dict: self.rrc_p_dict.delete() self.rrc_p_dict = new_p_dict + # + new_o_list = self.AddressStack() + no_o_dict = self.null_address_dict() + while self.rrc_o_list_old.non_empty(): + self._rrc_major_free(self.rrc_o_list_old.pop(), new_o_list, + no_o_dict) + self.rrc_o_list_old.delete() + self.rrc_o_list_old = new_o_list def _rrc_major_free(self, pyobject, surviving_list, surviving_dict): intobj = self._pyobj(pyobject).ob_pypy_link diff --git 
a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -18,7 +18,8 @@ class TestRawRefCount(BaseDirectGCTest): GCClass = IncrementalMiniMarkGC - def _rawrefcount_pair(self, intval, is_direct=False, is_pyobj=False): + def _rawrefcount_pair(self, intval, is_direct=False, is_pyobj=False, + create_old=False): if is_direct: rc = REFCNT_FROM_PYPY_DIRECT else: @@ -26,6 +27,10 @@ # p1 = self.malloc(S) p1.x = intval + if create_old: + self.stackroots.append(p1) + self.gc.minor_collection() + p1 = self.stackroots.pop() p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) r1 = lltype.malloc(PYOBJ_HDR, flavor='raw') r1.ob_refcnt = rc @@ -46,14 +51,21 @@ assert r1.ob_pypy_link != 0 p1ref = self.gc.rawrefcount_to_obj(r1addr) p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref) - assert p1.x == 42 - assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + assert p1.x == intval + if not is_pyobj: + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + else: + # NB. 
this is not strictly always the case, because of + # dead entries remaining behind; but for these simple + # tests it is useful to detect unexpected rrc_p_dict + # entries + assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL return p1 return p1, p1ref, r1, r1addr, check_alive - def test_rawrefcount_objects_basic(self): + def test_rawrefcount_objects_basic(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=True)) + self._rawrefcount_pair(42, is_direct=True, create_old=old)) p2 = self.malloc(S) p2.x = 84 p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) @@ -72,9 +84,9 @@ lltype.free(r1, flavor='raw') lltype.free(r2, flavor='raw') - def test_rawrefcount_objects_collection_survives_from_raw(self): + def test_rawrefcount_objects_collection_survives_from_raw(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=True)) + self._rawrefcount_pair(42, is_direct=True, create_old=old)) check_alive(0) r1.ob_refcnt += 1 self.gc.minor_collection() @@ -90,19 +102,22 @@ self.gc.check_no_more_rawrefcount_state() assert self.dealloc == [] - def test_rawrefcount_dies_quickly(self): + def test_rawrefcount_dies_quickly(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=True)) + self._rawrefcount_pair(42, is_direct=True, create_old=old)) check_alive(0) self.gc.minor_collection() + if old: + check_alive(0) + self.gc.collect() py.test.raises(RuntimeError, "r1.ob_refcnt") # dead py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() assert self.dealloc == [] - def test_rawrefcount_objects_collection_survives_from_obj(self): + def test_rawrefcount_objects_collection_survives_from_obj(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=True)) + self._rawrefcount_pair(42, is_direct=True, create_old=old)) check_alive(0) self.stackroots.append(p1) self.gc.minor_collection() @@ -119,9 
+134,18 @@ self.gc.check_no_more_rawrefcount_state() assert self.dealloc == [] - def test_pypy_nondirect_survives_from_raw(self): + def test_rawrefcount_objects_basic_old(self): + self.test_rawrefcount_objects_basic(old=True) + def test_rawrefcount_objects_collection_survives_from_raw_old(self): + self.test_rawrefcount_objects_collection_survives_from_raw(old=True) + def test_rawrefcount_dies_quickly_old(self): + self.test_rawrefcount_dies_quickly(old=True) + def test_rawrefcount_objects_collection_survives_from_obj_old(self): + self.test_rawrefcount_objects_collection_survives_from_obj(old=True) + + def test_pypy_nondirect_survives_from_raw(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=False)) + self._rawrefcount_pair(42, is_direct=False, create_old=old)) check_alive(0) r1.ob_refcnt += 1 self.gc.minor_collection() @@ -140,9 +164,9 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pypy_nondirect_survives_from_obj(self): + def test_pypy_nondirect_survives_from_obj(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=False)) + self._rawrefcount_pair(42, is_direct=False, create_old=old)) check_alive(0) self.stackroots.append(p1) self.gc.minor_collection() @@ -162,14 +186,77 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pypy_nondirect_dies_quickly(self): + def test_pypy_nondirect_dies_quickly(self, old=False): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_direct=False)) + self._rawrefcount_pair(42, is_direct=False, create_old=old)) check_alive(0) self.gc.minor_collection() + if old: + check_alive(0) + self.gc.collect() py.test.raises(RuntimeError, "p1.x") # dead assert r1.ob_refcnt == 0 assert r1.ob_pypy_link == 0 assert self.dealloc == [r1addr] self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') + + def test_pypy_nondirect_survives_from_raw_old(self): + 
self.test_pypy_nondirect_survives_from_raw(old=True) + def test_pypy_nondirect_survives_from_obj_old(self): + self.test_pypy_nondirect_survives_from_obj(old=True) + def test_pypy_nondirect_dies_quickly_old(self): + self.test_pypy_nondirect_dies_quickly(old=True) + + def test_pyobject_pypy_link_dies_on_minor_collection(self): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True)) + check_alive(0) + r1.ob_refcnt += 1 # the pyobject is kept alive + self.gc.minor_collection() + assert r1.ob_refcnt == 1 # refcnt dropped to 1 + assert r1.ob_pypy_link == 0 # detached + assert self.dealloc == [] + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_dies(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + check_alive(0) + self.gc.minor_collection() + if old: + check_alive(0) + self.gc.collect() + assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_pypy_link == 0 # detached + assert self.dealloc == [r1addr] + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_survives_from_obj(self, old=False): + p1, p1ref, r1, r1addr, check_alive = ( + self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + check_alive(0) + self.stackroots.append(p1) + self.gc.minor_collection() + check_alive(0) + self.gc.collect() + check_alive(0) + p1 = self.stackroots.pop() + self.gc.minor_collection() + check_alive(0) + assert p1.x == 42 + assert self.dealloc == [] + self.gc.collect() + py.test.raises(RuntimeError, "p1.x") # dead + assert r1.ob_refcnt == 0 + assert r1.ob_pypy_link == 0 + assert self.dealloc == [r1addr] + self.gc.check_no_more_rawrefcount_state() + lltype.free(r1, flavor='raw') + + def test_pyobject_dies_old(self): + self.test_pyobject_dies(old=True) + def test_pyobject_survives_from_obj_old(self): + self.test_pyobject_survives_from_obj(old=True) From noreply at buildbot.pypy.org Fri 
Oct 16 20:41:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 20:41:36 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20151016184136.50E621C1230@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r642:30301c9e7633 Date: 2015-10-16 20:42 +0200 http://bitbucket.org/pypy/pypy.org/changeset/30301c9e7633/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $60690 of $105000 (57.8%) + $60704 of $105000 (57.8%)
    @@ -23,7 +23,7 @@
  • From noreply at buildbot.pypy.org Fri Oct 16 21:03:18 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 21:03:18 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: comment Message-ID: <20151016190318.0CBA41C12DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80294:a0aca291e040 Date: 2015-10-16 20:46 +0200 http://bitbucket.org/pypy/pypy/changeset/a0aca291e040/ Log: comment diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2859,6 +2859,7 @@ self.young_rawmalloced_objects.contains(pointing_to)): # young weakref to a young raw-malloced object if self.header(pointing_to).tid & GCFLAG_VISITED_RMY: + # surviving_dict is already up-to-date: the key doesn't move surviving = True # survives, but does not move else: surviving = False From noreply at buildbot.pypy.org Fri Oct 16 21:03:20 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 21:03:20 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Need more care in handling rrc_p_dict to make sure Message-ID: <20151016190320.548951C12DC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80295:e7d59747605b Date: 2015-10-16 21:03 +0200 http://bitbucket.org/pypy/pypy/changeset/e7d59747605b/ Log: Need more care in handling rrc_p_dict to make sure rawrefcount_from_obj() really returns NULL on unlinked objects. Use NULL-valued entries. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2781,6 +2781,9 @@ assert self.rrc_p_list_old .length() == 0 assert self.rrc_o_list_young.length() == 0 assert self.rrc_o_list_old .length() == 0 + def check_value_is_null(key, value, ignore): + assert value == llmemory.NULL + self.rrc_p_dict.foreach(check_value_is_null, None) def rawrefcount_create_link_pypy(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") @@ -2844,14 +2847,14 @@ def _rrc_minor_free(self, pyobject, surviving_list, surviving_dict): intobj = self._pyobj(pyobject).ob_pypy_link obj = llmemory.cast_int_to_adr(intobj) + if surviving_dict: + surviving_dict.setitem(obj, llmemory.NULL) if self.is_in_nursery(obj): if self.is_forwarded(obj): # Common case: survives and moves obj = self.get_forwarding_address(obj) intobj = llmemory.cast_adr_to_int(obj, mode="symbolic") self._pyobj(pyobject).ob_pypy_link = intobj - if surviving_dict: - surviving_dict.setitem(obj, pyobject) surviving = True else: surviving = False @@ -2859,13 +2862,14 @@ self.young_rawmalloced_objects.contains(pointing_to)): # young weakref to a young raw-malloced object if self.header(pointing_to).tid & GCFLAG_VISITED_RMY: - # surviving_dict is already up-to-date: the key doesn't move surviving = True # survives, but does not move else: surviving = False # if surviving: surviving_list.append(pyobject) + if surviving_dict: + surviving_dict.setitem(obj, pyobject) else: self._rrc_free(pyobject) @@ -2907,19 +2911,15 @@ self.visit_all_objects() def rrc_major_collection_free(self): + length_estimate = self.rrc_p_dict.length() + self.rrc_p_dict.delete() + self.rrc_p_dict = new_p_dict = self.AddressDict(length_estimate) new_p_list = self.AddressStack() - if self.rrc_p_dict.length() > self.rrc_p_list_old.length() * 2 + 30: - new_p_dict = self.AddressDict() - else: - new_p_dict = self.null_address_dict() while 
self.rrc_p_list_old.non_empty(): self._rrc_major_free(self.rrc_p_list_old.pop(), new_p_list, new_p_dict) self.rrc_p_list_old.delete() self.rrc_p_list_old = new_p_list - if new_p_dict: - self.rrc_p_dict.delete() - self.rrc_p_dict = new_p_dict # new_o_list = self.AddressStack() no_o_dict = self.null_address_dict() @@ -2935,6 +2935,6 @@ if self.header(obj).tid & GCFLAG_VISITED: surviving_list.append(pyobject) if surviving_dict: - surviving_dict.setitem(obj, pyobject) + surviving_dict.insertclean(obj, pyobject) else: self._rrc_free(pyobject) diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -55,10 +55,6 @@ if not is_pyobj: assert self.gc.rawrefcount_from_obj(p1ref) == r1addr else: - # NB. this is not strictly always the case, because of - # dead entries remaining behind; but for these simple - # tests it is useful to detect unexpected rrc_p_dict - # entries assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL return p1 return p1, p1ref, r1, r1addr, check_alive From noreply at buildbot.pypy.org Fri Oct 16 21:17:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 21:17:23 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress Message-ID: <20151016191723.CCA3F1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80296:759b503b97b9 Date: 2015-10-16 21:17 +0200 http://bitbucket.org/pypy/pypy/changeset/759b503b97b9/ Log: in-progress diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -18,13 +18,14 @@ return res -def init(): +def init(dealloc_callback=None): "NOT_RPYTHON: set up rawrefcount with the GC" - global _p_list, _o_list, _adr2pypy, _pypy2ob + global _p_list, _o_list, _adr2pypy, _pypy2ob, _dealloc_callback _p_list = [] _o_list = [] _adr2pypy = 
[None] _pypy2ob = {} + _dealloc_callback = dealloc_callback def create_link_pypy(p, ob): "NOT_RPYTHON: a link where the PyPy object contains some or all the data" @@ -61,8 +62,8 @@ def _collect(): """NOT_RPYTHON: for tests only. Emulates a GC collection. - Returns the list of ob's whose _Py_Dealloc() should be called, - from the O list. + Will invoke dealloc_callback() for all objects whose _Py_Dealloc() + should be called. """ def detach(ob, wr_list): assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY @@ -105,17 +106,18 @@ return p else: ob.c_ob_pypy_link = 0 - if ob.c_ob_refcnt == REFCNT_FROM_PYPY_DIRECT: - pass # freed - elif ob.c_ob_refcnt > REFCNT_FROM_PYPY_DIRECT: - ob.c_ob_refcnt -= REFCNT_FROM_PYPY_DIRECT + if ob.c_ob_refcnt >= REFCNT_FROM_PYPY_DIRECT: + assert ob.c_ob_refcnt == REFCNT_FROM_PYPY_DIRECT + lltype.free(ob, flavor='raw') else: + assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY + assert ob.c_ob_refcnt < int(REFCNT_FROM_PYPY_DIRECT * 0.99) ob.c_ob_refcnt -= REFCNT_FROM_PYPY + ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0: - dealloc.append(ob) + _dealloc_callback(ob) return None - dealloc = [] _p_list = new_p_list for ob, wr in wr_p_list: p = attach(ob, wr, _p_list) @@ -124,19 +126,20 @@ _o_list = [] for ob, wr in wr_o_list: attach(ob, wr, _o_list) - return dealloc # ____________________________________________________________ class Entry(ExtRegistryEntry): _about_ = init - def compute_result_annotation(self): - pass + def compute_result_annotation(self, s_dealloc_callback): + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_dealloc_callback, SomePtr) # ll-ptr-to-function def specialize_call(self, hop): hop.exception_cannot_occur() - hop.genop('gc_rawrefcount_init', []) + [v_dealloc_callback] = hop.inputargs(hop.args_r[0].lowleveltype) + hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) class Entry(ExtRegistryEntry): _about_ = (create_link_pypy, create_link_pyobj) diff --git a/rpython/rlib/test/test_rawrefcount.py 
b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -20,8 +20,7 @@ def test_create_link_pypy(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pypy(p, ob) @@ -29,11 +28,11 @@ ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount.from_obj(PyObject, p) == ob assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') def test_create_link_pyobj(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == None rawrefcount.create_link_pyobj(p, ob) @@ -41,11 +40,11 @@ ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount.from_obj(PyObject, p) == lltype.nullptr(PyObjectS) assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') def test_collect_p_dies(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] @@ -59,8 +58,7 @@ def test_collect_p_keepalive_pyobject(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] @@ -75,11 +73,11 @@ assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p assert rawrefcount.from_obj(PyObject, p) == ob + 
lltype.free(ob, flavor='raw') def test_collect_p_keepalive_w_root(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY_DIRECT assert rawrefcount._p_list == [ob] @@ -91,28 +89,31 @@ assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p assert rawrefcount.from_obj(PyObject, p) == ob + lltype.free(ob, flavor='raw') def test_collect_o_dies(self): + dealloc = []; rawrefcount.init(dealloc.append) p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) rawrefcount.create_link_pyobj(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) del ob, p - dealloc = rawrefcount._collect() + rawrefcount._collect() ob = wr_ob() assert ob is not None assert dealloc == [ob] assert rawrefcount._o_list == [] assert wr_p() is None + assert ob.c_ob_refcnt == 0 + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') def test_collect_o_keepalive_pyobject(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) p.pyobj = ob rawrefcount.create_link_pyobj(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY @@ -121,53 +122,54 @@ wr_p = weakref.ref(p) ob.c_ob_refcnt += 1 # <= del p - dealloc = rawrefcount._collect() - assert dealloc == [] + rawrefcount._collect() p = wr_p() assert p is None # was unlinked assert ob.c_ob_refcnt == 1 # != REFCNT_FROM_PYPY_OBJECT + 1 assert rawrefcount._o_list == [] assert rawrefcount.to_obj(W_Root, ob) == None + lltype.free(ob, flavor='raw') def test_collect_o_keepalive_w_root(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = 
lltype.malloc(PyObjectS, flavor='raw', zero=True) p.pyobj = ob rawrefcount.create_link_pyobj(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._o_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains - dealloc = rawrefcount._collect() - assert dealloc == [] + rawrefcount._collect() ob = wr_ob() assert ob is not None assert rawrefcount._o_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p assert p.pyobj == ob + lltype.free(ob, flavor='raw') def test_collect_s_dies(self): + dealloc = []; rawrefcount.init(dealloc.append) p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) wr_p = weakref.ref(p) del ob, p - dealloc = rawrefcount._collect() + rawrefcount._collect() ob = wr_ob() assert ob is not None assert dealloc == [ob] assert rawrefcount._p_list == [] assert wr_p() is None + assert ob.c_ob_refcnt == 0 + assert ob.c_ob_pypy_link == 0 + lltype.free(ob, flavor='raw') def test_collect_s_keepalive_pyobject(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) p.pyobj = ob rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY @@ -182,20 +184,20 @@ assert ob is not None and p is not None assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') def test_collect_s_keepalive_w_root(self): p = W_Root(42) - ob = lltype.malloc(PyObjectS, flavor='raw', zero=True, - track_allocation=False) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) p.pyobj = ob rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount._p_list == [ob] wr_ob = weakref.ref(ob) del ob # p remains - dealloc = rawrefcount._collect() - assert dealloc == [] 
+ rawrefcount._collect() ob = wr_ob() assert ob is not None assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p + lltype.free(ob, flavor='raw') From noreply at buildbot.pypy.org Fri Oct 16 22:04:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 22:04:34 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Translates (and segfault for now) Message-ID: <20151016200434.3C38E1C12D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80297:14ff1bcf152c Date: 2015-10-16 22:04 +0200 http://bitbucket.org/pypy/pypy/changeset/14ff1bcf152c/ Log: Translates (and segfault for now) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2757,6 +2757,8 @@ ('ob_refcnt', lltype.Signed), ('ob_pypy_link', lltype.Signed)) PYOBJ_HDR_PTR = lltype.Ptr(PYOBJ_HDR) + RAWREFCOUNT_DEALLOC = lltype.Ptr(lltype.FuncType([llmemory.Address], + lltype.Void)) def _pyobj(self, pyobjaddr): return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) @@ -2792,7 +2794,7 @@ self.rrc_p_list_young.append(pyobject) else: self.rrc_p_list_old.append(pyobject) - objint = llmemory.cast_adr_to_int(obj, mode="symbolic") + objint = llmemory.cast_adr_to_int(obj, "symbolic") self._pyobj(pyobject).ob_pypy_link = objint self.rrc_p_dict.setitem(obj, pyobject) @@ -2803,7 +2805,7 @@ self.rrc_o_list_young.append(pyobject) else: self.rrc_o_list_old.append(pyobject) - objint = llmemory.cast_adr_to_int(obj, mode="symbolic") + objint = llmemory.cast_adr_to_int(obj, "symbolic") self._pyobj(pyobject).ob_pypy_link = objint # there is no rrc_o_dict @@ -2853,18 +2855,21 @@ if self.is_forwarded(obj): # Common case: survives and moves obj = self.get_forwarding_address(obj) - intobj = llmemory.cast_adr_to_int(obj, mode="symbolic") + intobj = llmemory.cast_adr_to_int(obj, "symbolic") self._pyobj(pyobject).ob_pypy_link = intobj surviving = 
True else: surviving = False elif (bool(self.young_rawmalloced_objects) and - self.young_rawmalloced_objects.contains(pointing_to)): + self.young_rawmalloced_objects.contains(obj)): # young weakref to a young raw-malloced object - if self.header(pointing_to).tid & GCFLAG_VISITED_RMY: + if self.header(obj).tid & GCFLAG_VISITED_RMY: surviving = True # survives, but does not move else: surviving = False + else: + ll_assert(False, "rrc_X_list_young contains non-young obj") + return # if surviving: surviving_list.append(pyobject) diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -157,6 +157,7 @@ else: # for regular translation: pick the GC from the config GCClass, GC_PARAMS = choose_gc_from_config(translator.config) + self.GCClass = GCClass if hasattr(translator, '_jit2gc'): self.layoutbuilder = translator._jit2gc['layoutbuilder'] @@ -487,6 +488,26 @@ annmodel.SomeInteger(nonneg=True)], annmodel.s_None) + if hasattr(GCClass, 'rawrefcount_init'): + self.rawrefcount_init_ptr = getfn( + GCClass.rawrefcount_init, + [s_gc, SomePtr(GCClass.RAWREFCOUNT_DEALLOC)], + annmodel.s_None) + self.rawrefcount_create_link_pypy_ptr = getfn( + GCClass.rawrefcount_create_link_pypy, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_create_link_pyobj_ptr = getfn( + GCClass.rawrefcount_create_link_pyobj, + [s_gc, s_gcref, SomeAddress()], + annmodel.s_None) + self.rawrefcount_from_obj_ptr = getfn( + GCClass.rawrefcount_from_obj, [s_gc, s_gcref], SomeAddress(), + inline = True) + self.rawrefcount_to_obj_ptr = getfn( + GCClass.rawrefcount_to_obj, [s_gc, SomeAddress()], s_gcref, + inline = True) + if GCClass.can_usually_pin_objects: self.pin_ptr = getfn(GCClass.pin, [s_gc, SomeAddress()], @@ -1232,6 +1253,44 @@ resultvar=hop.spaceop.result) self.pop_roots(hop, livevars) + def gct_gc_rawrefcount_init(self, hop): + [v_fnptr] = 
hop.spaceop.args + assert v_fnptr.concretetype == self.GCClass.RAWREFCOUNT_DEALLOC + hop.genop("direct_call", + [self.rawrefcount_init_ptr, self.c_const_gc, v_fnptr]) + + def gct_gc_rawrefcount_create_link_pypy(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pypy_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_create_link_pyobj(self, hop): + [v_gcobj, v_pyobject] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert v_pyobject.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_create_link_pyobj_ptr, self.c_const_gc, + v_gcobj, v_pyobject]) + + def gct_gc_rawrefcount_from_obj(self, hop): + [v_gcobj] = hop.spaceop.args + assert v_gcobj.concretetype == llmemory.GCREF + assert hop.spaceop.result.concretetype == llmemory.Address + hop.genop("direct_call", + [self.rawrefcount_from_obj_ptr, self.c_const_gc, v_gcobj], + resultvar=hop.spaceop.result) + + def gct_gc_rawrefcount_to_obj(self, hop): + [v_pyobject] = hop.spaceop.args + assert v_pyobject.concretetype == llmemory.Address + assert hop.spaceop.result.concretetype == llmemory.GCREF + hop.genop("direct_call", + [self.rawrefcount_to_obj_ptr, self.c_const_gc, v_pyobject], + resultvar=hop.spaceop.result) + def _set_into_gc_array_part(self, op): if op.opname == 'setarrayitem': return op.args[1] diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -11,6 +11,9 @@ REFCNT_FROM_PYPY = 80 REFCNT_FROM_PYPY_DIRECT = REFCNT_FROM_PYPY + (sys.maxint//2+1) +RAWREFCOUNT_DEALLOC = lltype.Ptr(lltype.FuncType([llmemory.Address], + lltype.Void)) + def _build_pypy_link(p): res = len(_adr2pypy) @@ -129,6 +132,28 @@ # ____________________________________________________________ + +def _unspec_p(hop, v_p): + assert 
isinstance(v_p.concretetype, lltype.Ptr) + assert v_p.concretetype.TO._gckind == 'gc' + return hop.genop('cast_opaque_ptr', [v_p], resulttype=llmemory.GCREF) + +def _unspec_ob(hop, v_ob): + assert isinstance(v_ob.concretetype, lltype.Ptr) + assert v_ob.concretetype.TO._gckind == 'raw' + return hop.genop('cast_ptr_to_adr', [v_ob], resulttype=llmemory.Address) + +def _spec_p(hop, v_p): + assert v_p.concretetype == llmemory.GCREF + return hop.genop('cast_opaque_ptr', [v_p], + resulttype=hop.r_result.lowleveltype) + +def _spec_ob(hop, v_ob): + assert v_ob.concretetype == llmemory.Address + return hop.genop('cast_adr_to_ptr', [v_ob], + resulttype=hop.r_result.lowleveltype) + + class Entry(ExtRegistryEntry): _about_ = init @@ -138,9 +163,10 @@ def specialize_call(self, hop): hop.exception_cannot_occur() - [v_dealloc_callback] = hop.inputargs(hop.args_r[0].lowleveltype) + [v_dealloc_callback] = hop.inputargs(hop.args_r[0]) hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) + class Entry(ExtRegistryEntry): _about_ = (create_link_pypy, create_link_pyobj) @@ -152,8 +178,10 @@ name = 'gc_rawrefcount_create_link_pypy' elif self.instance is create_link_pyobj: name = 'gc_rawrefcount_create_link_pyobj' + v_p, v_ob = hop.inputargs(*hop.args_r) hop.exception_cannot_occur() - hop.genop(name, hop.args_v) + hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)]) + class Entry(ExtRegistryEntry): _about_ = from_obj @@ -168,9 +196,10 @@ def specialize_call(self, hop): hop.exception_cannot_occur() - [v_p] = hop.inputargs(hop.args_r[1].lowleveltype) - return hop.genop('gc_rawrefcount_from_obj', [v_p], - resulttype = hop.r_result.lowleveltype) + v_p = hop.inputarg(hop.args_r[1], arg=1) + v_ob = hop.genop('gc_rawrefcount_from_obj', [_unspec_p(hop, v_p)], + resulttype = llmemory.Address) + return _spec_ob(hop, v_ob) class Entry(ExtRegistryEntry): _about_ = to_obj @@ -185,6 +214,7 @@ def specialize_call(self, hop): hop.exception_cannot_occur() - v_ob = 
hop.inputargs(hop.args_r[1].lowleveltype) - return hop.genop('gc_rawrefcount_to_obj', [v_ob], - resulttype = hop.r_result.lowleveltype) + v_ob = hop.inputarg(hop.args_r[1], arg=1) + v_p = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], + resulttype = llmemory.GCREF) + return _spec_p(hop, v_p) diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -1,7 +1,11 @@ import weakref -from rpython.rlib import rawrefcount +from rpython.rlib import rawrefcount, objectmodel, rgc from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_DIRECT from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rtyper.annlowlevel import llhelper +from rpython.translator.c.test.test_standalone import StandaloneTests +from rpython.config.translationoption import get_combined_translation_config + class W_Root(object): def __init__(self, intval=0): @@ -201,3 +205,53 @@ assert rawrefcount._p_list == [ob] assert rawrefcount.to_obj(W_Root, ob) == p lltype.free(ob, flavor='raw') + + +class TestTranslated(StandaloneTests): + + def test_full_translation(self): + class State: + pass + state = State() + state.seen = [] + def dealloc_callback(ob): + state.seen.append(ob) + + def make_p(): + p = W_Root(42) + ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) + rawrefcount.create_link_pyobj(p, ob) + ob.c_ob_refcnt += REFCNT_FROM_PYPY + assert rawrefcount.from_obj(PyObject, p) == ob + assert rawrefcount.to_obj(W_Root, ob) == p + return ob, p + + FTYPE = rawrefcount.RAWREFCOUNT_DEALLOC + + def entry_point(argv): + ll_dealloc_callback = llhelper(FTYPE, dealloc_callback) + rawrefcount.init(ll_dealloc_callback) + ob, p = make_p() + if state.seen != []: + print "OB COLLECTED REALLY TOO SOON" + return 1 + rgc.collect() + if state.seen != []: + print "OB COLLECTED TOO SOON" + return 1 + objectmodel.keepalive_until_here(p) + p = None + 
rgc.collect() + if state.seen == [llmemory.cast_ptr_to_adr(ob)]: + print "OK!" + lltype.free(ob, flavor='raw') + return 0 + else: + print "OB NOT COLLECTED" + return 1 + + self.config = get_combined_translation_config(translating=True) + self.config.translation.gc = "incminimark" + t, cbuilder = self.compile(entry_point) + data = cbuilder.cmdexec('hi there') + assert data.startswith('OK!\n') From noreply at buildbot.pypy.org Fri Oct 16 23:15:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 23:15:14 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fix test Message-ID: <20151016211514.75F831C12D8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80298:1b5c3358bb1a Date: 2015-10-16 23:15 +0200 http://bitbucket.org/pypy/pypy/changeset/1b5c3358bb1a/ Log: fix test diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -220,7 +220,7 @@ def make_p(): p = W_Root(42) ob = lltype.malloc(PyObjectS, flavor='raw', zero=True) - rawrefcount.create_link_pyobj(p, ob) + rawrefcount.create_link_pypy(p, ob) ob.c_ob_refcnt += REFCNT_FROM_PYPY assert rawrefcount.from_obj(PyObject, p) == ob assert rawrefcount.to_obj(W_Root, ob) == p From noreply at buildbot.pypy.org Fri Oct 16 23:57:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 23:57:56 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: random progress Message-ID: <20151016215756.4C1801C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80299:78020d53f0c5 Date: 2015-10-16 23:43 +0200 http://bitbucket.org/pypy/pypy/changeset/78020d53f0c5/ Log: random progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -541,6 +541,12 @@ return False return hasattr(TYPE.TO, 'c_ob_refcnt') and 
hasattr(TYPE.TO, 'c_ob_type') +class BORROWED: pass + + at specialize.memo() +def is_BORROWED(TYPE): + return TYPE is BORROWED + # a pointer to PyObject PyObjectP = rffi.CArrayPtr(PyObject) @@ -1230,68 +1236,53 @@ # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function + assert decref_args #ZZZ + @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () - to_decref = [] + keepalives = () assert len(args) == len(FT.ARGS) for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): - if arg is None: - boxed_args += (lltype.nullptr(PyObject.TO),) - elif isinstance(arg, W_Root): - ref = make_ref(space, arg) - boxed_args += (ref,) - if decref_args: - to_decref.append(ref) - else: - boxed_args += (arg,) - else: - boxed_args += (arg,) + if not is_pyobj(arg): + keepalives += (arg,) + arg = as_pyobj(arg) + boxed_args += (arg,) try: - # create a new container for borrowed references - state = space.fromcache(RefcountState) - old_container = state.swap_borrow_container(None) - try: - # Call the function - result = call_external_function(func, *boxed_args) - finally: - state.swap_borrow_container(old_container) + # Call the function + result = call_external_function(func, *boxed_args) + finally: + keepalive_until_here(*keepalives) - if is_PyObject(RESULT_TYPE): - if result is None: - ret = result - elif isinstance(result, W_Root): - ret = result - else: - ret = from_ref(space, result) - # The object reference returned from a C function - # that is called from Python must be an owned reference - # - ownership is transferred from the function to its caller. - if result: - Py_DecRef(space, result) + if is_PyObject(RESULT_TYPE): + if not is_pyobj(result): + ret = result + else: + ret = from_ref(space, result) + # The object reference returned from a C function + # that is called from Python must be an owned reference + # - ownership is transferred from the function to its caller. 
+ if result: + result.c_ob_refcnt -= 1 - # Check for exception consistency - has_error = PyErr_Occurred(space) is not None - has_result = ret is not None - if has_error and has_result: - raise OperationError(space.w_SystemError, space.wrap( - "An exception was set, but function returned a value")) - elif not expect_null and not has_error and not has_result: - raise OperationError(space.w_SystemError, space.wrap( - "Function returned a NULL result without setting an exception")) + # Check for exception consistency + has_error = PyErr_Occurred(space) is not None + has_result = ret is not None + if has_error and has_result: + raise OperationError(space.w_SystemError, space.wrap( + "An exception was set, but function returned a value")) + elif not expect_null and not has_error and not has_result: + raise OperationError(space.w_SystemError, space.wrap( + "Function returned a NULL result without setting an exception")) - if has_error: - state = space.fromcache(State) - state.check_and_raise_exception() + if has_error: + state = space.fromcache(State) + state.check_and_raise_exception() - return ret - return result - finally: - if decref_args: - for ref in to_decref: - Py_DecRef(space, ref) + return ret + return result + return generic_cpy_call - diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -381,6 +381,7 @@ if DEBUG_REFCOUNT: debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) if obj.c_ob_refcnt == 0: + return #ZZZ state = space.fromcache(RefcountState) ptr = rffi.cast(ADDR, obj) if ptr not in state.py_objects_r2w: diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -3,6 +3,7 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rdynload import DLLHANDLE +from rpython.rlib import rawrefcount import 
sys class State: @@ -82,6 +83,7 @@ refcountstate = space.fromcache(RefcountState) refcountstate.init_r2w_from_w2r() + rawrefcount.init(lambda ob: ZZZ) for func in INIT_FUNCTIONS: func(space) self.check_and_raise_exception() diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -92,7 +92,7 @@ return str(pydname) def freeze_refcnts(self): - ZZZ + return #ZZZ state = self.space.fromcache(RefcountState) self.frozen_refcounts = {} for w_obj, obj in state.py_objects_w2r.iteritems(): @@ -128,6 +128,7 @@ state.reset_borrowed_references() def check_and_print_leaks(self): + return #ZZZ # check for sane refcnts import gc @@ -213,8 +214,8 @@ cls.space.getbuiltinmodule("cpyext") from pypy.module.imp.importing import importhook importhook(cls.space, "os") # warm up reference counts - state = cls.space.fromcache(RefcountState) - state.non_heaptypes_w[:] = [] + #state = cls.space.fromcache(RefcountState) ZZZ + #state.non_heaptypes_w[:] = [] def setup_method(self, func): @unwrap_spec(name=str) @@ -368,6 +369,7 @@ def teardown_method(self, func): for name in self.imported_module_names: self.unimport_module(name) + return #ZZZ self.cleanup_references(self.space) # XXX: find out how to disable check_and_print_leaks() if the # test failed... 
From noreply at buildbot.pypy.org Fri Oct 16 23:57:58 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 16 Oct 2015 23:57:58 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Tweak tweak but no correct object lifetime management here Message-ID: <20151016215758.7271F1C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80300:e01fa1dab750 Date: 2015-10-16 23:58 +0200 http://bitbucket.org/pypy/pypy/changeset/e01fa1dab750/ Log: Tweak tweak but no correct object lifetime management here diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -192,7 +192,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, error=_NOT_SPECIFIED, - c_name=None, gil=None): + c_name=None, gil=None, return_borrowed=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -209,6 +209,7 @@ self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil + self.return_borrowed = return_borrowed def _freeze_(self): return True @@ -245,6 +246,10 @@ - set `gil` to "acquire", "release" or "around" to acquire the GIL, release the GIL, or both """ + return_borrowed = restype is BORROW + if return_borrowed: + restype = PyObject + if isinstance(restype, lltype.Typedef): real_restype = restype.OF else: @@ -267,7 +272,8 @@ else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, - c_name=c_name, gil=gil) + c_name=c_name, gil=gil, + return_borrowed=return_borrowed) func.api_func = api_function if external: @@ -541,7 +547,7 @@ return False return hasattr(TYPE.TO, 'c_ob_refcnt') and hasattr(TYPE.TO, 'c_ob_type') -class BORROWED: pass +class BORROW: pass @specialize.memo() def is_BORROWED(TYPE): @@ -611,11 +617,12 @@ gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") assert gil is None 
or gil_acquire or gil_release + return_borrowed = callable.api_func.return_borrowed @specialize.ll() def wrapper(*args): from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import Reference, as_pyobj, is_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: @@ -669,16 +676,12 @@ retval = error_value elif is_PyObject(callable.api_func.restype): - if result is None: - retval = rffi.cast(callable.api_func.restype, - make_ref(space, None)) - elif isinstance(result, Reference): - retval = result.get_ref(space) - elif not rffi._isllptr(result): - retval = rffi.cast(callable.api_func.restype, - make_ref(space, result)) + if is_pyobj(result): + retval = result else: - retval = result + retval = as_pyobj(result) + if not return_borrowed and retval: + retval.c_ob_refcnt += 1 elif callable.api_func.restype is not lltype.Void: retval = rffi.cast(callable.api_func.restype, result) except Exception, e: @@ -1211,7 +1214,7 @@ @specialize.memo() def make_generic_cpy_call(FT, decref_args, expect_null): from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef - from pypy.module.cpyext.pyobject import RefcountState + from pypy.module.cpyext.pyobject import RefcountState, is_pyobj, as_pyobj from pypy.module.cpyext.pyerrors import PyErr_Occurred unrolling_arg_types = unrolling_iterable(enumerate(FT.ARGS)) RESULT_TYPE = FT.RESULT @@ -1261,7 +1264,7 @@ if not is_pyobj(result): ret = result else: - ret = from_ref(space, result) + ret = from_ref(result) # The object reference returned from a C function # that is called from Python must be an owned reference # - ownership is transferred from the function to its caller. 
diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, cpython_struct, \ METH_STATIC, METH_CLASS, METH_COEXIST, CANNOT_FAIL, CONST_STRING -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, BORROW from pypy.interpreter.module import Module from pypy.module.cpyext.methodobject import ( W_PyCFunctionObject, PyCFunction_NewEx, PyDescr_NewMethod, @@ -34,7 +34,7 @@ # This is actually the Py_InitModule4 function, # renamed to refuse modules built against CPython headers. @cpython_api([CONST_STRING, lltype.Ptr(PyMethodDef), CONST_STRING, - PyObject, rffi.INT_real], PyObject) + PyObject, rffi.INT_real], BORROW) def _Py_InitPyPyModule(space, name, methods, doc, w_self, apiver): """ Create a new module object based on a name and table of functions, returning @@ -69,7 +69,7 @@ if doc: space.setattr(w_mod, space.wrap("__doc__"), space.wrap(rffi.charp2str(doc))) - return borrow_from(None, w_mod) + return w_mod def convert_method_defs(space, dict_w, methods, w_type, w_self=None, name=None): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import W_Root, SpaceCache from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.module.cpyext.api import ( - cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, + cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, BORROW, CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject From noreply at buildbot.pypy.org Mon Oct 19 09:11:35 2015 From: noreply at buildbot.pypy.org (arigo) 
Date: Mon, 19 Oct 2015 09:11:35 +0200 (CEST) Subject: [pypy-commit] pypy default: Marking these tests as xfail is wrong, because it doesn't always Message-ID: <20151019071135.D62301C1185@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80327:65422948292c Date: 2015-10-19 09:11 +0200 http://bitbucket.org/pypy/pypy/changeset/65422948292c/ Log: Marking these tests as xfail is wrong, because it doesn't always fail (and passes are reported as unexpected). Too bad, let's use AssertionError in the py.test.raises in this case diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -121,8 +121,6 @@ f(1) interpret(f, [1]) - at py.test.mark.xfail( - reason="may fail with AssertionError, depending on annotation order") def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: @@ -131,11 +129,10 @@ d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - with py.test.raises(UnionError): + # may fail with AssertionError, depending on annotation order + with py.test.raises((UnionError, AssertionError)): interpret(g, [1]) - at py.test.mark.xfail( - reason="may fail with AssertionError, depending on annotation order") def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: @@ -144,7 +141,8 @@ d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - with py.test.raises(UnionError): + # may fail with AssertionError, depending on annotation order + with py.test.raises((UnionError, AssertionError)): interpret(g, [1]) @py.test.mark.xfail(reason="not implemented, messy") From noreply at buildbot.pypy.org Mon Oct 19 09:32:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:32:01 +0200 (CEST) Subject: [pypy-commit] pypy default: Document branch Message-ID: <20151019073201.961671C1396@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80328:6862ec9894d0 Date: 2015-10-19 09:17 +0200 
http://bitbucket.org/pypy/pypy/changeset/6862ec9894d0/ Log: Document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,6 @@ .. this is a revision shortly after release-15.11.0 .. startrev: d924723d483b +.. branch: ppc-updated-backend +The PowerPC JIT backend is merged. From noreply at buildbot.pypy.org Mon Oct 19 09:32:03 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:32:03 +0200 (CEST) Subject: [pypy-commit] pypy default: fix in the llgraph backend: multiple redirections of compiled loops didn't work Message-ID: <20151019073203.B9D4B1C1396@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80329:6d8880c45110 Date: 2015-10-19 09:28 +0200 http://bitbucket.org/pypy/pypy/changeset/6d8880c45110/ Log: fix in the llgraph backend: multiple redirections of compiled loops didn't work diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -371,16 +371,16 @@ trace.invalid = True def redirect_call_assembler(self, oldlooptoken, newlooptoken): - oldtrace = oldlooptoken.compiled_loop_token._llgraph_loop - newtrace = newlooptoken.compiled_loop_token._llgraph_loop + oldc = oldlooptoken.compiled_loop_token + newc = newlooptoken.compiled_loop_token + oldtrace = oldc._llgraph_loop + newtrace = newc._llgraph_loop OLD = [box.type for box in oldtrace.inputargs] NEW = [box.type for box in newtrace.inputargs] assert OLD == NEW - assert not hasattr(oldlooptoken, '_llgraph_redirected') - oldlooptoken.compiled_loop_token._llgraph_redirected = True - oldlooptoken.compiled_loop_token._llgraph_loop = newtrace - alltraces = newlooptoken.compiled_loop_token._llgraph_alltraces - oldlooptoken.compiled_loop_token._llgraph_alltraces = alltraces + assert not hasattr(oldc, '_llgraph_redirected') + oldc._llgraph_redirected = newc + 
oldc._llgraph_alltraces = newc._llgraph_alltraces def free_loop_and_bridges(self, compiled_loop_token): for c in compiled_loop_token._llgraph_alltraces: @@ -393,7 +393,10 @@ return self._execute_token def _execute_token(self, loop_token, *args): - lltrace = loop_token.compiled_loop_token._llgraph_loop + loopc = loop_token.compiled_loop_token + while hasattr(loopc, '_llgraph_redirected'): + loopc = loopc._llgraph_redirected + lltrace = loopc._llgraph_loop frame = LLFrame(self, lltrace.inputargs, args) try: frame.execute(lltrace) From noreply at buildbot.pypy.org Mon Oct 19 09:38:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:38:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Add a test for the correct #pragma in pyconfig.h Message-ID: <20151019073821.8B6ED1C13AE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80330:fcfc28ee7164 Date: 2015-10-19 09:38 +0200 http://bitbucket.org/pypy/pypy/changeset/fcfc28ee7164/ Log: Add a test for the correct #pragma in pyconfig.h diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -1,6 +1,16 @@ +import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +def test_pragma_version(): + from pypy.module.sys.version import PYPY_VERSION + rootdir = py.path.local(__file__).join('..', '..') + pyconfig_h = rootdir.join('include', 'pyconfig.h') + version = '%d%d' % (PYPY_VERSION[0], PYPY_VERSION[1]) + pragma = 'pragma comment(lib,"python%s.lib")' % version + assert pragma in pyconfig_h.read() + + class AppTestVersion(AppTestCpythonExtensionBase): def test_versions(self): From noreply at buildbot.pypy.org Mon Oct 19 09:41:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:41:31 +0200 (CEST) Subject: [pypy-commit] pypy default: of course, I'm confusing the PyPy and the CPython version now that they 
Message-ID: <20151019074131.A4ECE1C1214@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80331:33863d9924f6 Date: 2015-10-19 09:41 +0200 http://bitbucket.org/pypy/pypy/changeset/33863d9924f6/ Log: of course, I'm confusing the PyPy and the CPython version now that they are both at 2.7 diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -3,10 +3,10 @@ def test_pragma_version(): - from pypy.module.sys.version import PYPY_VERSION + from pypy.module.sys.version import CPYTHON_VERSION rootdir = py.path.local(__file__).join('..', '..') pyconfig_h = rootdir.join('include', 'pyconfig.h') - version = '%d%d' % (PYPY_VERSION[0], PYPY_VERSION[1]) + version = '%d%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) pragma = 'pragma comment(lib,"python%s.lib")' % version assert pragma in pyconfig_h.read() From noreply at buildbot.pypy.org Mon Oct 19 09:44:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:44:23 +0200 (CEST) Subject: [pypy-commit] pypy py3k: Add a test for the correct #pragma in pyconfig.h Message-ID: <20151019074423.305C21C1214@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3k Changeset: r80332:f56628f44911 Date: 2015-10-19 09:42 +0200 http://bitbucket.org/pypy/pypy/changeset/f56628f44911/ Log: Add a test for the correct #pragma in pyconfig.h diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -1,6 +1,16 @@ +import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +def test_pragma_version(): + from pypy.module.sys.version import CPYTHON_VERSION + rootdir = py.path.local(__file__).join('..', '..') + pyconfig_h = rootdir.join('include', 'pyconfig.h') + version = '%d%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) + 
pragma = 'pragma comment(lib,"python%s.lib")' % version + assert pragma in pyconfig_h.read() + + class AppTestVersion(AppTestCpythonExtensionBase): def test_versions(self): From noreply at buildbot.pypy.org Mon Oct 19 09:44:25 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 09:44:25 +0200 (CEST) Subject: [pypy-commit] pypy py3.3: Add a test for the correct #pragma in pyconfig.h, and fix it: wrong #pragma! Message-ID: <20151019074425.7B3C91C1214@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3.3 Changeset: r80333:f0f52b8b046c Date: 2015-10-19 09:44 +0200 http://bitbucket.org/pypy/pypy/changeset/f0f52b8b046c/ Log: Add a test for the correct #pragma in pyconfig.h, and fix it: wrong #pragma! diff --git a/pypy/module/cpyext/include/pyconfig.h b/pypy/module/cpyext/include/pyconfig.h --- a/pypy/module/cpyext/include/pyconfig.h +++ b/pypy/module/cpyext/include/pyconfig.h @@ -38,9 +38,9 @@ * taken care of by distutils.) */ # ifdef _DEBUG # error("debug first with cpython") -# pragma comment(lib,"python32.lib") +# pragma comment(lib,"python33.lib") # else -# pragma comment(lib,"python32.lib") +# pragma comment(lib,"python33.lib") # endif /* _DEBUG */ # endif #endif /* _MSC_VER */ diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -1,6 +1,16 @@ +import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +def test_pragma_version(): + from pypy.module.sys.version import CPYTHON_VERSION + rootdir = py.path.local(__file__).join('..', '..') + pyconfig_h = rootdir.join('include', 'pyconfig.h') + version = '%d%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) + pragma = 'pragma comment(lib,"python%s.lib")' % version + assert pragma in pyconfig_h.read() + + class AppTestVersion(AppTestCpythonExtensionBase): def test_versions(self): From noreply at buildbot.pypy.org Mon Oct 19 11:07:37 2015 
From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 19 Oct 2015 11:07:37 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: added more assembler functions (branching, loading, ...) and added first small test that assembles a real assembler block and executes it Message-ID: <20151019090737.08D041C135C@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80334:7dae60560404 Date: 2015-10-19 11:07 +0200 http://bitbucket.org/pypy/pypy/changeset/7dae60560404/ Log: added more assembler functions (branching, loading, ...) and added first small test that assembles a real assembler block and executes it diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1,8 +1,65 @@ from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler +from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper +from rpython.jit.backend.zarch import registers as reg +from rpython.jit.backend.zarch import locations as loc +from rpython.jit.backend.zarch.codebuilder import InstrBuilder from rpython.jit.metainterp.resoperation import rop +from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id class AssemblerZARCH(BaseAssembler): + def __init__(self, cpu, translate_support_code=False): + BaseAssembler.__init__(self, cpu, translate_support_code) + self.mc = None + self.pending_guards = None + self.current_clt = None + self._regalloc = None + self.datablockwrapper = None + self.propagate_exception_path = 0 + self.stack_check_slowpath = 0 + self.loop_run_counters = [] + self.gcrootmap_retaddr_forced = 0 + + def setup(self, looptoken): + BaseAssembler.setup(self, looptoken) + assert self.memcpy_addr != 0, 'setup_once() not called?' 
+ if we_are_translated(): + self.debug = False + self.current_clt = looptoken.compiled_loop_token + self.mc = InstrBuilder() + self.pending_guards = [] + #assert self.datablockwrapper is None --- but obscure case + # possible, e.g. getting MemoryError and continuing + allblocks = self.get_asmmemmgr_blocks(looptoken) + self.datablockwrapper = MachineDataBlockWrapper(self.cpu.asmmemmgr, + allblocks) + self.mc.datablockwrapper = self.datablockwrapper + self.target_tokens_currently_compiling = {} + self.frame_depth_to_patch = [] + + def teardown(self): + self.current_clt = None + self._regalloc = None + self.mc = None + self.pending_guards = None + + def get_asmmemmgr_blocks(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_blocks is None: + clt.asmmemmgr_blocks = [] + return clt.asmmemmgr_blocks + + def gen_func_prolog(self): + self.mc.STMG(reg.r0, reg.r15, loc.addr(reg.sp, -160)) + #self.mc.LAY(reg.r15, loc.addr(reg.sp, -160)) + + def gen_func_epilog(self): + self.mc.LMG(reg.r0, reg.r15, loc.addr(reg.sp, -160)) + self.jmpto(reg.r14) + + def jmpto(self, register): + self.mc.BCR_rr(0xf, register.value) + def _build_failure_recovery(self, exc, withfloats=False): pass # TODO diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -1,7 +1,9 @@ from rpython.jit.backend.zarch import conditions as cond from rpython.jit.backend.zarch import registers as reg +from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.unroll import unrolling_iterable from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.udir import udir from rpython.jit.backend.detect_cpu import autodetect @@ -40,9 +42,10 @@ uX - immediate X bits (unsigend) bd - base displacement (12 
bit) bdl - base displacement long (20 bit) - ibd - index base displacement - l4bd - length base displacement (4 bit) - l8bd - length base displacement (8 bit) + bid - index base displacement + bidl - index base displacement (20 bit) + l4bd - length base displacement (4 bit) + l8bd - length base displacement (8 bit) note that a suffix 'l' means long, and a prefix length """ @@ -105,7 +108,7 @@ return encode_rr def build_rx(mnemonic, (opcode,)): - @builder.arguments('r/m,ibd') + @builder.arguments('r/m,bid') def encode_rx(self, reg_or_mask, idxbasedisp): self.writechar(opcode) index = idxbasedisp.index @@ -119,7 +122,7 @@ return encode_rx def build_rxy(mnemonic, (opcode1,opcode2)): - @builder.arguments('r/m,ibdl') + @builder.arguments('r/m,bidl') def encode_rxy(self, reg_or_mask, idxbasedisp): self.writechar(opcode1) index = idxbasedisp.index @@ -199,7 +202,7 @@ return encode_ssc def build_ssd(mnemonic, (opcode,)): - @builder.arguments('ibd,bd,r') + @builder.arguments('bid,bd,r') def encode_ssd(self, index_base_disp, base_disp, reg): self.writechar(opcode) byte = (index_base_disp.index & 0xf) << 4 | reg & 0xf @@ -273,26 +276,51 @@ 'AG': (build_rxy, ['\xE3','\x08']), 'AGF': (build_rxy, ['\xE3','\x18']), 'AHI': (build_ri, ['\xA7','\x0A']), + # + 'BRASL': (build_ril, ['\xC0','\x05']), + 'BXH': (build_rs, ['\x86']), + 'BXHG': (build_rsy, ['\xEB','\x44']), + 'BRXH': (build_rsi, ['\x84']), + 'BRXLG': (build_rie, ['\xEC','\x45']), + 'BCR': (build_rr, ['\x07']), + # 'NI': (build_si, ['\x94']), 'NIY': (build_siy, ['\xEB','\x54']), 'NC': (build_ssa, ['\xD4']), 'AP': (build_ssb, ['\xFA']), 'SRP': (build_ssc, ['\xF0']), 'MVCK': (build_ssd, ['\xD9']), + + 'LAY': (build_rxy, ['\xE3','\x71']), 'LMD': (build_sse, ['\xEF']), + 'LMG': (build_rsy, ['\xEB','\x04']), + 'LGHI': (build_ri, ['\xA7','\x09']), + 'PKA': (build_ssf, ['\xE9']), - 'BRASL': (build_ril, ['\xC0','\x05']), - 'BXH': (build_rs, ['\x86']), - 'BXHG': (build_rsy, ['\xEB','\x44']), - 'BRXH': (build_rsi, ['\x84']), - 
'BRXLG': (build_rie, ['\xEC','\x45']), + 'STMG': (build_rsy, ['\xEB','\x24']), } +def build_unpack_func(mnemonic, func): + def function(self, *args): + newargs = [None] * len(args) + for i,arg in enumerate(unrolling_iterable(func._arguments_)): + if arg == 'r' or arg == 'r/m': + newargs[i] = args[i].value + elif arg.startswith('i') or arg.startswith('u'): + newargs[i] = args[i].value + else: + newargs[i] = args[i] + return func(self, *newargs) + function.__name__ = mnemonic + return function + def build_instr_codes(clazz): for mnemonic, (builder, args) in _mnemonic_codes.items(): func = builder(mnemonic, args) - name = mnemonic + "_" + builder.__name__.split("_")[1] + instrtype = builder.__name__.split("_")[1] + name = mnemonic + "_" + instrtype setattr(clazz, name, func) + setattr(clazz, mnemonic, build_unpack_func(mnemonic, func)) class AbstractZARCHBuilder(object): def write_i32(self, word): @@ -300,13 +328,13 @@ self.writechar(chr((word >> 16) & 0xFF)) self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) + def write_i16(self, word): self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) build_instr_codes(AbstractZARCHBuilder) - class InstrBuilder(BlockBuilderMixin, AbstractZARCHBuilder): def __init__(self): diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -167,9 +167,23 @@ def as_key(self): # a word >= 1000, and < 1000 + size of SP frame return self.value + 1000 +class AddressLocation(AssemblerLocation): + _immutable_ = True + + def __init__(self, basereg, indexreg, displace): + self.base = basereg.value + self.displace = displace + self.index = -1 + if indexreg: + self.index = indexreg.value + +def addr(basereg, displace, indexreg=None): + return AddressLocation(basereg, indexreg, displace) def imm(i): return ImmLocation(i) def get_fp_offset(base_ofs, position): return base_ofs + WORD * 
(position + JITFRAME_FIXED_SIZE) + + diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -9,5 +9,8 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers +sp = r15 +raddr = r14 + [f0,f1,f2,f3,f4,f5,f6,f7,f8, f9,f10,f11,f12,f13,f14,f15] = fpregisters diff --git a/rpython/jit/backend/zarch/test/support.py b/rpython/jit/backend/zarch/test/support.py --- a/rpython/jit/backend/zarch/test/support.py +++ b/rpython/jit/backend/zarch/test/support.py @@ -1,4 +1,9 @@ +from rpython.rtyper.lltypesystem import lltype, rffi - -def run_asm(): - pass +def run_asm(asm): + BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) + addr = asm.mc.materialize(asm.cpu, [], None) + assert addr % 8 == 0 + func = rffi.cast(lltype.Ptr(BOOTSTRAP_TP), addr) + asm.mc._dump_trace(addr, 'test.asm') + return func() diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -1,5 +1,5 @@ -from rpython.jit.backend.zarch import conditions as c -from rpython.jit.backend.zarch import registers as r +from rpython.jit.backend.zarch import conditions as con +from rpython.jit.backend.zarch import registers as reg from rpython.jit.backend.zarch.assembler import AssemblerZARCH from rpython.jit.backend.zarch.locations import imm from rpython.jit.backend.zarch.test.support import run_asm @@ -34,3 +34,15 @@ from rpython.jit.backend.zarch import assembler assert assembler.asm_operations[i] \ is AssemblerZARCH.emit_op_int_add.im_func + + def test_load_small_int_to_reg(self): + self.a.mc.LGHI(reg.r2, imm(123)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 123 + + #def test_load_small_int_to_reg_func(self): + # self.a.gen_func_prolog() + # self.a.mc.LGHI(r.r2, imm(123)) + # self.a.gen_func_epilog() + # 
assert run_asm(self.a) == 123 + diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -140,8 +140,8 @@ 'u64': test_range(64), 'bd': build_fake(FakeBaseDisplace,4,12), 'bdl': build_fake(FakeBaseDisplace,4,19), - 'ibd': build_fake(FakeIndexBaseDisplace,4,4,12), - 'ibdl': build_fake(FakeIndexBaseDisplace,4,4,(20,True)), + 'bid': build_fake(FakeIndexBaseDisplace,4,4,12), + 'bidl': build_fake(FakeIndexBaseDisplace,4,4,(20,True)), 'l8bd': build_fake(FakeLengthBaseDisplace,8,4,12), 'l4bd': build_fake(FakeLengthBaseDisplace,4,4,12), } From noreply at buildbot.pypy.org Mon Oct 19 15:29:30 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 19 Oct 2015 15:29:30 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: called first subroutine in assembler Message-ID: <20151019132930.0B86C1C13AE@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80335:2b4c4bd98951 Date: 2015-10-19 15:29 +0200 http://bitbucket.org/pypy/pypy/changeset/2b4c4bd98951/ Log: called first subroutine in assembler diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,11 +50,12 @@ return clt.asmmemmgr_blocks def gen_func_prolog(self): - self.mc.STMG(reg.r0, reg.r15, loc.addr(reg.sp, -160)) - #self.mc.LAY(reg.r15, loc.addr(reg.sp, -160)) + self.mc.STMG(reg.r11, reg.r15, loc.addr(reg.sp, -96)) + self.mc.AHI(reg.sp, loc.imm(-96)) + #self.mc.LAY(reg.r15, loc.addr(reg.sp, -)) def gen_func_epilog(self): - self.mc.LMG(reg.r0, reg.r15, loc.addr(reg.sp, -160)) + self.mc.LMG(reg.r11, reg.r15, loc.addr(reg.sp, 0)) self.jmpto(reg.r14) def jmpto(self, register): diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py 
--- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -173,7 +173,7 @@ def __init__(self, basereg, indexreg, displace): self.base = basereg.value self.displace = displace - self.index = -1 + self.index = 0 # designates the absense of an index register! if indexreg: self.index = indexreg.value diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -1,7 +1,7 @@ from rpython.jit.backend.zarch import conditions as con from rpython.jit.backend.zarch import registers as reg from rpython.jit.backend.zarch.assembler import AssemblerZARCH -from rpython.jit.backend.zarch.locations import imm +from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.test.support import run_asm from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.metainterp.resoperation import rop @@ -36,13 +36,28 @@ is AssemblerZARCH.emit_op_int_add.im_func def test_load_small_int_to_reg(self): - self.a.mc.LGHI(reg.r2, imm(123)) + self.a.mc.LGHI(reg.r2, loc.imm(123)) self.a.jmpto(reg.r14) assert run_asm(self.a) == 123 - #def test_load_small_int_to_reg_func(self): - # self.a.gen_func_prolog() - # self.a.mc.LGHI(r.r2, imm(123)) - # self.a.gen_func_epilog() - # assert run_asm(self.a) == 123 + def test_prolog_epilog(self): + self.a.gen_func_prolog() + self.a.mc.LGHI(reg.r2, loc.imm(123)) + self.a.gen_func_epilog() + assert run_asm(self.a) == 123 + def test_simple_func(self): + # enter + self.a.mc.STMG(reg.r11, reg.r15, loc.addr(reg.sp, -96)) + self.a.mc.AHI(reg.sp, loc.imm(-96)) + self.a.mc.BRASL(reg.r14, loc.imm(8+6)) + self.a.mc.LMG(reg.r11, reg.r15, loc.addr(reg.sp, 0)) + self.a.jmpto(reg.r14) + + addr = self.a.mc.get_relative_pos() + assert addr & 0x1 == 0 + self.a.gen_func_prolog() + self.a.mc.LGHI(reg.r2, loc.imm(321)) + self.a.gen_func_epilog() + assert 
run_asm(self.a) == 321 + From noreply at buildbot.pypy.org Mon Oct 19 16:42:00 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 19 Oct 2015 16:42:00 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: first loop (that includes a branching instruction on condition), substract and register move added to instructions Message-ID: <20151019144200.1123A1C1186@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80336:cff394b2f479 Date: 2015-10-19 16:42 +0200 http://bitbucket.org/pypy/pypy/changeset/cff394b2f479/ Log: first loop (that includes a branching instruction on condition), substract and register move added to instructions diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,12 +50,11 @@ return clt.asmmemmgr_blocks def gen_func_prolog(self): - self.mc.STMG(reg.r11, reg.r15, loc.addr(reg.sp, -96)) + self.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.sp)) self.mc.AHI(reg.sp, loc.imm(-96)) - #self.mc.LAY(reg.r15, loc.addr(reg.sp, -)) def gen_func_epilog(self): - self.mc.LMG(reg.r11, reg.r15, loc.addr(reg.sp, 0)) + self.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.sp)) self.jmpto(reg.r14) def jmpto(self, register): diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -96,8 +96,7 @@ self.writechar(chr(operands)) return encode_rr -def build_rre(mnemonic, (opcode,)): - opcode1,opcode2 = opcode +def build_rre(mnemonic, (opcode1,opcode2)): @builder.arguments('r,r') def encode_rr(self, reg1, reg2): self.writechar(opcode1) @@ -269,8 +268,8 @@ _mnemonic_codes = { 'AR': (build_rr, ['\x1A']), - 'AGR': (build_rre, ['\xB9\x08']), - 'AGFR': (build_rre, ['\xB9\x18']), + 'AGR': (build_rre, ['\xB9','\x08']), + 'AGFR': (build_rre, ['\xB9','\x18']), 'A': (build_rx, 
['\x5A']), 'AY': (build_rxy, ['\xE3','\x5A']), 'AG': (build_rxy, ['\xE3','\x08']), @@ -283,6 +282,9 @@ 'BRXH': (build_rsi, ['\x84']), 'BRXLG': (build_rie, ['\xEC','\x45']), 'BCR': (build_rr, ['\x07']), + 'BC': (build_rx, ['\x47']), + 'BRC': (build_ri, ['\xA7','\x04']), + 'BRCL': (build_ril, ['\xC0','\x04']), # 'NI': (build_si, ['\x94']), 'NIY': (build_siy, ['\xEB','\x54']), @@ -295,9 +297,13 @@ 'LMD': (build_sse, ['\xEF']), 'LMG': (build_rsy, ['\xEB','\x04']), 'LGHI': (build_ri, ['\xA7','\x09']), + 'LR': (build_rr, ['\x18']), + 'LGR': (build_rre, ['\xB9','\x04']), 'PKA': (build_ssf, ['\xE9']), 'STMG': (build_rsy, ['\xEB','\x24']), + 'SR': (build_rr, ['\x1B']), + 'SGR': (build_rre, ['\xB9','\x09']), } def build_unpack_func(mnemonic, func): diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py --- a/rpython/jit/backend/zarch/conditions.py +++ b/rpython/jit/backend/zarch/conditions.py @@ -0,0 +1,9 @@ + +from rpython.jit.backend.zarch import locations as loc + +EQ = loc.imm(0x8) +LT = loc.imm(0x4) +GT = loc.imm(0x2) +LE = loc.imm(EQ.value | LT.value) +GE = loc.imm(EQ.value | GT.value) +OVERFLOW = loc.imm(0x1) diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -171,13 +171,16 @@ _immutable_ = True def __init__(self, basereg, indexreg, displace): - self.base = basereg.value self.displace = displace - self.index = 0 # designates the absense of an index register! + # designates the absense of an index/base register! 
+ self.base = 0 + self.index = 0 + if basereg: + self.base = basereg.value if indexreg: self.index = indexreg.value -def addr(basereg, displace, indexreg=None): +def addr(displace, basereg=None, indexreg=None): return AddressLocation(basereg, indexreg, displace) def imm(i): diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -48,10 +48,11 @@ def test_simple_func(self): # enter - self.a.mc.STMG(reg.r11, reg.r15, loc.addr(reg.sp, -96)) + self.a.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.sp)) self.a.mc.AHI(reg.sp, loc.imm(-96)) + # from the start of BRASL to end of jmpto there are 8+6 bytes self.a.mc.BRASL(reg.r14, loc.imm(8+6)) - self.a.mc.LMG(reg.r11, reg.r15, loc.addr(reg.sp, 0)) + self.a.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.sp)) self.a.jmpto(reg.r14) addr = self.a.mc.get_relative_pos() @@ -61,3 +62,14 @@ self.a.gen_func_epilog() assert run_asm(self.a) == 321 + def test_simple_loop(self): + self.a.mc.LGHI(reg.r3, loc.imm(2**15-1)) + self.a.mc.LGHI(reg.r4, loc.imm(1)) + L1 = self.a.mc.get_relative_pos() + self.a.mc.SGR(reg.r3, reg.r4) + LJ = self.a.mc.get_relative_pos() + self.a.mc.BRCL(loc.imm(0x2), loc.imm(L1-LJ)) + self.a.mc.LGR(reg.r2, reg.r3) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 0 + From noreply at buildbot.pypy.org Mon Oct 19 18:48:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 18:48:19 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress: from_pyobj() Message-ID: <20151019164819.5DB661C136E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80337:21a3dead2ef1 Date: 2015-10-19 15:32 +0200 http://bitbucket.org/pypy/pypy/changeset/21a3dead2ef1/ Log: in-progress: from_pyobj() diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py 
+++ b/pypy/module/cpyext/intobject.py @@ -19,11 +19,12 @@ @bootstrap_function def init_intobject(space): "Type description of PyIntObject" - from pypy.objspace.std.intobject import W_AbstractIntObject + from pypy.objspace.std.intobject import W_AbstractIntObject, W_IntObject setup_class_for_cpyext(W_AbstractIntObject, basestruct=PyIntObject.TO, - fill_pyobj=int_fill_pyobj) - #realize=int_realize) + fill_pyobj=int_fill_pyobj, + realize=int_realize, + realize_subclass_of=W_IntObject) def int_fill_pyobj(space, w_obj, py_int): """ @@ -32,15 +33,9 @@ """ py_int.c_ob_ival = space.int_w(w_obj) -def int_realize(space, obj): - intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival) - w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) - w_obj = space.allocate_instance(W_IntObject, w_type) - w_obj.__init__(intval) - track_reference(space, obj, w_obj) - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) - return w_obj +def int_realize(space, w_obj, py_obj): + intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, py_obj).c_ob_ival) + W_IntObject.__init__(w_obj, intval) PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -134,6 +134,7 @@ else: return _get_typedescr_1(typedef) + #________________________________________________________ # type description @@ -143,17 +144,20 @@ basestruct: The basic structure to allocate alloc_pyobj: default create_pyobj calls this to get the PyObject fill_pyobj: default create_pyobj calls this after attaching is done - realize : Function called to create a pypy object from a raw struct + realize : Function called to create a pypy object from a PyObject dealloc : a cpython_api(external=False), similar to PyObject_dealloc """ tp_basestruct = kw.pop('basestruct', PyObject.TO) tp_alloc_pyobj = kw.pop('alloc_pyobj', None) tp_fill_pyobj = 
kw.pop('fill_pyobj', None) - force_create_pyobj = kw.pop('force_create_pyobj', False) - #tp_realize = kw.pop('realize', None) + tp_realize = kw.pop('realize', None) + force_create_pyobj = kw.pop('force_create_pyobj', False) + realize_subclass_of = kw.pop('realize_subclass_of', None) #tp_dealloc = kw.pop('dealloc', None) - assert not kw, "Extra arguments to make_typedescr" + assert not kw, "Extra arguments to make_typedescr: %s" % kw.keys() + + assert 'cpyext_basestruct' not in W_Class.__dict__ # double set if tp_alloc_pyobj or tp_fill_pyobj or force_create_pyobj: # @@ -186,6 +190,22 @@ keepalive_until_here(self) W_Class.cpyext_fill_prebuilt_pyobj = cpyext_fill_prebuilt_pyobj + if tp_realize or realize_subclass_of: + W_CPyExtPlaceHolder = get_cpyextplaceholder_subclass( + realize_subclass_of or W_Class) + if tp_realize: + tp_realize._always_inline_ = True + # + def cpyext_realize(space, pyobj): + w_obj = W_CPyExtPlaceHolder(pyobj) + if tp_realize: + tp_realize(space, w_obj, pyobj) + return w_obj + # + typedef = realize_subclass_of.typedef + assert not hasattr(typedef, 'cpyext_realize') + typedef.cpyext_realize = cpyext_realize + W_Class.cpyext_basestruct = tp_basestruct @@ -210,13 +230,62 @@ ob = w_type.cpyext_create_pyobj(space) pto = rffi.cast(PyTypeObjectPtr, ob) return pto - W_TypeObject.cpyext_c_type_object = lltype.nullptr(PyTypeObjectPtr.TO) - @bootstrap_function def init_pyobject(space): - setup_class_for_cpyext(W_Root, force_create_pyobj=True) + setup_class_for_cpyext(W_Root, force_create_pyobj=True, + realize_subclass_of=W_ObjectObject) + + +#________________________________________________________ +# W_CPyExtPlaceHolderObject + +# When we ask for the convertion of a PyObject to a W_Root and there +# is none, we look up the correct W_Root subclass to use (W_IntObject, +# etc., or W_ObjectObject by default), take the W_CPyExtPlaceHolder +# special subclass of it, and instantiate that. 
W_CPyExtPlaceHolder +# adds the field "cpyext_pyobj" pointing back to the PyObject. +# W_CPyExtPlaceHolder is made using the following memo function. + + at specialize.memo() +def get_cpyextplaceholder_subclass(W_Class): + try: + return W_Class.__dict__['_cpyextplaceholder_subclass'] + except KeyError: + pass + assert W_Class is not W_TypeObject + + class W_CPyExtPlaceHolder(W_Class): + def __init__(self, pyobj): + self.cpyext_pyobj = pyobj + def cpyext_as_pyobj(self, space): + return self.cpyext_pyobj + + W_CPyExtPlaceHolder.__name__ = W_Class.__name__ + '_CPyExtPlaceHolder' + W_Class._cpyextplaceholder_subclass = W_CPyExtPlaceHolder + return W_CPyExtPlaceHolder + + + +def _default_cpyext_as_pyobj(self, space): + """Default implementation for most classes in PyPy. + Overridden by the W_CPyExtPlaceHolder subclasses.""" + ob = rawrefcount.from_obj(PyObject, self) + if not ob: + ob = self.cpyext_create_pyobj(space) + return ob +W_Root.cpyext_as_pyobj = _default_cpyext_as_pyobj + +def _type_cpyext_as_pyobj(self, space): + ob = get_c_ob_type(space, self) + return rffi.cast(PyObject, ob) +W_TypeObject.cpyext_as_pyobj = _type_cpyext_as_pyobj +W_TypeObject._cpyextplaceholder_subclass = W_TypeObject + +def _create_w_obj_from_pyobj(space, pyobj): + w_type = from_pyobj(pyobj.c_ob_type) + return w_type.instancetypedef.cpyext_realize(space, pyobj) #________________________________________________________ # refcounted object support @@ -385,13 +454,7 @@ use keepalive_until_here(w_obj) some time later. 
""" assert not is_pyobj(w_obj) - assert w_obj is not None - #if isinstance(w_obj, W_CPyExtPlaceHolderObject): - # xxx - ob = rawrefcount.from_obj(PyObject, w_obj) - if not ob: - ob = w_obj.cpyext_create_pyobj(space) - return ob + return w_obj.cpyext_as_pyobj(space) as_pyobj._always_inline_ = True def as_xpyobj(space, w_obj): @@ -408,7 +471,7 @@ pyobj = rffi.cast(PyObject, pyobj) w_obj = rawrefcount.to_obj(W_Root, pyobj) if w_obj is None: - w_obj = _create_w_obj_from_pyobj(pyobj) + w_obj = _create_w_obj_from_pyobj(space, pyobj) return w_obj from_pyobj._always_inline_ = True @@ -432,10 +495,7 @@ _about_ = is_pyobj def compute_result_annotation(self, s_x): from rpython.rtyper.llannotation import SomePtr - if isinstance(s_x, SomePtr): - return self.bookkeeper.immutablevalue(True) - else: - return self.bookkeeper.immutablevalue(False) + return self.bookkeeper.immutablevalue(isinstance(s_x, SomePtr)) def specialize_call(self, hop): hop.exception_cannot_occur() return hop.inputconst(lltype.Bool, hop.s_result.const) From noreply at buildbot.pypy.org Mon Oct 19 19:48:48 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 19:48:48 +0200 (CEST) Subject: [pypy-commit] pypy default: Pff pff pff what a mess Message-ID: <20151019174848.7DFED1C137D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80338:5cbff711b321 Date: 2015-10-19 19:48 +0200 http://bitbucket.org/pypy/pypy/changeset/5cbff711b321/ Log: Pff pff pff what a mess diff --git a/rpython/translator/c/src/stacklet/switch_arm_gcc.h b/rpython/translator/c/src/stacklet/switch_arm_gcc.h --- a/rpython/translator/c/src/stacklet/switch_arm_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_arm_gcc.h @@ -57,7 +57,20 @@ [save_state]"r"(save_state), [extra]"r"(extra) : "r0", "r1", "r4", "r5", "r6", "r12", "lr", - "memory", "cc", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7" + "memory", "cc" +#ifndef __SOFTFP__ + , "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" + , "d8", "d9", "d10", "d11", 
"d12", "d13", "d14", "d15" +/* messsssssssssssss quite unsure it is the correct way */ +# if defined(__TARGET_FPU_SOFTVFP_VFPV3) || \ + defined(__TARGET_FPU_SOFTVFP_VFPV3_FP16) || \ + defined(__TARGET_FPU_VFPV3) || \ + defined(__TARGET_FPU_VFPV3_FP16) || \ + defined(__TARGET_FPU_VFPV4) + , "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23" + , "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" +# endif +#endif ); return result; } From noreply at buildbot.pypy.org Mon Oct 19 20:10:24 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 20:10:24 +0200 (CEST) Subject: [pypy-commit] pypy default: :-( Message-ID: <20151019181024.096641C1185@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80339:b5e12e911be2 Date: 2015-10-19 20:10 +0200 http://bitbucket.org/pypy/pypy/changeset/b5e12e911be2/ Log: :-( diff --git a/rpython/translator/c/src/stacklet/switch_arm_gcc.h b/rpython/translator/c/src/stacklet/switch_arm_gcc.h --- a/rpython/translator/c/src/stacklet/switch_arm_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_arm_gcc.h @@ -62,6 +62,15 @@ , "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" , "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" /* messsssssssssssss quite unsure it is the correct way */ +/* Actually it seems there is no way. These macros are defined by ARM's + * own compiler but not by GCC. On GCC, by looking at its sources it + * seems that we'd like to know the internal TARGET_VFPD32 flag, but + * there is no way to access it because it's not exported as a macro. + * We loose. If you compile for some architecture with 32 "d" + * registers, gcc will likely move the registers to save (d8-d15) + * into some of d16-d31, and they will then be clobbered. + * I don't see any solution. 
:-(( + */ # if defined(__TARGET_FPU_SOFTVFP_VFPV3) || \ defined(__TARGET_FPU_SOFTVFP_VFPV3_FP16) || \ defined(__TARGET_FPU_VFPV3) || \ From noreply at buildbot.pypy.org Mon Oct 19 20:46:53 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 19 Oct 2015 20:46:53 +0200 (CEST) Subject: [pypy-commit] pypy default: Maybe found a solution Message-ID: <20151019184653.9F4591C135C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80340:01e9463e526f Date: 2015-10-19 20:47 +0200 http://bitbucket.org/pypy/pypy/changeset/01e9463e526f/ Log: Maybe found a solution diff --git a/rpython/translator/c/src/stacklet/switch_arm_gcc.h b/rpython/translator/c/src/stacklet/switch_arm_gcc.h --- a/rpython/translator/c/src/stacklet/switch_arm_gcc.h +++ b/rpython/translator/c/src/stacklet/switch_arm_gcc.h @@ -7,6 +7,10 @@ static void *slp_switch(void *(*save_state)(void*, void*), void *(*restore_state)(void*, void*), + void *extra) __attribute__((noinline)); + +static void *slp_switch(void *(*save_state)(void*, void*), + void *(*restore_state)(void*, void*), void *extra) { void *result; @@ -23,6 +27,20 @@ "and r1, r0, #-16\n" "mov sp, r1\n" "push {r0, r2, r3, r7, r8, r9, r10, r11}\n" /* total 8, still aligned */ +#ifndef __SOFTFP__ + /* We also push d8-d15 to preserve them explicitly. This assumes + * that this code is in a function that doesn't use floating-point + * at all, and so don't touch the "d" registers (that's why we mark + * it as non-inlinable). So here by pushing/poping d8-d15 we are + * saving precisely the callee-saved registers in all cases. We + * could also try to list all "d" registers as clobbered, but it + * doesn't work: there is no way I could find to know if we have 16 + * or 32 "d" registers (depends on the exact -mcpu=... and we don't + * know it from the C code). If we have 32, then gcc would "save" + * d8-d15 by copying them into d16-d23 for example, and it doesn't + * work. 
*/ + "vpush {d8, d9, d10, d11, d12, d13, d14, d15}\n" /* 16 words, still aligned */ +#endif /* save values in callee saved registers for later */ "mov r4, %[restore_state]\n" /* can't be r0 or r1: marked clobbered */ @@ -47,6 +65,9 @@ /* The stack's content is now restored. */ "zero:\n" +#ifndef __SOFTFP__ + "vpop {d8, d9, d10, d11, d12, d13, d14, d15}\n" +#endif "pop {r1, r2, r3, r7, r8, r9, r10, r11}\n" "mov sp, r1\n" "mov %[result], r0\n" @@ -58,28 +79,6 @@ [extra]"r"(extra) : "r0", "r1", "r4", "r5", "r6", "r12", "lr", "memory", "cc" -#ifndef __SOFTFP__ - , "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" - , "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" -/* messsssssssssssss quite unsure it is the correct way */ -/* Actually it seems there is no way. These macros are defined by ARM's - * own compiler but not by GCC. On GCC, by looking at its sources it - * seems that we'd like to know the internal TARGET_VFPD32 flag, but - * there is no way to access it because it's not exported as a macro. - * We loose. If you compile for some architecture with 32 "d" - * registers, gcc will likely move the registers to save (d8-d15) - * into some of d16-d31, and they will then be clobbered. - * I don't see any solution. 
:-(( - */ -# if defined(__TARGET_FPU_SOFTVFP_VFPV3) || \ - defined(__TARGET_FPU_SOFTVFP_VFPV3_FP16) || \ - defined(__TARGET_FPU_VFPV3) || \ - defined(__TARGET_FPU_VFPV3_FP16) || \ - defined(__TARGET_FPU_VFPV4) - , "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23" - , "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" -# endif -#endif ); return result; } From noreply at buildbot.pypy.org Tue Oct 20 00:48:06 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 20 Oct 2015 00:48:06 +0200 (CEST) Subject: [pypy-commit] buildbot default: Update requirements.txt Message-ID: <20151019224806.A32361C1185@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r966:624b0ebfca9a Date: 2015-10-19 23:48 +0100 http://bitbucket.org/pypy/buildbot/changeset/624b0ebfca9a/ Log: Update requirements.txt diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,18 +1,16 @@ -Flask==0.9 -Jinja2==2.7.1 -MarkupSafe==0.18 -SQLAlchemy==0.7.9 -Tempita==0.5.1 -Twisted==13.1.0 -Werkzeug==0.8.3 -argparse==1.2.1 -buildbot==0.8.8 -buildbot-slave==0.8.6p1 -decorator==3.4.0 -mock==1.0.1 -py==1.4.18 -pytest==2.2.4 +buildbot==0.8.10 +buildbot-slave==0.8.10 +decorator==4.0.4 +Flask==0.10.1 +itsdangerous==0.24 +Jinja2==2.8 +MarkupSafe==0.23 +py==1.4.30 +pytest==2.8.2 python-dateutil==1.5 +SQLAlchemy==0.7.10 sqlalchemy-migrate==0.7.2 -wsgiref==0.1.2 -zope.interface==4.0.5 +Tempita==0.5.2 +Twisted==15.4.0 +Werkzeug==0.10.4 +zope.interface==4.1.3 From noreply at buildbot.pypy.org Tue Oct 20 08:09:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 08:09:15 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix on 32-bit Message-ID: <20151020060915.C99851C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80341:ec634b035df3 Date: 2015-10-20 08:09 +0200 http://bitbucket.org/pypy/pypy/changeset/ec634b035df3/ Log: Fix on 32-bit diff --git a/rpython/jit/backend/test/runner_test.py 
b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5068,7 +5068,9 @@ 'faildescr6': BasicFailDescr(6)}) looptoken = JitCellToken() self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) - deadframe = self.cpu.execute_token(looptoken, 12.25, 123456.01) + deadframe = self.cpu.execute_token(looptoken, + longlong.getfloatstorage(12.25), + longlong.getfloatstorage(123456.01)) fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 res = longlong.getrealfloat(self.cpu.get_float_value(deadframe, 0)) From noreply at buildbot.pypy.org Tue Oct 20 08:13:40 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 08:13:40 +0200 (CEST) Subject: [pypy-commit] pypy default: fix Message-ID: <20151020061340.0FE5E1C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80342:8390117c7748 Date: 2015-10-20 08:13 +0200 http://bitbucket.org/pypy/pypy/changeset/8390117c7748/ Log: fix diff --git a/rpython/jit/backend/arm/test/test_runner.py b/rpython/jit/backend/arm/test/test_runner.py --- a/rpython/jit/backend/arm/test/test_runner.py +++ b/rpython/jit/backend/arm/test/test_runner.py @@ -27,14 +27,11 @@ # ====> ../../test/runner_test.py add_loop_instructions = 'ldr; adds; cmp; beq; b;$' - bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' - 'push; mov; mov; push; mov; mov; ' - 'blx; mov; mov; bx;$') arch_version = detect_arch_version() if arch_version == 7: - bridge_loop_instructions = ('ldr; mov; nop; cmp; bge; ' - 'push; mov; mov; push; mov; mov; ' - 'blx; mov; mov; bx;$') + bridge_loop_instructions = ('ldr; movw; nop; cmp; bge; ' + 'push; movw; movt; push; movw; movt; ' + 'blx; movw; movt; bx;$') else: bridge_loop_instructions = ('ldr; mov; nop; nop; nop; cmp; bge; ' 'push; ldr; mov; ' From noreply at buildbot.pypy.org Tue Oct 20 08:43:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 08:43:56 +0200 (CEST) Subject: 
[pypy-commit] pypy default: Give up running this test nightly before translation: not reliable. Message-ID: <20151020064356.A0E9C1C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80343:e81b61ff533f Date: 2015-10-20 06:39 +0000 http://bitbucket.org/pypy/pypy/changeset/e81b61ff533f/ Log: Give up running this test nightly before translation: not reliable. Keep it only as an appdirect test, where it seems to work fine. diff --git a/pypy/module/thread/test/test_fork.py b/pypy/module/thread/test/test_fork.py --- a/pypy/module/thread/test/test_fork.py +++ b/pypy/module/thread/test/test_fork.py @@ -11,6 +11,8 @@ if not hasattr(os, 'fork'): skip("No fork on this platform") + if not self.runappdirect: + skip("Not reliable before translation") def busy_thread(): print 'sleep' @@ -18,7 +20,7 @@ time.sleep(0) done.append(None) - for i in range(5): + for i in range(150): run = True done = [] try: From noreply at buildbot.pypy.org Tue Oct 20 09:11:31 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 09:11:31 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #2166: the 'read' function in C returns a ssize_t. Checking if Message-ID: <20151020071131.C5D521C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80344:21e8a29ec071 Date: 2015-10-20 09:11 +0200 http://bitbucket.org/pypy/pypy/changeset/21e8a29ec071/ Log: Issue #2166: the 'read' function in C returns a ssize_t. Checking if '(size_t)-1' is positive will say True! 
diff --git a/pypy/module/_file/readinto.py b/pypy/module/_file/readinto.py --- a/pypy/module/_file/readinto.py +++ b/pypy/module/_file/readinto.py @@ -9,7 +9,7 @@ os_read = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'read', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], - rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) + rffi.SSIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) def direct_readinto(self, w_rwbuffer): @@ -61,6 +61,7 @@ stream.flush() while True: got = os_read(fd, rffi.ptradd(target_address, target_pos), size) + got = rffi.cast(lltype.Signed, got) if got > 0: target_pos += got size -= got diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -572,6 +572,17 @@ assert len(a) == 10 assert a.tostring() == 'foobar6789' + @py.test.mark.skipif("os.name != 'posix'") + def test_readinto_error(self): + import _socket, posix, array + s = _socket.socket() + buff = array.array("c", "X" * 65) + fh = posix.fdopen(posix.dup(s.fileno()), 'rb') + # "Transport endpoint is not connected" + raises(IOError, fh.readinto, buff) + fh.close() + s.close() + def test_weakref(self): """Files are weakrefable.""" import weakref diff --git a/rpython/rtyper/module/ll_os.py b/rpython/rtyper/module/ll_os.py --- a/rpython/rtyper/module/ll_os.py +++ b/rpython/rtyper/module/ll_os.py @@ -1022,7 +1022,7 @@ def register_os_read(self): os_read = self.llexternal(UNDERSCORE_ON_WIN32 + 'read', [rffi.INT, rffi.VOIDP, rffi.SIZE_T], - rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) + rffi.SSIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) def os_read_llimpl(fd, count): if count < 0: From noreply at buildbot.pypy.org Tue Oct 20 09:18:06 2015 From: noreply at buildbot.pypy.org (__item4__) Date: Tue, 20 Oct 2015 09:18:06 +0200 (CEST) Subject: [pypy-commit] pypy osx-libffi: Use pkg-config info for osx user Message-ID: <20151020071806.3C6B21C0036@cobra.cs.uni-duesseldorf.de> Author: Kim Jin Su Branch: 
osx-libffi Changeset: r80345:bb49df282132 Date: 2015-10-20 00:26 +0900 http://bitbucket.org/pypy/pypy/changeset/bb49df282132/ Log: Use pkg-config info for osx user I think no one install libffi in old path. I installed libffi by brew and encountered libffi issue. diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -34,10 +34,14 @@ + args) def _include_dirs_for_libffi(self): - return ['/usr/include/ffi'] + return self._pkg_config("libffi", "--cflags-only-I", + ['/usr/include/libffi'], + check_result_dir=True) def _library_dirs_for_libffi(self): - return ['/usr/lib'] + return self._pkg_config("libffi", "--libs-only-L", + ['/usr/lib/libffi'], + check_result_dir=True) def _frameworks(self, frameworks): args = [] From noreply at buildbot.pypy.org Tue Oct 20 09:18:08 2015 From: noreply at buildbot.pypy.org (__item4__) Date: Tue, 20 Oct 2015 09:18:08 +0200 (CEST) Subject: [pypy-commit] pypy osx-libffi: change default path for non-brew user Message-ID: <20151020071808.53F6D1C00E2@cobra.cs.uni-duesseldorf.de> Author: Kim Jin Su Branch: osx-libffi Changeset: r80346:7392123860f3 Date: 2015-10-20 01:52 +0900 http://bitbucket.org/pypy/pypy/changeset/7392123860f3/ Log: change default path for non-brew user diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -35,12 +35,12 @@ def _include_dirs_for_libffi(self): return self._pkg_config("libffi", "--cflags-only-I", - ['/usr/include/libffi'], + ['/usr/include/ffi'], check_result_dir=True) def _library_dirs_for_libffi(self): return self._pkg_config("libffi", "--libs-only-L", - ['/usr/lib/libffi'], + ['/usr/lib'], check_result_dir=True) def _frameworks(self, frameworks): From noreply at buildbot.pypy.org Tue Oct 20 09:18:10 2015 From: noreply at buildbot.pypy.org (arigo) Date: 
Tue, 20 Oct 2015 09:18:10 +0200 (CEST) Subject: [pypy-commit] pypy default: Merged in __item4__/pypy/osx-libffi (pull request #346) Message-ID: <20151020071810.68E781C06F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80347:0e33d5f31303 Date: 2015-10-20 09:18 +0200 http://bitbucket.org/pypy/pypy/changeset/0e33d5f31303/ Log: Merged in __item4__/pypy/osx-libffi (pull request #346) Use pkg-config info for osx user and libffi diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -34,10 +34,14 @@ + args) def _include_dirs_for_libffi(self): - return ['/usr/include/ffi'] + return self._pkg_config("libffi", "--cflags-only-I", + ['/usr/include/ffi'], + check_result_dir=True) def _library_dirs_for_libffi(self): - return ['/usr/lib'] + return self._pkg_config("libffi", "--libs-only-L", + ['/usr/lib'], + check_result_dir=True) def _frameworks(self, frameworks): args = [] From noreply at buildbot.pypy.org Tue Oct 20 10:01:23 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 10:01:23 +0200 (CEST) Subject: [pypy-commit] cffi default: Issue #225: Ignore basic SAL annotations on Windows. Message-ID: <20151020080123.0A6F91C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2352:fbe55ed7e5e2 Date: 2015-10-20 10:02 +0200 http://bitbucket.org/cffi/cffi/changeset/fbe55ed7e5e2/ Log: Issue #225: Ignore basic SAL annotations on Windows. 
diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -29,6 +29,8 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") +_r_SAL = re.compile(r"([(,]\s*)(_In_|_Inout_|_Out_|_Outptr_|" + r"_In_opt_|_Inout_opt_|_Out_opt_|_Outptr_opt_)\b") def _get_parser(): global _parser_cache @@ -55,6 +57,8 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) + if sys.platform == 'win32': + csource = _r_SAL.sub(r'\1 ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -32,6 +32,12 @@ which had unwanted side-effects. Try saying ``import setuptools`` first, which patches distutils... +* Windows: basic SAL annotations can be given in the cdef() and are + ignored. More precisely, ``_In_``, ``_Inout_``, ``_Out_``, + ``_Outptr_``, ``In_opt_``, ``_Inout_opt_``, ``_Out_opt_`` and + ``_Outptr_opt_`` are ignored if they are following a ``(`` or a + ``,`` (which is where function parameters are). + .. _`ffi.memmove()`: using.html#memmove .. __: https://bugs.python.org/issue23246 .. 
__: https://bitbucket.org/cffi/cffi/pull-requests/65/remove-_hack_at_distutils-which-imports/diff diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -384,3 +384,10 @@ "" % (stdcall, stdcall)) + +def test_basic_SAL_annotations_on_windows(): + if sys.platform != 'win32': + py.test.skip("Only for Windows") + ffi = FFI() + tp = ffi.typeof("int(*)(_In_ int *abc, _Out_opt_ int *bcd)") + assert str(tp) == "" From noreply at buildbot.pypy.org Tue Oct 20 10:55:08 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 20 Oct 2015 10:55:08 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: split up the code builder into several files, branch relative shifts a zero value on the LSB before calculating the address, this case is now correctly handled Message-ID: <20151020085508.121AC1C00E2@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80348:e505fe63db6c Date: 2015-10-20 10:55 +0200 http://bitbucket.org/pypy/pypy/changeset/e505fe63db6c/ Log: split up the code builder into several files, branch relative shifts a zero value on the LSB before calculating the address, this case is now correctly handled diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -1,6 +1,7 @@ from rpython.jit.backend.zarch import conditions as cond from rpython.jit.backend.zarch import registers as reg from rpython.jit.backend.zarch import locations as loc +from rpython.jit.backend.zarch.instruction_builder import build_instr_codes from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.unroll import unrolling_iterable @@ -29,305 +30,6 @@ class Operand(object): pass -class builder(object): - """ NOT_RPYTHON """ - @staticmethod - def 
arguments(args_str): - """ NOT_RPYTHON """ - """ - Available names: - r - register - r/m - register or mask - iX - immediate X bits (signed) - uX - immediate X bits (unsigend) - bd - base displacement (12 bit) - bdl - base displacement long (20 bit) - bid - index base displacement - bidl - index base displacement (20 bit) - l4bd - length base displacement (4 bit) - l8bd - length base displacement (8 bit) - - note that a suffix 'l' means long, and a prefix length - """ - def impl(func): - func._arguments_ = args_str.split(',') - return func - return impl - -BIT_MASK_4 = 0xF -BIT_MASK_12 = 0xFFF -BIT_MASK_16 = 0xFFFF -BIT_MASK_20 = 0xFFFFF -BIT_MASK_32 = 0xFFFFFFFF - - at always_inline -def encode_base_displace(mc, base_displace): - """ - +---------------------------------+ - | ... | base | length[0:11] | ... | - +---------------------------------+ - """ - displace = base_displace.displace - base = base_displace.base & 0xf - byte = (displace >> 8 & 0xf) | base << 4 - mc.writechar(chr(byte)) - mc.writechar(chr(displace & 0xff)) - - at always_inline -def encode_base_displace_long(mc, basedisp): - """ - +-------------------------------------------------+ - | ... | base | length[0:11] | length[12:20] | ... 
| - +-------------------------------------------------+ - """ - displace = basedisp.displace & 0xfffff - base = basedisp.base & 0xf - byte = displace >> 8 & 0xf | base << 4 - mc.writechar(chr(byte)) - mc.writechar(chr(displace & 0xff)) - byte = displace >> 12 & 0xff - mc.writechar(chr(byte)) - -def build_rr(mnemonic, (opcode,)): - @builder.arguments('r,r') - def encode_rr(self, reg1, reg2): - self.writechar(opcode) - operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) - self.writechar(chr(operands)) - return encode_rr - -def build_rre(mnemonic, (opcode1,opcode2)): - @builder.arguments('r,r') - def encode_rr(self, reg1, reg2): - self.writechar(opcode1) - self.writechar(opcode2) - self.writechar('\x00') - operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) - self.writechar(chr(operands)) - return encode_rr - -def build_rx(mnemonic, (opcode,)): - @builder.arguments('r/m,bid') - def encode_rx(self, reg_or_mask, idxbasedisp): - self.writechar(opcode) - index = idxbasedisp.index - byte = (reg_or_mask & 0x0f) << 4 | index & 0xf - self.writechar(chr(byte)) - displace = idxbasedisp.displace & BIT_MASK_12 - base = idxbasedisp.base & 0xf - byte = displace >> 8 & 0xf | base << 4 - self.writechar(chr(byte)) - self.writechar(chr(displace & 0xff)) - return encode_rx - -def build_rxy(mnemonic, (opcode1,opcode2)): - @builder.arguments('r/m,bidl') - def encode_rxy(self, reg_or_mask, idxbasedisp): - self.writechar(opcode1) - index = idxbasedisp.index - byte = (reg_or_mask & 0x0f) << 4 | index & 0xf - self.writechar(chr(byte)) - encode_base_displace_long(self, idxbasedisp) - self.writechar(opcode2) - return encode_rxy - -def build_ri(mnemonic, (opcode,halfopcode)): - @builder.arguments('r/m,i16') - def encode_ri(self, reg_or_mask, imm16): - self.writechar(opcode) - byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) - self.writechar(chr(byte)) - self.writechar(chr(imm16 >> 8 & 0xff)) - self.writechar(chr(imm16 & 0xff)) - return encode_ri - -def build_ril(mnemonic, (opcode,halfopcode)): - 
@builder.arguments('r/m,i32') - def encode_ri(self, reg_or_mask, imm32): - self.writechar(opcode) - byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) - self.writechar(chr(byte)) - # half word boundary, addressing bytes - self.write_i32(imm32 >> 1 & BIT_MASK_32) - return encode_ri - - -def build_si(mnemonic, (opcode,)): - @builder.arguments('bd,u8') - def encode_si(self, base_displace, uimm8): - self.writechar(opcode) - self.writechar(chr(uimm8)) - encode_base_displace(self, base_displace) - return encode_si - -def build_siy(mnemonic, (opcode1,opcode2)): - @builder.arguments('bd,u8') - def encode_siy(self, base_displace, uimm8): - self.writechar(opcode1) - self.writechar(chr(uimm8)) - encode_base_displace(self, base_displace) - displace = base_displace.displace - self.writechar(chr(displace >> 12 & 0xff)) - self.writechar(opcode2) - return encode_siy - -def build_ssa(mnemonic, (opcode1,)): - @builder.arguments('l8bd,bd') - def encode_ssa(self, len_base_disp, base_displace): - self.writechar(opcode1) - self.writechar(chr(len_base_disp.length & 0xff)) - encode_base_displace(self, len_base_disp) - encode_base_displace(self, base_displace) - return encode_ssa - -def build_ssb(mnemonic, (opcode1,)): - @builder.arguments('l8bd,l8bd') - def encode_ssb(self, len_base_disp1, len_base_disp2): - self.writechar(opcode1) - byte = (len_base_disp1.length & 0xf) << 4 | len_base_disp2.length & 0xf - self.writechar(chr(byte)) - encode_base_displace(self, len_base_disp1) - encode_base_displace(self, len_base_disp2) - return encode_ssb - -def build_ssc(mnemonic, (opcode1,)): - @builder.arguments('l4bd,bd,u4') - def encode_ssc(self, len_base_disp, base_disp, uimm4): - self.writechar(opcode1) - byte = (len_base_disp.length & 0xf) << 4 | uimm4 & 0xf - self.writechar(chr(byte)) - encode_base_displace(self, len_base_disp) - encode_base_displace(self, base_disp) - return encode_ssc - -def build_ssd(mnemonic, (opcode,)): - @builder.arguments('bid,bd,r') - def encode_ssd(self, 
index_base_disp, base_disp, reg): - self.writechar(opcode) - byte = (index_base_disp.index & 0xf) << 4 | reg & 0xf - self.writechar(chr(byte)) - encode_base_displace(self, index_base_disp) - encode_base_displace(self, base_disp) - return encode_ssd - -def build_sse(mnemonic, (opcode,)): - @builder.arguments('r,r,bd,bd') - def encode_sse(self, reg1, reg3, base_disp2, base_disp4): - self.writechar(opcode) - byte = (reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4 - self.writechar(chr(byte)) - encode_base_displace(self, base_disp2) - encode_base_displace(self, base_disp4) - return encode_sse - -def build_ssf(mnemonic, (opcode,)): - @builder.arguments('bd,l8bd') - def encode_ssf(self, base_disp, len_base_disp): - self.writechar(opcode) - self.writechar(chr(len_base_disp.length & 0xff)) - encode_base_displace(self, base_disp) - encode_base_displace(self, len_base_disp) - return encode_ssf - -def build_rs(mnemonic, (opcode,)): - @builder.arguments('r,r,bd') - def encode_rs(self, reg1, reg3, base_displace): - self.writechar(opcode) - self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) - encode_base_displace(self, base_displace) - return encode_rs - -def build_rsy(mnemonic, (opcode1,opcode2)): - @builder.arguments('r,r,bdl') - def encode_ssa(self, reg1, reg3, base_displace): - self.writechar(opcode1) - self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) - encode_base_displace_long(self, base_displace) - self.writechar(opcode2) - return encode_ssa - -def build_rsi(mnemonic, (opcode,)): - @builder.arguments('r,r,i16') - def encode_ri(self, reg1, reg2, imm16): - self.writechar(opcode) - byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) - self.writechar(chr(byte)) - self.write_i16(imm16 >> 1 & BIT_MASK_16) - return encode_ri - -def build_rie(mnemonic, (opcode1,opcode2)): - @builder.arguments('r,r,i16') - def encode_ri(self, reg1, reg2, imm16): - self.writechar(opcode1) - byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) - 
self.writechar(chr(byte)) - self.write_i16(imm16 >> 1 & BIT_MASK_16) - self.writechar(chr(0x0)) - self.writechar(opcode2) - return encode_ri - -_mnemonic_codes = { - 'AR': (build_rr, ['\x1A']), - 'AGR': (build_rre, ['\xB9','\x08']), - 'AGFR': (build_rre, ['\xB9','\x18']), - 'A': (build_rx, ['\x5A']), - 'AY': (build_rxy, ['\xE3','\x5A']), - 'AG': (build_rxy, ['\xE3','\x08']), - 'AGF': (build_rxy, ['\xE3','\x18']), - 'AHI': (build_ri, ['\xA7','\x0A']), - # - 'BRASL': (build_ril, ['\xC0','\x05']), - 'BXH': (build_rs, ['\x86']), - 'BXHG': (build_rsy, ['\xEB','\x44']), - 'BRXH': (build_rsi, ['\x84']), - 'BRXLG': (build_rie, ['\xEC','\x45']), - 'BCR': (build_rr, ['\x07']), - 'BC': (build_rx, ['\x47']), - 'BRC': (build_ri, ['\xA7','\x04']), - 'BRCL': (build_ril, ['\xC0','\x04']), - # - 'NI': (build_si, ['\x94']), - 'NIY': (build_siy, ['\xEB','\x54']), - 'NC': (build_ssa, ['\xD4']), - 'AP': (build_ssb, ['\xFA']), - 'SRP': (build_ssc, ['\xF0']), - 'MVCK': (build_ssd, ['\xD9']), - - 'LAY': (build_rxy, ['\xE3','\x71']), - 'LMD': (build_sse, ['\xEF']), - 'LMG': (build_rsy, ['\xEB','\x04']), - 'LGHI': (build_ri, ['\xA7','\x09']), - 'LR': (build_rr, ['\x18']), - 'LGR': (build_rre, ['\xB9','\x04']), - - 'PKA': (build_ssf, ['\xE9']), - 'STMG': (build_rsy, ['\xEB','\x24']), - 'SR': (build_rr, ['\x1B']), - 'SGR': (build_rre, ['\xB9','\x09']), -} - -def build_unpack_func(mnemonic, func): - def function(self, *args): - newargs = [None] * len(args) - for i,arg in enumerate(unrolling_iterable(func._arguments_)): - if arg == 'r' or arg == 'r/m': - newargs[i] = args[i].value - elif arg.startswith('i') or arg.startswith('u'): - newargs[i] = args[i].value - else: - newargs[i] = args[i] - return func(self, *newargs) - function.__name__ = mnemonic - return function - -def build_instr_codes(clazz): - for mnemonic, (builder, args) in _mnemonic_codes.items(): - func = builder(mnemonic, args) - instrtype = builder.__name__.split("_")[1] - name = mnemonic + "_" + instrtype - setattr(clazz, name, 
func) - setattr(clazz, mnemonic, build_unpack_func(mnemonic, func)) - class AbstractZARCHBuilder(object): def write_i32(self, word): self.writechar(chr((word >> 24) & 0xFF)) @@ -386,4 +88,4 @@ # Used to build the MachineCodeBlockWrapper all_instructions = sorted([name for cls in _classes for name in cls.__dict__ \ - if name.split('_')[0].isupper()]) + if name.split('_')[0].isupper() and '_' in name]) diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -0,0 +1,278 @@ +from rpython.jit.backend.zarch.instructions import (all_mnemonic_codes, + arith_mnemic_codes, branch_mnemoic_codes) +from rpython.rtyper.lltypesystem.rbuilder import always_inline + + +class builder(object): + """ NOT_RPYTHON """ + @staticmethod + def arguments(args_str): + """ NOT_RPYTHON """ + """ + Available names: + r - register + r/m - register or mask + iX - immediate X bits (signed) + uX - immediate X bits (unsigend) + bd - base displacement (12 bit) + bdl - base displacement long (20 bit) + bid - index base displacement + bidl - index base displacement (20 bit) + l4bd - length base displacement (4 bit) + l8bd - length base displacement (8 bit) + + note that a suffix 'l' means long, and a prefix length + """ + def impl(func): + func._arguments_ = args_str.split(',') + return func + return impl + +BIT_MASK_4 = 0xF +BIT_MASK_12 = 0xFFF +BIT_MASK_16 = 0xFFFF +BIT_MASK_20 = 0xFFFFF +BIT_MASK_32 = 0xFFFFFFFF + + at always_inline +def encode_base_displace(mc, base_displace): + """ + +---------------------------------+ + | ... | base | length[0:11] | ... 
| + +---------------------------------+ + """ + displace = base_displace.displace + base = base_displace.base & 0xf + byte = (displace >> 8 & 0xf) | base << 4 + mc.writechar(chr(byte)) + mc.writechar(chr(displace & 0xff)) + + at always_inline +def encode_base_displace_long(mc, basedisp): + """ + +-------------------------------------------------+ + | ... | base | length[0:11] | length[12:20] | ... | + +-------------------------------------------------+ + """ + displace = basedisp.displace & 0xfffff + base = basedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + mc.writechar(chr(byte)) + mc.writechar(chr(displace & 0xff)) + byte = displace >> 12 & 0xff + mc.writechar(chr(byte)) + +def build_rr(mnemonic, (opcode,)): + @builder.arguments('r,r') + def encode_rr(self, reg1, reg2): + self.writechar(opcode) + operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) + self.writechar(chr(operands)) + return encode_rr + +def build_rre(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r') + def encode_rr(self, reg1, reg2): + self.writechar(opcode1) + self.writechar(opcode2) + self.writechar('\x00') + operands = ((reg1 & 0x0f) << 4) | (reg2 & 0xf) + self.writechar(chr(operands)) + return encode_rr + +def build_rx(mnemonic, (opcode,)): + @builder.arguments('r/m,bid') + def encode_rx(self, reg_or_mask, idxbasedisp): + self.writechar(opcode) + index = idxbasedisp.index + byte = (reg_or_mask & 0x0f) << 4 | index & 0xf + self.writechar(chr(byte)) + displace = idxbasedisp.displace & BIT_MASK_12 + base = idxbasedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + self.writechar(chr(byte)) + self.writechar(chr(displace & 0xff)) + return encode_rx + +def build_rxy(mnemonic, (opcode1,opcode2)): + @builder.arguments('r/m,bidl') + def encode_rxy(self, reg_or_mask, idxbasedisp): + self.writechar(opcode1) + index = idxbasedisp.index + byte = (reg_or_mask & 0x0f) << 4 | index & 0xf + self.writechar(chr(byte)) + encode_base_displace_long(self, idxbasedisp) + self.writechar(opcode2) + 
return encode_rxy + +def build_ri(mnemonic, (opcode,halfopcode)): + br = is_branch_relative(mnemonic) + @builder.arguments('r/m,i16') + def encode_ri(self, reg_or_mask, imm16): + self.writechar(opcode) + byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) + self.writechar(chr(byte)) + if br: + imm16 = imm16 >> 1 + self.writechar(chr(imm16 >> 8 & 0xff)) + self.writechar(chr(imm16 & 0xff)) + return encode_ri + +def build_ril(mnemonic, (opcode,halfopcode)): + br = is_branch_relative(mnemonic) + @builder.arguments('r/m,i32') + def encode_ri(self, reg_or_mask, imm32): + self.writechar(opcode) + byte = (reg_or_mask & 0xf) << 4 | (ord(halfopcode) & 0xf) + self.writechar(chr(byte)) + if br: + imm32 = imm32 >> 1 + # half word boundary, addressing bytes + self.write_i32(imm32 & BIT_MASK_32) + return encode_ri + + +def build_si(mnemonic, (opcode,)): + @builder.arguments('bd,u8') + def encode_si(self, base_displace, uimm8): + self.writechar(opcode) + self.writechar(chr(uimm8)) + encode_base_displace(self, base_displace) + return encode_si + +def build_siy(mnemonic, (opcode1,opcode2)): + @builder.arguments('bd,u8') + def encode_siy(self, base_displace, uimm8): + self.writechar(opcode1) + self.writechar(chr(uimm8)) + encode_base_displace(self, base_displace) + displace = base_displace.displace + self.writechar(chr(displace >> 12 & 0xff)) + self.writechar(opcode2) + return encode_siy + +def build_ssa(mnemonic, (opcode1,)): + @builder.arguments('l8bd,bd') + def encode_ssa(self, len_base_disp, base_displace): + self.writechar(opcode1) + self.writechar(chr(len_base_disp.length & 0xff)) + encode_base_displace(self, len_base_disp) + encode_base_displace(self, base_displace) + return encode_ssa + +def build_ssb(mnemonic, (opcode1,)): + @builder.arguments('l8bd,l8bd') + def encode_ssb(self, len_base_disp1, len_base_disp2): + self.writechar(opcode1) + byte = (len_base_disp1.length & 0xf) << 4 | len_base_disp2.length & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, 
len_base_disp1) + encode_base_displace(self, len_base_disp2) + return encode_ssb + +def build_ssc(mnemonic, (opcode1,)): + @builder.arguments('l4bd,bd,u4') + def encode_ssc(self, len_base_disp, base_disp, uimm4): + self.writechar(opcode1) + byte = (len_base_disp.length & 0xf) << 4 | uimm4 & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, len_base_disp) + encode_base_displace(self, base_disp) + return encode_ssc + +def build_ssd(mnemonic, (opcode,)): + @builder.arguments('bid,bd,r') + def encode_ssd(self, index_base_disp, base_disp, reg): + self.writechar(opcode) + byte = (index_base_disp.index & 0xf) << 4 | reg & 0xf + self.writechar(chr(byte)) + encode_base_displace(self, index_base_disp) + encode_base_displace(self, base_disp) + return encode_ssd + +def build_sse(mnemonic, (opcode,)): + @builder.arguments('r,r,bd,bd') + def encode_sse(self, reg1, reg3, base_disp2, base_disp4): + self.writechar(opcode) + byte = (reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4 + self.writechar(chr(byte)) + encode_base_displace(self, base_disp2) + encode_base_displace(self, base_disp4) + return encode_sse + +def build_ssf(mnemonic, (opcode,)): + @builder.arguments('bd,l8bd') + def encode_ssf(self, base_disp, len_base_disp): + self.writechar(opcode) + self.writechar(chr(len_base_disp.length & 0xff)) + encode_base_displace(self, base_disp) + encode_base_displace(self, len_base_disp) + return encode_ssf + +def build_rs(mnemonic, (opcode,)): + @builder.arguments('r,r,bd') + def encode_rs(self, reg1, reg3, base_displace): + self.writechar(opcode) + self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) + encode_base_displace(self, base_displace) + return encode_rs + +def build_rsy(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r,bdl') + def encode_ssa(self, reg1, reg3, base_displace): + self.writechar(opcode1) + self.writechar(chr((reg1 & BIT_MASK_4) << 4 | reg3 & BIT_MASK_4)) + encode_base_displace_long(self, base_displace) + self.writechar(opcode2) + return 
encode_ssa + +def build_rsi(mnemonic, (opcode,)): + br = is_branch_relative(mnemonic) + @builder.arguments('r,r,i16') + def encode_ri(self, reg1, reg2, imm16): + self.writechar(opcode) + byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) + self.writechar(chr(byte)) + if br: + imm16 = imm16 >> 1 + self.write_i16(imm16 & BIT_MASK_16) + return encode_ri + +def build_rie(mnemonic, (opcode1,opcode2)): + br = is_branch_relative(mnemonic) + @builder.arguments('r,r,i16') + def encode_ri(self, reg1, reg2, imm16): + self.writechar(opcode1) + byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) + self.writechar(chr(byte)) + if br: + imm16 = imm16 >> 1 + self.write_i16(imm16 & BIT_MASK_16) + self.writechar(chr(0x0)) + self.writechar(opcode2) + return encode_ri + +def build_unpack_func(mnemonic, func): + def function(self, *args): + newargs = [None] * len(args) + for i,arg in enumerate(unrolling_iterable(func._arguments_)): + if arg == 'r' or arg == 'r/m': + newargs[i] = args[i].value + elif arg.startswith('i') or arg.startswith('u'): + newargs[i] = args[i].value + else: + newargs[i] = args[i] + return func(self, *newargs) + function.__name__ = mnemonic + return function + +def is_branch_relative(name): + return name.startswith('BR') + +def build_instr_codes(clazz): + for mnemonic, (instrtype, args) in all_mnemonic_codes.items(): + builder = globals()['build_' + instrtype] + func = builder(mnemonic, args) + name = mnemonic + "_" + instrtype + setattr(clazz, name, func) + setattr(clazz, mnemonic, build_unpack_func(mnemonic, func)) diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/instructions.py @@ -0,0 +1,49 @@ + +branch_mnemoic_codes = { + 'BRASL': ('ril', ['\xC0','\x05']), + 'BCR': ('rr', ['\x07']), + 'BC': ('rx', ['\x47']), + 'BRC': ('ri', ['\xA7','\x04']), + 'BRCL': ('ril', ['\xC0','\x04']), +} + +arith_mnemic_codes = { + 'AR': ('rr', ['\x1A']), + 'AGR': 
('rre', ['\xB9','\x08']), + 'AGFR': ('rre', ['\xB9','\x18']), + 'A': ('rx', ['\x5A']), + 'SR': ('rr', ['\x1B']), + 'SGR': ('rre', ['\xB9','\x09']), +} + +all_mnemonic_codes = { + 'AY': ('rxy', ['\xE3','\x5A']), + 'AG': ('rxy', ['\xE3','\x08']), + 'AGF': ('rxy', ['\xE3','\x18']), + 'AHI': ('ri', ['\xA7','\x0A']), + # + 'BXH': ('rs', ['\x86']), + 'BXHG': ('rsy', ['\xEB','\x44']), + 'BRXH': ('rsi', ['\x84']), + 'BRXLG': ('rie', ['\xEC','\x45']), + # + 'NI': ('si', ['\x94']), + 'NIY': ('siy', ['\xEB','\x54']), + 'NC': ('ssa', ['\xD4']), + 'AP': ('ssb', ['\xFA']), + 'SRP': ('ssc', ['\xF0']), + 'MVCK': ('ssd', ['\xD9']), + + 'LAY': ('rxy', ['\xE3','\x71']), + 'LMD': ('sse', ['\xEF']), + 'LMG': ('rsy', ['\xEB','\x04']), + 'LGHI': ('ri', ['\xA7','\x09']), + 'LR': ('rr', ['\x18']), + 'LGR': ('rre', ['\xB9','\x04']), + + 'PKA': ('ssf', ['\xE9']), + 'STMG': ('rsy', ['\xEB','\x24']), +} +all_mnemonic_codes.update(arith_mnemic_codes) +all_mnemonic_codes.update(branch_mnemoic_codes) + diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -68,7 +68,7 @@ L1 = self.a.mc.get_relative_pos() self.a.mc.SGR(reg.r3, reg.r4) LJ = self.a.mc.get_relative_pos() - self.a.mc.BRCL(loc.imm(0x2), loc.imm(L1-LJ)) + self.a.mc.BRCL(con.GT, loc.imm(L1-LJ)) self.a.mc.LGR(reg.r2, reg.r3) self.a.jmpto(reg.r14) assert run_asm(self.a) == 0 diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -102,7 +102,7 @@ __repr__ = __str__ -def test_range(bits, signed=False, count=24): +def range_of_bits(bits, signed=False, count=24): if isinstance(bits, tuple): bits, signed = bits if signed: @@ -113,7 +113,7 @@ return [0,1,maximum-1] + 
[random.randrange(0,maximum) for i in range(count)] def build_fake(clazz, *arg_bits): - possibilities = itertools.product(*[test_range(b) for b in arg_bits]) + possibilities = itertools.product(*[range_of_bits(b) for b in arg_bits]) results = [] i = 0 for args in possibilities: @@ -128,16 +128,16 @@ TEST_CASE_GENERATE = { 'r': REGS, 'r/m': REGS, - 'i4': test_range(4, signed=True), - 'i8': test_range(8, signed=True), - 'i16': test_range(16, signed=True), - 'i32': test_range(32, signed=True), - 'i64': test_range(64, signed=True), - 'u4': test_range(4), - 'u8': test_range(8), - 'u16': test_range(16), - 'u32': test_range(32), - 'u64': test_range(64), + 'i4': range_of_bits(4, signed=True), + 'i8': range_of_bits(8, signed=True), + 'i16': range_of_bits(16, signed=True), + 'i32': range_of_bits(32, signed=True), + 'i64': range_of_bits(64, signed=True), + 'u4': range_of_bits(4), + 'u8': range_of_bits(8), + 'u16': range_of_bits(16), + 'u32': range_of_bits(32), + 'u64': range_of_bits(64), 'bd': build_fake(FakeBaseDisplace,4,12), 'bdl': build_fake(FakeBaseDisplace,4,19), 'bid': build_fake(FakeIndexBaseDisplace,4,4,12), @@ -154,7 +154,6 @@ def get_func_arg_types(self, methodname): from rpython.jit.backend.zarch.codebuilder import AbstractZARCHBuilder - import inspect func = getattr(AbstractZARCHBuilder, methodname) return func._arguments_ From noreply at buildbot.pypy.org Tue Oct 20 11:42:28 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 11:42:28 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: slowly-in-progress Message-ID: <20151020094228.6F98A1C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80349:8294b8657c71 Date: 2015-10-20 11:42 +0200 http://bitbucket.org/pypy/pypy/changeset/8294b8657c71/ Log: slowly-in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -681,9 +681,11 @@ print 'Fatal error in cpyext, 
CPython compatibility layer, calling', callable.__name__ print 'Either report a bug or consider not using this particular extension' if not we_are_translated(): + tb = sys.exc_info()[2] import traceback traceback.print_exc() - print str(e) + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) # we can't do much here, since we're in ctypes, swallow else: print str(e) @@ -728,9 +730,9 @@ compilation_info=eci, _nowrapper=True) def init_types(space): from pypy.module.cpyext.typeobject import py_type_ready - #py_type_ready(space, get_buffer_type()) ZZZ - #py_type_ready(space, get_cobject_type()) ZZZ - #py_type_ready(space, get_capsule_type()) ZZZ + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -23,7 +23,7 @@ setup_class_for_cpyext(W_AbstractIntObject, basestruct=PyIntObject.TO, fill_pyobj=int_fill_pyobj, - realize=int_realize, + fill_pypy=int_fill_pypy, realize_subclass_of=W_IntObject) def int_fill_pyobj(space, w_obj, py_int): @@ -33,8 +33,12 @@ """ py_int.c_ob_ival = space.int_w(w_obj) -def int_realize(space, w_obj, py_obj): - intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, py_obj).c_ob_ival) +def int_fill_pypy(space, w_obj, py_obj): + """ + Fills a W_IntObject from a PyIntObject. 
+ """ + py_int = rffi.cast(PyIntObject, py_obj) + intval = rffi.cast(lltype.Signed, py_int.c_ob_ival) W_IntObject.__init__(w_obj, intval) PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -12,7 +12,7 @@ METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, build_type_checkers, cpython_api, cpython_struct, generic_cpy_call) from pypy.module.cpyext.pyobject import ( - Py_DecRef, from_ref, make_ref, make_typedescr) + Py_DecRef, from_pyobj, make_ref, make_typedescr) PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -142,16 +142,18 @@ """NOT_RPYTHON basestruct: The basic structure to allocate - alloc_pyobj: default create_pyobj calls this to get the PyObject - fill_pyobj: default create_pyobj calls this after attaching is done - realize : Function called to create a pypy object from a PyObject + alloc_pyobj: function called to get the PyObject + fill_pyobj: called to fill the PyObject after attaching is done + alloc_pypy: function called to create a PyPy object from a PyObject + fill_pypy: called to fill the PyPy object after attaching is done dealloc : a cpython_api(external=False), similar to PyObject_dealloc """ tp_basestruct = kw.pop('basestruct', PyObject.TO) tp_alloc_pyobj = kw.pop('alloc_pyobj', None) tp_fill_pyobj = kw.pop('fill_pyobj', None) - tp_realize = kw.pop('realize', None) + tp_alloc_pypy = kw.pop('alloc_pypy', None) + tp_fill_pypy = kw.pop('fill_pypy', None) force_create_pyobj = kw.pop('force_create_pyobj', False) realize_subclass_of = kw.pop('realize_subclass_of', None) #tp_dealloc = kw.pop('dealloc', None) @@ -173,10 
+175,10 @@ pass # def cpyext_create_pyobj(self, space): - py_obj, light = tp_alloc_pyobj(space, self) + py_obj, is_light = tp_alloc_pyobj(space, self) ob = rffi.cast(PyObject, py_obj) ob_type = get_c_ob_type(space, space.type(self)) - init_link_pypy(self, ob, ob_type, light) + init_link_from_pypy(self, ob, ob_type, is_light) tp_fill_pyobj(space, self, py_obj) return ob W_Class.cpyext_create_pyobj = cpyext_create_pyobj @@ -190,37 +192,56 @@ keepalive_until_here(self) W_Class.cpyext_fill_prebuilt_pyobj = cpyext_fill_prebuilt_pyobj - if tp_realize or realize_subclass_of: - W_CPyExtPlaceHolder = get_cpyextplaceholder_subclass( - realize_subclass_of or W_Class) - if tp_realize: - tp_realize._always_inline_ = True + if tp_alloc_pyobj or tp_fill_pyobj or realize_subclass_of: + if realize_subclass_of is None: + realize_subclass_of = W_Class + assert 'typedef' in realize_subclass_of.__dict__, ( + "no 'typedef' exactly on %s" % (realize_subclass_of,)) # - def cpyext_realize(space, pyobj): - w_obj = W_CPyExtPlaceHolder(pyobj) - if tp_realize: - tp_realize(space, w_obj, pyobj) + if not tp_alloc_pypy: + W_CPyExtPlaceHolder = get_cpyextplaceholder_subclass( + realize_subclass_of) + def tp_alloc_pypy(space, pyobj): + w_obj = W_CPyExtPlaceHolder(pyobj) + return w_obj, True + tp_alloc_pypy._always_inline_ = True + # + if not tp_fill_pypy: + def tp_fill_pypy(space, w_obj, pyobj): + pass + # + def cpyext_create_pypy(space, pyobj): + w_obj, is_transient = tp_alloc_pypy(space, pyobj) + init_link_from_pyobj(w_obj, pyobj, is_transient) + tp_fill_pypy(space, w_obj, pyobj) return w_obj # typedef = realize_subclass_of.typedef - assert not hasattr(typedef, 'cpyext_realize') - typedef.cpyext_realize = cpyext_realize + assert 'cpyext_create_pypy' not in typedef.__dict__ + typedef.cpyext_create_pypy = cpyext_create_pypy W_Class.cpyext_basestruct = tp_basestruct -def init_link_pypy(w_obj, ob, ob_type, light): - if light: +def init_link_from_pypy(w_obj, ob, ob_type, is_light): + if is_light: 
ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY_LIGHT else: ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY - ob.c_ob_pypy_link = 0 ob.c_ob_type = ob_type rawrefcount.create_link_pypy(w_obj, ob) +def init_link_from_pyobj(w_obj, ob, is_transient): + if is_transient: + rawrefcount.create_link_pyobj(w_obj, ob) + else: + rawrefcount.create_link_pypy(w_obj, ob) + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY + def setup_prebuilt_pyobj(w_obj, py_obj): assert lltype.typeOf(py_obj) == PyObject - init_link_pypy(w_obj, py_obj, lltype.nullptr(PyTypeObjectPtr.TO), False) + init_link_from_pypy(w_obj, py_obj, lltype.nullptr(PyTypeObjectPtr.TO), + False) if isinstance(w_obj, W_TypeObject): w_obj.cpyext_c_type_object = rffi.cast(PyTypeObjectPtr, py_obj) @@ -236,6 +257,9 @@ def init_pyobject(space): setup_class_for_cpyext(W_Root, force_create_pyobj=True, realize_subclass_of=W_ObjectObject) + # use this cpyext_create_pypy as the default for all other TypeDefs + from pypy.interpreter.typedef import TypeDef + TypeDef.cpyext_create_pypy = W_ObjectObject.typedef.cpyext_create_pypy #________________________________________________________ @@ -262,6 +286,8 @@ def cpyext_as_pyobj(self, space): return self.cpyext_pyobj + # ZZZ getclass(), getweakref(), etc.? 
like interpreter/typedef.py + W_CPyExtPlaceHolder.__name__ = W_Class.__name__ + '_CPyExtPlaceHolder' W_Class._cpyextplaceholder_subclass = W_CPyExtPlaceHolder return W_CPyExtPlaceHolder @@ -284,8 +310,8 @@ W_TypeObject._cpyextplaceholder_subclass = W_TypeObject def _create_w_obj_from_pyobj(space, pyobj): - w_type = from_pyobj(pyobj.c_ob_type) - return w_type.instancetypedef.cpyext_realize(space, pyobj) + w_type = from_pyobj(space, pyobj.c_ob_type) + return w_type.instancetypedef.cpyext_create_pypy(space, pyobj) #________________________________________________________ # refcounted object support diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -82,8 +82,8 @@ if we_are_translated(): refcountstate = space.fromcache(RefcountState) refcountstate.init_r2w_from_w2r() + rawrefcount.init(lambda ob: ZZZ) - rawrefcount.init(lambda ob: ZZZ) for func in INIT_FUNCTIONS: func(space) self.check_and_raise_exception() diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -1,7 +1,7 @@ import os from rpython.rlib import jit -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.rstring import rsplit from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype @@ -280,34 +280,13 @@ check_descr(space, w_self, self.w_type) PyMember_SetOne(space, w_self, self.member, w_value) -class W_PyCTypeObject(W_TypeObject): - @jit.dont_look_inside - def __init__(self, space, pto): - bases_w = space.fixedview(from_ref(space, pto.c_tp_bases)) - dict_w = {} - - add_operators(space, dict_w, pto) - convert_method_defs(space, dict_w, pto.c_tp_methods, self) - convert_getset_defs(space, dict_w, pto.c_tp_getset, self) - convert_member_defs(space, dict_w, pto.c_tp_members, self) - - name = 
rffi.charp2str(pto.c_tp_name) - - W_TypeObject.__init__(self, space, name, - bases_w or [space.w_object], dict_w) - if not space.is_true(space.issubtype(self, space.w_type)): - self.flag_cpytype = True - self.flag_heaptype = False - if pto.c_tp_doc: - self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc)) - @bootstrap_function def init_typeobject(space): setup_class_for_cpyext(W_TypeObject, basestruct=PyTypeObject, alloc_pyobj=type_alloc_pyobj, - fill_pyobj=type_fill_pyobj) - #realize=type_realize, + fill_pyobj=type_fill_pyobj, + alloc_pypy=type_alloc_pypy) #dealloc=type_dealloc) # some types are difficult to create because of cycles. @@ -482,6 +461,7 @@ def type_alloc_pyobj(space, w_type): pto = lltype.malloc(PyTypeObject, flavor='raw', zero=True, track_allocation=False) + pto.c_tp_flags |= Py_TPFLAGS_READYING return pto, False def type_fill_pyobj(space, w_type, pto): @@ -489,6 +469,9 @@ Fills a newly allocated PyTypeObject from an existing w_type. """ from pypy.module.cpyext.object import PyObject_Del + from rpython.rlib import rawrefcount + + assert w_type == rawrefcount.to_obj(W_Root, pto) assert isinstance(w_type, W_TypeObject) w_type.cpyext_c_type_object = pto @@ -526,39 +509,99 @@ w_base = best_base(space, w_type.bases_w) pto.c_tp_base = rffi.cast(PyTypeObjectPtr, get_pyobj_and_xincref(space, w_base)) + w_bases = space.newtuple(w_type.bases_w) + pto.c_tp_bases = get_pyobj_and_incref(space, w_bases) - finish_type_1(space, pto, w_type) finish_type_2(space, pto, w_type) #pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) ZZZ + pto.c_tp_basicsize = rffi.sizeof(PyObject.TO) # ZZZ if pto.c_tp_base: if pto.c_tp_base.c_tp_basicsize > pto.c_tp_basicsize: pto.c_tp_basicsize = pto.c_tp_base.c_tp_basicsize update_all_slots(space, w_type, pto) + pto.c_tp_flags &= ~Py_TPFLAGS_READYING pto.c_tp_flags |= Py_TPFLAGS_READY def py_type_ready(space, pto): - if pto.c_tp_flags & Py_TPFLAGS_READY: - return - type_realize(space, rffi.cast(PyObject, pto)) + if pto.c_tp_flags & 
Py_TPFLAGS_READY == 0: + # this builds the W_TypeObject, and in doing so makes sure the + # PyTypeObject is ready. + if not pto.c_ob_type: + typetype = get_pyobj_and_incref(space, space.w_type) + pto.c_ob_type = rffi.cast(PyTypeObjectPtr, typetype) + from_pyobj(space, pto) + assert pto.c_tp_flags & Py_TPFLAGS_READY @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1) def PyType_Ready(space, pto): py_type_ready(space, pto) return 0 -def type_realize(space, py_obj): - ZZZ + at jit.dont_look_inside +def type_alloc_pypy(space, py_obj): pto = rffi.cast(PyTypeObjectPtr, py_obj) + assert pto.c_tp_flags & Py_TPFLAGS_READY == 0 assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 pto.c_tp_flags |= Py_TPFLAGS_READYING try: - w_obj = _type_realize(space, py_obj) + w_type = _type_realize(space, pto) finally: pto.c_tp_flags &= ~Py_TPFLAGS_READYING pto.c_tp_flags |= Py_TPFLAGS_READY - return w_obj + return w_type, False + +def _type_realize(space, pto): + assert pto.c_ob_type + # ^^^ we can't reach this place if c_ob_type is still NULL + + if not pto.c_tp_base: + base = get_pyobj_and_incref(space, space.w_object) + pto.c_tp_base = rffi.cast(PyTypeObjectPtr, base) + PyType_Ready(space, pto.c_tp_base) + + if not pto.c_tp_bases: + w_bases = space.newtuple([from_pyobj(space, pto.c_tp_base)]) + pto.c_tp_bases = get_pyobj_and_incref(space, w_bases) + else: + w_bases = from_pyobj(space, pto.c_tp_bases) + + w_metatype = from_pyobj(space, pto.c_ob_type) + w_type = space.allocate_instance(W_TypeObject, w_metatype) + + bases_w = space.fixedview(w_bases) or [space.w_object] + name = rffi.charp2str(pto.c_tp_name) + dict_w = {} + + # best we can do about tp_dict: copy all its string keys into dict_w, + # and ignore any non-string key + if pto.c_tp_dict: + w_org_dict = from_pyobj(space, pto.c_tp_dict) + for w_key in space.unpackiterable(w_org_dict): + try: + key = space.str_w(w_key) + dict_w[key] = space.getitem(w_org_dict, w_key) + except OperationError, e: + if e.async(self): + raise + + 
add_operators(space, dict_w, pto) + convert_method_defs(space, dict_w, pto.c_tp_methods, w_type) + convert_getset_defs(space, dict_w, pto.c_tp_getset, w_type) + convert_member_defs(space, dict_w, pto.c_tp_members, w_type) + + W_TypeObject.__init__(w_type, space, name, bases_w, dict_w) + + if not space.is_true(space.issubtype(self, space.w_type)): + w_type.flag_cpytype = True + w_type.flag_heaptype = False + if pto.c_tp_doc: + w_type.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc)) + + finish_type_2(space, pto, w_type) + w_type.ready() + return w_type def solid_base(space, w_type): typedef = w_type.instancetypedef @@ -584,58 +627,12 @@ if not pto.c_tp_setattro: pto.c_tp_setattro = base.c_tp_setattro -def _type_realize(space, py_obj): - """ - Creates an interpreter type from a PyTypeObject structure. - """ - # missing: - # inheriting tp_as_* slots - # unsupported: - # tp_mro, tp_subclasses - py_type = rffi.cast(PyTypeObjectPtr, py_obj) - - if not py_type.c_tp_base: - base = get_pyobj_and_incref(space, space.w_object) - py_type.c_tp_base = rffi.cast(PyTypeObjectPtr, base) - - finish_type_1(space, py_type) - - w_metatype = from_ref(space, rffi.cast(PyObject, py_type.c_ob_type)) - - w_obj = space.allocate_instance(W_PyCTypeObject, w_metatype) - track_reference(space, py_obj, w_obj) - w_obj.__init__(space, py_type) - w_obj.ready() - - finish_type_2(space, py_type, w_obj) - - state = space.fromcache(RefcountState) - state.non_heaptypes_w.append(w_obj) - - return w_obj - -def finish_type_1(space, pto, w_type): - """ - Sets up tp_bases, necessary before creating the interpreter type. 
- """ - #ZZZ - #base = pto.c_tp_base - #base_pyo = rffi.cast(PyObject, pto.c_tp_base) - #if base and not base.c_tp_flags & Py_TPFLAGS_READY: - # type_realize(space, base_pyo) - #if base and not pto.c_ob_type: # will be filled later - # pto.c_ob_type = base.c_ob_type - assert pto.c_ob_type # ZZZ - if not pto.c_tp_bases: - w_bases = space.newtuple(w_type.bases_w) - pto.c_tp_bases = get_pyobj_and_incref(space, w_bases) - -def finish_type_2(space, pto, w_obj): +def finish_type_2(space, pto, w_type): """ Sets up other attributes, when the interpreter type has been created. """ if not pto.c_tp_mro: - pto.c_tp_mro = get_pyobj_and_incref(space, space.newtuple(w_obj.mro_w)) + pto.c_tp_mro = get_pyobj_and_incref(space, space.newtuple(w_type.mro_w)) base = pto.c_tp_base if base: inherit_special(space, pto, base) @@ -648,11 +645,11 @@ PyObject_GenericSetAttr.api_func.functype, PyObject_GenericSetAttr.api_func.get_wrapper(space)) - if w_obj.is_cpytype(): - xxxxxxxx - Py_DecRef(space, pto.c_tp_dict) - w_dict = w_obj.getdict(space) - pto.c_tp_dict = make_ref(space, w_dict) + w_dict = w_type.getdict(space) + old = pto.c_tp_dict + pto.c_tp_dict = get_pyobj_and_incref(space, w_dict) + if old: + Py_DecRef(old) @cpython_api([PyTypeObjectPtr, PyTypeObjectPtr], rffi.INT_real, error=CANNOT_FAIL) def PyType_IsSubtype(space, a, b): diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -32,8 +32,9 @@ def create_link_pypy(p, ob): "NOT_RPYTHON: a link where the PyPy object contains some or all the data" + #print 'create_link_pypy\n\t%s\n\t%s' % (p, ob) assert p not in _pypy2ob - assert not ob.c_ob_pypy_link + #assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) _pypy2ob[p] = ob _p_list.append(ob) @@ -41,8 +42,9 @@ def create_link_pyobj(p, ob): """NOT_RPYTHON: a link where the PyObject contains all the data. 
from_obj() will not work on this 'p'.""" + #print 'create_link_pyobj\n\t%s\n\t%s' % (p, ob) assert p not in _pypy2ob - assert not ob.c_ob_pypy_link + #assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) _o_list.append(ob) From noreply at buildbot.pypy.org Tue Oct 20 12:00:51 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 12:00:51 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Test and fix Message-ID: <20151020100051.1451F1C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80350:c6004c0c00ee Date: 2015-10-20 12:00 +0200 http://bitbucket.org/pypy/pypy/changeset/c6004c0c00ee/ Log: Test and fix diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -126,7 +126,7 @@ _p_list = new_p_list for ob, wr in wr_p_list: p = attach(ob, wr, _p_list) - if p: + if p is not None: _pypy2ob[p] = ob _o_list = [] for ob, wr in wr_o_list: diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -10,6 +10,8 @@ class W_Root(object): def __init__(self, intval=0): self.intval = intval + def __nonzero__(self): + raise Exception("you cannot do that, you must use space.is_true()") PyObjectS = lltype.Struct('PyObjectS', ('c_ob_refcnt', lltype.Signed), From noreply at buildbot.pypy.org Tue Oct 20 12:01:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 12:01:50 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: progress Message-ID: <20151020100150.C61901C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80351:6d455e8db843 Date: 2015-10-20 12:01 +0200 http://bitbucket.org/pypy/pypy/changeset/6d455e8db843/ Log: progress diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- 
a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -159,7 +159,7 @@ self.doc = doc self.func = func pyo = rffi.cast(PyObject, pto) - w_type = from_ref(space, pyo) + w_type = from_pyobj(space, pyo) assert isinstance(w_type, W_TypeObject) self.w_objclass = w_type diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -235,7 +235,7 @@ return pyo = rffi.cast(PyObject, pto) dict_w["__new__"] = PyCFunction_NewEx(space, get_new_method_def(space), - from_ref(space, pyo), None) + from_pyobj(space, pyo), None) def inherit_special(space, pto, base_pto): # XXX missing: copy basicsize and flags in a magical way @@ -286,7 +286,8 @@ basestruct=PyTypeObject, alloc_pyobj=type_alloc_pyobj, fill_pyobj=type_fill_pyobj, - alloc_pypy=type_alloc_pypy) + alloc_pypy=type_alloc_pypy, + fill_pypy=type_fill_pypy) #dealloc=type_dealloc) # some types are difficult to create because of cycles. 
@@ -544,17 +545,10 @@ pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READY == 0 assert pto.c_tp_flags & Py_TPFLAGS_READYING == 0 + assert pto.c_ob_type + # ^^^ shouldn't reach this place if these conditions fail + pto.c_tp_flags |= Py_TPFLAGS_READYING - try: - w_type = _type_realize(space, pto) - finally: - pto.c_tp_flags &= ~Py_TPFLAGS_READYING - pto.c_tp_flags |= Py_TPFLAGS_READY - return w_type, False - -def _type_realize(space, pto): - assert pto.c_ob_type - # ^^^ we can't reach this place if c_ob_type is still NULL if not pto.c_tp_base: base = get_pyobj_and_incref(space, space.w_object) @@ -564,12 +558,16 @@ if not pto.c_tp_bases: w_bases = space.newtuple([from_pyobj(space, pto.c_tp_base)]) pto.c_tp_bases = get_pyobj_and_incref(space, w_bases) - else: - w_bases = from_pyobj(space, pto.c_tp_bases) w_metatype = from_pyobj(space, pto.c_ob_type) w_type = space.allocate_instance(W_TypeObject, w_metatype) + return w_type, False +def type_fill_pypy(space, w_type, py_obj): + pto = rffi.cast(PyTypeObjectPtr, py_obj) + assert pto.c_tp_flags & Py_TPFLAGS_READYING + + w_bases = from_pyobj(space, pto.c_tp_bases) bases_w = space.fixedview(w_bases) or [space.w_object] name = rffi.charp2str(pto.c_tp_name) dict_w = {} @@ -593,7 +591,7 @@ W_TypeObject.__init__(w_type, space, name, bases_w, dict_w) - if not space.is_true(space.issubtype(self, space.w_type)): + if not space.is_true(space.issubtype(w_type, space.w_type)): # ZZZ? 
w_type.flag_cpytype = True w_type.flag_heaptype = False if pto.c_tp_doc: @@ -601,6 +599,8 @@ finish_type_2(space, pto, w_type) w_type.ready() + pto.c_tp_flags &= ~Py_TPFLAGS_READYING + pto.c_tp_flags |= Py_TPFLAGS_READY return w_type def solid_base(space, w_type): From noreply at buildbot.pypy.org Tue Oct 20 12:08:29 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 12:08:29 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: progress Message-ID: <20151020100829.1FB831C00E2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80352:68f7ce2c461c Date: 2015-10-20 12:08 +0200 http://bitbucket.org/pypy/pypy/changeset/68f7ce2c461c/ Log: progress diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -232,11 +232,11 @@ rawrefcount.create_link_pypy(w_obj, ob) def init_link_from_pyobj(w_obj, ob, is_transient): + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY if is_transient: rawrefcount.create_link_pyobj(w_obj, ob) else: rawrefcount.create_link_pypy(w_obj, ob) - ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY def setup_prebuilt_pyobj(w_obj, py_obj): assert lltype.typeOf(py_obj) == PyObject diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -567,6 +567,8 @@ pto = rffi.cast(PyTypeObjectPtr, py_obj) assert pto.c_tp_flags & Py_TPFLAGS_READYING + w_type.cpyext_c_type_object = pto + w_bases = from_pyobj(space, pto.c_tp_bases) bases_w = space.fixedview(w_bases) or [space.w_object] name = rffi.charp2str(pto.c_tp_name) From noreply at buildbot.pypy.org Tue Oct 20 12:17:43 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 20 Oct 2015 12:17:43 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: variations of AND, OR, XOR (reg-reg, reg-mem) Message-ID: 
<20151020101743.CC82F1C0036@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80353:43eb605709b5 Date: 2015-10-20 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/43eb605709b5/ Log: variations of AND, OR, XOR (reg-reg, reg-mem) diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -8,7 +8,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.tool.udir import udir from rpython.jit.backend.detect_cpu import autodetect -from rpython.rtyper.lltypesystem.rbuilder import always_inline clear_cache = rffi.llexternal( "__clear_cache", diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -1,6 +1,6 @@ -from rpython.jit.backend.zarch.instructions import (all_mnemonic_codes, - arith_mnemic_codes, branch_mnemoic_codes) +from rpython.jit.backend.zarch.instructions import (all_mnemonic_codes,) from rpython.rtyper.lltypesystem.rbuilder import always_inline +from rpython.rlib.unroll import unrolling_iterable class builder(object): @@ -14,9 +14,9 @@ r/m - register or mask iX - immediate X bits (signed) uX - immediate X bits (unsigend) - bd - base displacement (12 bit) + bd - base displacement (unsigned 12 bit) bdl - base displacement long (20 bit) - bid - index base displacement + bid - index base displacement (unsigned 12 bit) bidl - index base displacement (20 bit) l4bd - length base displacement (4 bit) l8bd - length base displacement (8 bit) @@ -118,6 +118,12 @@ self.writechar(chr(imm16 & 0xff)) return encode_ri +def build_ri_u(mnemonic, (opcode,halfopcode)): + # unsigned version of ri + func = build_ri(mnemonic, (opcode,halfopcode)) + func._arguments_[1] = 'u16' + return func + def build_ril(mnemonic, 
(opcode,halfopcode)): br = is_branch_relative(mnemonic) @builder.arguments('r/m,i32') @@ -270,7 +276,12 @@ return name.startswith('BR') def build_instr_codes(clazz): - for mnemonic, (instrtype, args) in all_mnemonic_codes.items(): + for mnemonic, params in all_mnemonic_codes.items(): + options = {} + if len(params) == 2: + (instrtype, args) = params + else: + (instrtype, args, options) = params builder = globals()['build_' + instrtype] func = builder(mnemonic, args) name = mnemonic + "_" + instrtype diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -1,5 +1,5 @@ -branch_mnemoic_codes = { +branch_mnemonic_codes = { 'BRASL': ('ril', ['\xC0','\x05']), 'BCR': ('rr', ['\x07']), 'BC': ('rx', ['\x47']), @@ -7,7 +7,7 @@ 'BRCL': ('ril', ['\xC0','\x04']), } -arith_mnemic_codes = { +arith_mnemonic_codes = { 'AR': ('rr', ['\x1A']), 'AGR': ('rre', ['\xB9','\x08']), 'AGFR': ('rre', ['\xB9','\x18']), @@ -16,6 +16,47 @@ 'SGR': ('rre', ['\xB9','\x09']), } +logic_mnemonic_codes = { + # AND operations + 'NGR': ('rre', ['\xB9','\x80']), + 'NG': ('rxy', ['\xE3','\x80']), + # and one byte and store it back at the op2 position + 'NI': ('si', ['\x94']), + 'NIY': ('siy', ['\xEB','\x54']), + + # AND immediate + 'NIHH': ('ri_u', ['\xA5', '\x04']), + 'NIHL': ('ri_u', ['\xA5', '\x05']), + 'NILH': ('ri_u', ['\xA5', '\x06']), + 'NILL': ('ri_u', ['\xA5', '\x07']), + + # OR operations + 'OGR': ('rre', ['\xB9','\x81']), + 'OG': ('rxy', ['\xE3','\x81']), + # or one byte and store it back at the op2 position + 'OI': ('si', ['\x96']), + 'OIY': ('siy', ['\xEB','\x56']), + + # OR immediate + 'OIHH': ('ri_u', ['\xA5', '\x08']), + 'OIHL': ('ri_u', ['\xA5', '\x09']), + 'OILH': ('ri_u', ['\xA5', '\x0A']), + 'OILL': ('ri_u', ['\xA5', '\x0B']), + + # XOR operations + 'XGR': ('rre', ['\xB9','\x82']), + 'XG': ('rxy', ['\xE3','\x82']), + # or one byte and store it 
back at the op2 position + 'XI': ('si', ['\x97']), + 'XIY': ('siy', ['\xEB','\x57']), + + # OR immediate + 'OIHH': ('ri_u', ['\xA5', '\x08']), + 'OIHL': ('ri_u', ['\xA5', '\x09']), + 'OILH': ('ri_u', ['\xA5', '\x0A']), + 'OILL': ('ri_u', ['\xA5', '\x0B']), +} + all_mnemonic_codes = { 'AY': ('rxy', ['\xE3','\x5A']), 'AG': ('rxy', ['\xE3','\x08']), @@ -44,6 +85,12 @@ 'PKA': ('ssf', ['\xE9']), 'STMG': ('rsy', ['\xEB','\x24']), } -all_mnemonic_codes.update(arith_mnemic_codes) -all_mnemonic_codes.update(branch_mnemoic_codes) +all_mnemonic_codes.update(arith_mnemonic_codes) +all_mnemonic_codes.update(logic_mnemonic_codes) +all_mnemonic_codes.update(branch_mnemonic_codes) + +if __name__ == "__main__": + print("%d instructions:" % len(all_mnemonic_codes)) + for name, (typeinstr, _) in all_mnemonic_codes.items(): + print(" %s\t(type: %s)" % (name, typeinstr)) diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -239,7 +239,7 @@ def complete_test(self, methname): if '_' in methname: - instrname, argmodes = methname.split('_') + instrname, argmodes = methname.split('_')[:2] else: instrname, argmodes = methname, '' argmodes = self.modes(argmodes) From noreply at buildbot.pypy.org Tue Oct 20 12:17:45 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 20 Oct 2015 12:17:45 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: fun with and, or, xor in real assembler programs (tests added) Message-ID: <20151020101745.DDA911C0036@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80354:d564b27b0ae4 Date: 2015-10-20 12:17 +0200 http://bitbucket.org/pypy/pypy/changeset/d564b27b0ae4/ Log: fun with and,or,xor in real assembler programs (tests added) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py 
b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -73,3 +73,23 @@ self.a.jmpto(reg.r14) assert run_asm(self.a) == 0 + def test_and_imm(self): + self.a.mc.NIHH(reg.r2, loc.imm(0)) + self.a.mc.NIHL(reg.r2, loc.imm(0)) + self.a.mc.NILL(reg.r2, loc.imm(0)) + self.a.mc.NILH(reg.r2, loc.imm(0)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 0 + + def test_or_imm(self): + self.a.mc.OIHH(reg.r2, loc.imm(0xffff)) + self.a.mc.OIHL(reg.r2, loc.imm(0xffff)) + self.a.mc.OILL(reg.r2, loc.imm(0xffff)) + self.a.mc.OILH(reg.r2, loc.imm(0xffff)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == -1 + + def test_xor(self): + self.a.mc.XGR(reg.r2, reg.r2) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 0 From noreply at buildbot.pypy.org Tue Oct 20 16:48:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 16:48:56 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Tweak the dictionary pypy->pyobj: split it into two dicts, should Message-ID: <20151020144856.DE7851C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80355:c5471ee4a011 Date: 2015-10-20 16:48 +0200 http://bitbucket.org/pypy/pypy/changeset/c5471ee4a011/ Log: Tweak the dictionary pypy->pyobj: split it into two dicts, should avoid the dict filling quickly with many NULLs until the next major collection diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2770,7 +2770,8 @@ self.rrc_p_list_old = self.AddressStack() self.rrc_o_list_young = self.AddressStack() self.rrc_o_list_old = self.AddressStack() - self.rrc_p_dict = self.AddressDict() + self.rrc_p_dict = self.AddressDict() # non-nursery keys only + self.rrc_p_dict_nurs = self.AddressDict() # nursery keys only p = lltype.malloc(self._ADDRARRAY, 1, flavor='raw', 
track_allocation=False) self.rrc_singleaddr = llmemory.cast_ptr_to_adr(p) @@ -2786,17 +2787,23 @@ def check_value_is_null(key, value, ignore): assert value == llmemory.NULL self.rrc_p_dict.foreach(check_value_is_null, None) + self.rrc_p_dict_nurs.foreach(check_value_is_null, None) def rawrefcount_create_link_pypy(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") obj = llmemory.cast_ptr_to_adr(gcobj) - if self.is_young_object(obj): - self.rrc_p_list_young.append(pyobject) - else: - self.rrc_p_list_old.append(pyobject) objint = llmemory.cast_adr_to_int(obj, "symbolic") self._pyobj(pyobject).ob_pypy_link = objint - self.rrc_p_dict.setitem(obj, pyobject) + # + lst = self.rrc_p_list_young + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + if not self.is_young_object(obj): + lst = self.rrc_p_list_old + lst.append(pyobject) + dct.setitem(obj, pyobject) def rawrefcount_create_link_pyobj(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") @@ -2811,7 +2818,11 @@ def rawrefcount_from_obj(self, gcobj): obj = llmemory.cast_ptr_to_adr(gcobj) - return self.rrc_p_dict.get(obj) + if self.is_in_nursery(obj): + dct = self.rrc_p_dict_nurs + else: + dct = self.rrc_p_dict + return dct.get(obj) def rawrefcount_to_obj(self, pyobject): obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) @@ -2819,6 +2830,9 @@ def rrc_minor_collection_trace(self): + length_estimate = self.rrc_p_dict_nurs.length() + self.rrc_p_dict_nurs.delete() + self.rrc_p_dict_nurs = self.AddressDict(length_estimate) self.rrc_p_list_young.foreach(self._rrc_minor_trace, self.rrc_singleaddr) @@ -2836,6 +2850,7 @@ self._trace_drag_out(singleaddr, llmemory.NULL) def rrc_minor_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 1") lst = self.rrc_p_list_young while lst.non_empty(): self._rrc_minor_free(lst.pop(), self.rrc_p_list_old, @@ -2849,8 +2864,6 @@ def 
_rrc_minor_free(self, pyobject, surviving_list, surviving_dict): intobj = self._pyobj(pyobject).ob_pypy_link obj = llmemory.cast_int_to_adr(intobj) - if surviving_dict: - surviving_dict.setitem(obj, llmemory.NULL) if self.is_in_nursery(obj): if self.is_forwarded(obj): # Common case: survives and moves @@ -2858,6 +2871,10 @@ intobj = llmemory.cast_adr_to_int(obj, "symbolic") self._pyobj(pyobject).ob_pypy_link = intobj surviving = True + if surviving_dict: + # Surviving nursery object: was originally in + # rrc_p_dict_nurs and now must be put into rrc_p_dict + surviving_dict.setitem(obj, pyobject) else: surviving = False elif (bool(self.young_rawmalloced_objects) and @@ -2867,14 +2884,16 @@ surviving = True # survives, but does not move else: surviving = False + if surviving_dict: + # Dying young large object: was in rrc_p_dict, + # must be deleted + surviving_dict.setitem(obj, llmemory.NULL) else: ll_assert(False, "rrc_X_list_young contains non-young obj") return # if surviving: surviving_list.append(pyobject) - if surviving_dict: - surviving_dict.setitem(obj, pyobject) else: self._rrc_free(pyobject) @@ -2920,6 +2939,7 @@ self.visit_all_objects() def rrc_major_collection_free(self): + ll_assert(self.rrc_p_dict_nurs.length() == 0, "p_dict_nurs not empty 2") length_estimate = self.rrc_p_dict.length() self.rrc_p_dict.delete() self.rrc_p_dict = new_p_dict = self.AddressDict(length_estimate) From noreply at buildbot.pypy.org Tue Oct 20 17:47:50 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 20 Oct 2015 17:47:50 +0200 (CEST) Subject: [pypy-commit] buildbot default: Don't handle xpasses and xfails as test failures Message-ID: <20151020154750.F30391C0036@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r967:3f895b5359db Date: 2015-10-20 06:52 +0100 http://bitbucket.org/pypy/buildbot/changeset/3f895b5359db/ Log: Don't handle xpasses and xfails as test failures diff --git a/bot2/pypybuildbot/summary.py b/bot2/pypybuildbot/summary.py --- 
a/bot2/pypybuildbot/summary.py +++ b/bot2/pypybuildbot/summary.py @@ -55,6 +55,7 @@ self.failed = set() self.skipped = set() self._xfailed = 0 + self._xpassed = 0 self.longreprs = {} self._run_info = run_info @@ -88,6 +89,8 @@ pass elif shortrepr == 'x': self._xfailed += 1 + elif shortrepr == 'X': + self._xpassed += 1 else: self.failed.add(namekey) @@ -101,8 +104,8 @@ @property def numpassed(self): - return (len(self._outcomes) - len(self.skipped) - len(self.failed) - - self._xfailed) + return (len(self._outcomes) - len(self.skipped) - len(self.failed) - + self._xfailed - self._xpassed) @property def numxfailed(self): @@ -246,7 +249,7 @@ if self._failed is None: self._failed = set() for prefix, outcome in self.map.items(): - self._failed.update([(prefix,)+ namekey for namekey in + self._failed.update([(prefix,) + namekey for namekey in outcome.failed]) return self._failed @@ -497,7 +500,7 @@ combination = 0 for i, (label, outcome_set) in enumerate(by_label): letter = outcome_set.get_outcome(failure) - failed = letter.lower() not in ('s', '.', ' ') + failed = letter.lower() not in ('s', '.', ' ', 'x') if failed: combination |= 1 << i if outcome_set.get_longrepr(failure): diff --git a/bot2/pypybuildbot/test/test_summary.py b/bot2/pypybuildbot/test/test_summary.py --- a/bot2/pypybuildbot/test/test_summary.py +++ b/bot2/pypybuildbot/test/test_summary.py @@ -150,11 +150,12 @@ rev_outcome_set = summary.RevisionOutcomeSet('50000') log = StringIO("""x a/b.py EXC +X a/b.py::test_1 """) rev_outcome_set.populate(log) - assert rev_outcome_set.numxfailed == 1 + assert not rev_outcome_set.failed def test_absent_outcome(self): @@ -204,9 +205,10 @@ log = StringIO("""F a/b.py:test_one some traceback -. a/b.py:test_two -s a/b.py:test_three -x a/b.py:test_four +. 
a/b.py::test_two +s a/b.py::test_three +x a/b.py::test_four +X a/b.py::test_five """) rev_outcome_set_foo.populate(log) @@ -215,9 +217,9 @@ key_bar = ('bar', 7) rev_outcome_set_bar = summary.RevisionOutcomeSet('50000', key_bar) - log = StringIO(""". a/b.py:test_one -. a/b.py:test_two -s a/b.py:test_three + log = StringIO(""". a/b.py::test_one +. a/b.py::test_two +s a/b.py::test_three """) rev_outcome_set_bar.populate(log) From noreply at buildbot.pypy.org Tue Oct 20 18:31:15 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 18:31:15 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Support varsized arrays at the end of structs Message-ID: <20151020163115.24C4A1C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80356:5dbf9e6ec173 Date: 2015-10-20 17:10 +0200 http://bitbucket.org/pypy/pypy/changeset/5dbf9e6ec173/ Log: Support varsized arrays at the end of structs diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -313,7 +313,9 @@ offset = info['fldofs ' + fieldname] size = info['fldsize ' + fieldname] sign = info.get('fldunsigned ' + fieldname, False) - if (size, sign) != rffi.size_and_sign(fieldtype): + if is_array_nolength(fieldtype): + pass # ignore size and sign + elif (size, sign) != rffi.size_and_sign(fieldtype): fieldtype = fixup_ctype(fieldtype, fieldname, (size, sign)) layout_addfield(layout, offset, fieldtype, fieldname) @@ -682,8 +684,14 @@ def __repr__(self): return '' % (self.name, self.ctype) +def is_array_nolength(TYPE): + return isinstance(TYPE, lltype.Array) and TYPE._hints.get('nolength', False) + def layout_addfield(layout, offset, ctype, prefix): - size = _sizeof(ctype) + if is_array_nolength(ctype): + size = len(layout) - offset # all the rest of the struct + else: + size = _sizeof(ctype) name = prefix i = 0 while name in layout: diff --git 
a/rpython/rtyper/tool/test/test_rffi_platform.py b/rpython/rtyper/tool/test/test_rffi_platform.py --- a/rpython/rtyper/tool/test/test_rffi_platform.py +++ b/rpython/rtyper/tool/test/test_rffi_platform.py @@ -270,6 +270,19 @@ [("d_name", lltype.FixedSizeArray(rffi.CHAR, 1))]) assert dirent.c_d_name.length == 32 +def test_array_varsized_struct(): + dirent = rffi_platform.getstruct("struct dirent", + """ + struct dirent /* for this example only, not the exact dirent */ + { + int d_off; + char d_name[1]; + }; + """, + [("d_name", rffi.CArray(rffi.CHAR))]) + assert rffi.offsetof(dirent, 'c_d_name') == 4 + assert dirent.c_d_name == rffi.CArray(rffi.CHAR) + def test_has_0001(): assert rffi_platform.has("x", "int x = 3;") assert not rffi_platform.has("x", "") From noreply at buildbot.pypy.org Tue Oct 20 18:31:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 18:31:17 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Start to pass a few tests in test_intobject, after performance Message-ID: <20151020163117.845551C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80357:45142ab484f1 Date: 2015-10-20 18:18 +0200 http://bitbucket.org/pypy/pypy/changeset/45142ab484f1/ Log: Start to pass a few tests in test_intobject, after performance improvements to intobject.py and other fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -381,6 +381,14 @@ INTERPLEVEL_API = {} FUNCTIONS = {} +def constant_pyobj(space, name): + if we_are_translated(): + ZZZ # should return the C symbol "Py" + name, constant-folded + else: + from pypy.module.cpyext.pyobject import as_pyobj + w_obj = INTERPLEVEL_API[name] + return as_pyobj(space, w_obj) + # These are C symbols which cpyext will export, but which are defined in .c # files somewhere in the implementation of cpyext (rather than being defined in # RPython). 
@@ -496,7 +504,7 @@ GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - PyDictObject PyTupleObject PyClassObject'''.split(): + PyDictObject PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() @@ -559,25 +567,31 @@ if name in TYPES: TYPES[name].become(TYPE) -def build_type_checkers(type_name, cls=None): +def build_type_checkers3(type_name, cls=None): """ Builds two api functions: Py_XxxCheck() and Py_XxxCheckExact(). - if `cls` is None, the type is space.w_[type]. - if `cls` is a string, it is the name of a space attribute, e.g. 'w_str'. - else `cls` must be a W_Class with a typedef. """ + py_type_name = "Py" + type_name + "_Type" + check_name = "Py" + type_name + "_Check" + if cls is None: - attrname = "w_" + type_name.lower() - def get_w_type(space): - return getattr(space, attrname) - elif isinstance(cls, str): + cls = "w_" + type_name.lower() + if isinstance(cls, str): def get_w_type(space): return getattr(space, cls) + def _PyXxx_Type(space): + return rffi.cast(PyTypeObjectPtr, + constant_pyobj(space, py_type_name)) else: @specialize.memo() def get_w_type(space): return space.gettypeobject(cls.typedef) - check_name = "Py" + type_name + "_Check" + def _PyXxx_Type(): + ZZZ + _PyXxx_Type = func_with_new_name(_PyXxx_Type, '_' + py_type_name) def check(space, w_obj): "Implements the Py_Xxx_Check function" @@ -587,15 +601,18 @@ space.is_true(space.issubtype(w_obj_type, w_type))) def check_exact(space, py_obj): "Implements the Py_Xxx_CheckExact function" - py_type = get_w_type(space).cpyext_c_type_object - assert py_type - return py_obj.c_ob_type == py_type + #py_type = get_w_type(space).cpyext_c_type_object + return py_obj.c_ob_type == _PyXxx_Type(space) check = cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)( func_with_new_name(check, check_name)) check_exact = cpython_api([PyObject], rffi.INT_real, 
error=CANNOT_FAIL)( func_with_new_name(check_exact, check_name + "Exact")) - return check, check_exact + return check, check_exact, _PyXxx_Type + +def build_type_checkers(type_name, cls=None): + # backward compatibility + return build_type_checkers3(type_name, cls=cls)[:2] pypy_debug_catch_fatal_exception = rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void) @@ -793,7 +810,8 @@ struct PyPyAPI { %(members)s } _pypyAPI; - RPY_EXTERN struct PyPyAPI* pypyAPI = &_pypyAPI; + RPY_EXTERN struct PyPyAPI* pypyAPI; + struct PyPyAPI* pypyAPI = &_pypyAPI; """ % dict(members=structmembers) functions = generate_decls_and_callbacks(db, export_symbols) @@ -848,6 +866,7 @@ INTERPLEVEL_API[name] = w_obj + orgname = name name = name.replace('Py', 'cpyexttest') if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) diff --git a/pypy/module/cpyext/include/tupleobject.h b/pypy/module/cpyext/include/tupleobject.h --- a/pypy/module/cpyext/include/tupleobject.h +++ b/pypy/module/cpyext/include/tupleobject.h @@ -7,6 +7,11 @@ extern "C" { #endif +typedef struct { + PyObject_VAR_HEAD + PyObject *ob_item[1]; +} PyTupleObject; + /* defined in varargswrapper.c */ PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...); diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -2,10 +2,10 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, build_type_checkers, bootstrap_function, + cpython_api, cpython_struct, build_type_checkers3, bootstrap_function, PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) -from pypy.module.cpyext.pyobject import ( - setup_class_for_cpyext, track_reference, RefcountState, from_ref) +from pypy.module.cpyext.pyobject import (setup_class_for_cpyext, new_pyobj, + from_pyobj) from rpython.rlib.rarithmetic import r_uint, intmask, 
LONG_TEST, r_ulonglong from pypy.objspace.std.intobject import W_IntObject import sys @@ -16,15 +16,39 @@ (("ob_ival", rffi.LONG),) cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct) +PyInt_Check, PyInt_CheckExact, _PyInt_Type = build_type_checkers3("Int") + + @bootstrap_function def init_intobject(space): "Type description of PyIntObject" from pypy.objspace.std.intobject import W_AbstractIntObject, W_IntObject - setup_class_for_cpyext(W_AbstractIntObject, - basestruct=PyIntObject.TO, - fill_pyobj=int_fill_pyobj, - fill_pypy=int_fill_pypy, - realize_subclass_of=W_IntObject) + setup_class_for_cpyext( + # --the base class of all 'int' objects inside PyPy-- + W_AbstractIntObject, + + # --the structure type derived from PyObject-- + basestruct=PyIntObjectStruct, + + # --after a PyIntObject is allocated, we call this function to + # fill it. It gets attached as RRC_PERMANENT_LIGHT by default, + # which means the association is permanent (the PyIntObject is + # alive and won't appear to move as long as the W_IntObject is + # alive) and light (the PyIntObject can be freed with free()).-- + fill_pyobj=int_fill_pyobj, + + # --reverse direction: from a PyIntObject, we make a W_IntObject + # by instantiating a custom subclass of W_IntObject-- + realize_subclass_of=W_IntObject, + + # --and then we call this function to initialize the W_IntObject-- + fill_pypy=int_fill_pypy, + + # --in this case, and if PyInt_CheckExact() returns True, then + # the link can be light, i.e. 
the original PyIntObject might + # be freed with free() by the GC-- + alloc_pypy_light_if=PyInt_CheckExact, + ) def int_fill_pyobj(space, w_obj, py_int): """ @@ -41,8 +65,6 @@ intval = rffi.cast(lltype.Signed, py_int.c_ob_ival) W_IntObject.__init__(w_obj, intval) -PyInt_Check, PyInt_CheckExact = build_type_checkers("Int") - @cpython_api([], lltype.Signed, error=CANNOT_FAIL) def PyInt_GetMax(space): """Return the system's idea of the largest integer it can handle (LONG_MAX, @@ -52,19 +74,23 @@ @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. - """ - return space.wrap(ival) + py_int = new_pyobj(PyIntObjectStruct, _PyInt_Type(space)) + py_int.c_ob_ival = ival + return rffi.cast(PyObject, py_int) @cpython_api([PyObject], lltype.Signed, error=-1) -def PyInt_AsLong(space, w_obj): +def PyInt_AsLong(space, py_obj): """Will first attempt to cast the object to a PyIntObject, if it is not already one, and then return its value. If there is an error, -1 is returned, and the caller should check PyErr_Occurred() to find out whether there was an error, or whether the value just happened to be -1.""" - if w_obj is None: + if not py_obj: raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) + if PyInt_Check(space, py_obj): + return PyInt_AS_LONG(space, py_obj) + w_obj = from_pyobj(space, py_obj) return space.int_w(space.int(w_obj)) @cpython_api([PyObject], lltype.Unsigned, error=-1) @@ -108,9 +134,10 @@ return num.ulonglongmask() @cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL) -def PyInt_AS_LONG(space, w_int): +def PyInt_AS_LONG(space, py_obj): """Return the value of the object w_int. 
No error checking is performed.""" - return space.int_w(w_int) + py_int = rffi.cast(PyIntObject, py_obj) + return py_int.c_ob_ival @cpython_api([PyObject], Py_ssize_t, error=-1) def PyInt_AsSsize_t(space, w_obj): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -14,130 +14,21 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rlib import rawrefcount -#________________________________________________________ -# type description ZZZ SEE BELOW - -class BaseCpyTypedescr(object): - basestruct = PyObject.TO - W_BaseObject = W_ObjectObject - - def get_dealloc(self, space): - from pypy.module.cpyext.typeobject import subtype_dealloc - return llhelper( - subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) - - def allocate(self, space, w_type, itemcount=0): - # similar to PyType_GenericAlloc? - # except that it's not related to any pypy object. - - pytype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) - # Don't increase refcount for non-heaptypes - if pytype: - flags = rffi.cast(lltype.Signed, pytype.c_tp_flags) - if not flags & Py_TPFLAGS_HEAPTYPE: - Py_DecRef(space, w_type) - - if pytype: - size = pytype.c_tp_basicsize - else: - size = rffi.sizeof(self.basestruct) - if itemcount: - size += itemcount * pytype.c_tp_itemsize - buf = lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) - pyobj = rffi.cast(PyObject, buf) - pyobj.c_ob_refcnt = 1 - pyobj.c_ob_type = pytype - return pyobj - - def attach(self, space, pyobj, w_obj): - pass - - def realize(self, space, obj): - w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) - w_obj = space.allocate_instance(self.W_BaseObject, w_type) - track_reference(space, obj, w_obj) - if w_type is not space.gettypefor(self.W_BaseObject): - state = space.fromcache(RefcountState) - state.set_lifeline(w_obj, obj) - return w_obj - -def make_typedescr(typedef, **kw): - return 
#ZZZ - """NOT_RPYTHON - - basestruct: The basic structure to allocate - alloc : allocate and basic initialization of a raw PyObject - attach : Function called to tie a raw structure to a pypy object - realize : Function called to create a pypy object from a raw struct - dealloc : a cpython_api(external=False), similar to PyObject_dealloc - """ - - tp_basestruct = kw.pop('basestruct', PyObject.TO) - tp_alloc = kw.pop('alloc', None) - tp_attach = kw.pop('attach', None) - tp_realize = kw.pop('realize', None) - tp_dealloc = kw.pop('dealloc', None) - assert not kw, "Extra arguments to make_typedescr" - - null_dealloc = lltype.nullptr(lltype.FuncType([PyObject], lltype.Void)) - - class CpyTypedescr(BaseCpyTypedescr): - basestruct = tp_basestruct - - if tp_alloc: - def allocate(self, space, w_type, itemcount=0): - return tp_alloc(space, w_type) - - if tp_dealloc: - def get_dealloc(self, space): - return llhelper( - tp_dealloc.api_func.functype, - tp_dealloc.api_func.get_wrapper(space)) - - if tp_attach: - def attach(self, space, pyobj, w_obj): - tp_attach(space, pyobj, w_obj) - - if tp_realize: - def realize(self, space, ref): - return tp_realize(space, ref) - if typedef: - CpyTypedescr.__name__ = "CpyTypedescr_%s" % (typedef.name,) - - typedescr_cache[typedef] = CpyTypedescr() - -#@bootstrap_function ZZZ -def init_pyobject(space): - from pypy.module.cpyext.object import PyObject_dealloc - # typedescr for the 'object' type - make_typedescr(space.w_object.instancetypedef, - dealloc=PyObject_dealloc) - # almost all types, which should better inherit from object. 
- make_typedescr(None) - - at specialize.memo() -def _get_typedescr_1(typedef): - ZZZ - try: - return typedescr_cache[typedef] - except KeyError: - if typedef.bases: - return _get_typedescr_1(typedef.bases[0]) - return typedescr_cache[None] - -def get_typedescr(typedef): - ZZZ - if typedef is None: - return typedescr_cache[None] - else: - return _get_typedescr_1(typedef) - #________________________________________________________ # type description +def make_typedescr(arg0, *args, **kwds): + print "ZZZ: make_typedescr(%r)" % (arg0,) +def get_typedescr(*args, **kwds): + ZZZ +RefcountState = "ZZZ" + +RRC_PERMANENT = 'P' # the link pyobj<->pypy is permanent +RRC_PERMANENT_LIGHT = 'p' # same, but tp_dealloc can be replaced with free() +RRC_TRANSIENT = 'T' # the pypy object is transient and can go away +RRC_TRANSIENT_LIGHT = 't' # same, but tp_dealloc can be replaced with free() + def setup_class_for_cpyext(W_Class, **kw): """NOT_RPYTHON @@ -156,6 +47,7 @@ tp_fill_pypy = kw.pop('fill_pypy', None) force_create_pyobj = kw.pop('force_create_pyobj', False) realize_subclass_of = kw.pop('realize_subclass_of', None) + alloc_pypy_light_if = kw.pop('alloc_pypy_light_if', None) #tp_dealloc = kw.pop('dealloc', None) assert not kw, "Extra arguments to make_typedescr: %s" % kw.keys() @@ -167,7 +59,7 @@ def tp_alloc_pyobj(space, w_obj): ob = lltype.malloc(tp_basestruct, flavor='raw', track_allocation=False) - return ob, True + return ob, RRC_PERMANENT_LIGHT tp_alloc_pyobj._always_inline_ = True # if not tp_fill_pyobj: @@ -175,10 +67,11 @@ pass # def cpyext_create_pyobj(self, space): - py_obj, is_light = tp_alloc_pyobj(space, self) + py_obj, strength = tp_alloc_pyobj(space, self) ob = rffi.cast(PyObject, py_obj) - ob_type = get_c_ob_type(space, space.type(self)) - init_link_from_pypy(self, ob, ob_type, is_light) + ob.c_ob_refcnt = 0 + ob.c_ob_type = get_c_ob_type(space, space.type(self)) + rawrefcount_init_link(self, ob, strength) tp_fill_pyobj(space, self, py_obj) return ob 
W_Class.cpyext_create_pyobj = cpyext_create_pyobj @@ -203,7 +96,11 @@ realize_subclass_of) def tp_alloc_pypy(space, pyobj): w_obj = W_CPyExtPlaceHolder(pyobj) - return w_obj, True + strength = RRC_TRANSIENT + if alloc_pypy_light_if is not None: + if alloc_pypy_light_if(space, pyobj): + strength = RRC_TRANSIENT_LIGHT + return w_obj, strength tp_alloc_pypy._always_inline_ = True # if not tp_fill_pypy: @@ -211,8 +108,8 @@ pass # def cpyext_create_pypy(space, pyobj): - w_obj, is_transient = tp_alloc_pypy(space, pyobj) - init_link_from_pyobj(w_obj, pyobj, is_transient) + w_obj, strength = tp_alloc_pypy(space, pyobj) + rawrefcount_init_link(w_obj, pyobj, strength) tp_fill_pypy(space, w_obj, pyobj) return w_obj # @@ -223,25 +120,30 @@ W_Class.cpyext_basestruct = tp_basestruct -def init_link_from_pypy(w_obj, ob, ob_type, is_light): - if is_light: - ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY_LIGHT +def rawrefcount_init_link(w_obj, ob, strength): + if strength == RRC_PERMANENT: + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY + rawrefcount.create_link_pypy(w_obj, ob) + # + elif strength == RRC_PERMANENT_LIGHT: + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY_LIGHT + rawrefcount.create_link_pypy(w_obj, ob) + # + elif strength == RRC_TRANSIENT: + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY + rawrefcount.create_link_pyobj(w_obj, ob) + # + elif strength == RRC_TRANSIENT_LIGHT: + ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY_LIGHT + rawrefcount.create_link_pyobj(w_obj, ob) + # else: - ob.c_ob_refcnt = rawrefcount.REFCNT_FROM_PYPY - ob.c_ob_type = ob_type - rawrefcount.create_link_pypy(w_obj, ob) + assert False, "rawrefcount_init_link: strength=%r" % (strength,) -def init_link_from_pyobj(w_obj, ob, is_transient): - ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY - if is_transient: - rawrefcount.create_link_pyobj(w_obj, ob) - else: - rawrefcount.create_link_pypy(w_obj, ob) def setup_prebuilt_pyobj(w_obj, py_obj): assert lltype.typeOf(py_obj) == PyObject - 
init_link_from_pypy(w_obj, py_obj, lltype.nullptr(PyTypeObjectPtr.TO), - False) + rawrefcount_init_link(w_obj, py_obj, RRC_PERMANENT) if isinstance(w_obj, W_TypeObject): w_obj.cpyext_c_type_object = rffi.cast(PyTypeObjectPtr, py_obj) @@ -316,108 +218,6 @@ #________________________________________________________ # refcounted object support -class RefcountState: - def __init__(self, space): - ZZZ - self.space = space - self.py_objects_w2r = {} # { w_obj -> raw PyObject } - self.py_objects_r2w = {} # { addr of raw PyObject -> w_obj } - - self.lifeline_dict = RWeakKeyDictionary(W_Root, PyOLifeline) - - self.borrow_mapping = {None: {}} - # { w_container -> { w_containee -> None } } - # the None entry manages references borrowed during a call to - # generic_cpy_call() - - # For tests - self.non_heaptypes_w = [] - - def _cleanup_(self): - assert self.borrow_mapping == {None: {}} - self.py_objects_r2w.clear() # is not valid anymore after translation - - def init_r2w_from_w2r(self): - """Rebuilds the dict py_objects_r2w on startup""" - for w_obj, obj in self.py_objects_w2r.items(): - ptr = rffi.cast(ADDR, obj) - self.py_objects_r2w[ptr] = w_obj - - def print_refcounts(self): - print "REFCOUNTS" - for w_obj, obj in self.py_objects_w2r.items(): - print "%r: %i" % (w_obj, obj.c_ob_refcnt) - - def get_from_lifeline(self, w_obj): - lifeline = self.lifeline_dict.get(w_obj) - if lifeline is not None: # make old PyObject ready for use in C code - py_obj = lifeline.pyo - assert py_obj.c_ob_refcnt == 0 - return py_obj - else: - return lltype.nullptr(PyObject.TO) - - def set_lifeline(self, w_obj, py_obj): - self.lifeline_dict.set(w_obj, - PyOLifeline(self.space, py_obj)) - - def make_borrowed(self, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as long as the container - has a living reference (as a PyObject!) 
- """ - ref = make_ref(self.space, w_borrowed) - obj_ptr = rffi.cast(ADDR, ref) - - borrowees = self.borrow_mapping.setdefault(w_container, {}) - if w_borrowed in borrowees: - Py_DecRef(self.space, w_borrowed) # cancel incref from make_ref() - else: - borrowees[w_borrowed] = None - - return ref - - def reset_borrowed_references(self): - "Used in tests" - for w_container, w_borrowed in self.borrow_mapping.items(): - Py_DecRef(self.space, w_borrowed) - self.borrow_mapping = {None: {}} - - def delete_borrower(self, w_obj): - """ - Called when a potential container for borrowed references has lost its - last reference. Removes the borrowed references it contains. - """ - if w_obj in self.borrow_mapping: # move to lifeline __del__ - for w_containee in self.borrow_mapping[w_obj]: - self.forget_borrowee(w_containee) - del self.borrow_mapping[w_obj] - - def swap_borrow_container(self, container): - """switch the current default contained with the given one.""" - if container is None: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = {} - return old_container - else: - old_container = self.borrow_mapping[None] - self.borrow_mapping[None] = container - for w_containee in old_container: - self.forget_borrowee(w_containee) - - def forget_borrowee(self, w_obj): - "De-register an object from the list of borrowed references" - ref = self.py_objects_w2r.get(w_obj, lltype.nullptr(PyObject.TO)) - if not ref: - if DEBUG_REFCOUNT: - print >>sys.stderr, "Borrowed object is already gone!" 
- return - - Py_DecRef(self.space, ref) - -class InvalidPointerException(Exception): - pass - DEBUG_REFCOUNT = False def debug_refcount(*args, **kwargs): @@ -568,6 +368,15 @@ return w_obj + at specialize.ll() +def new_pyobj(PYOBJ_TYPE, ob_type): + ob = lltype.malloc(PYOBJ_TYPE, flavor='raw', track_allocation=False) + ob.c_ob_refcnt = 1 + ob.c_ob_type = ob_type + ob.c_ob_pypy_link = 0 + return ob + + def make_ref(space, w_obj): ZZZ diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -15,7 +15,7 @@ from pypy.module.cpyext import api from pypy.module.cpyext.state import State from pypy.module.cpyext.pyobject import RefcountState, debug_collect -from pypy.module.cpyext.pyobject import Py_DecRef, InvalidPointerException +from pypy.module.cpyext.pyobject import Py_DecRef from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder from rpython.rlib import rawrefcount diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -1,3 +1,4 @@ +from pypy.module.cpyext.pyobject import from_pyobj from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase import sys @@ -12,7 +13,7 @@ y = api.PyInt_AS_LONG(space.wrap(i)) assert x == i assert y == i - w_x = api.PyInt_FromLong(x + 1) + w_x = from_pyobj(space, api.PyInt_FromLong(x + 1)) assert space.type(w_x) is space.w_int assert space.eq_w(w_x, space.wrap(i + 1)) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,16 +1,33 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from 
pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, - build_type_checkers) + cpython_struct, PyObjectFields, build_type_checkers, bootstrap_function) from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, - borrow_from, make_ref, from_ref) + setup_class_for_cpyext) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.objspace.std.tupleobject import W_TupleObject +from pypy.objspace.std.tupleobject import W_TupleObject, W_AbstractTupleObject + +PyTupleObjectStruct = lltype.ForwardReference() +PyTupleObject = lltype.Ptr(PyTupleObjectStruct) +PyTupleObjectFields = PyObjectFields + \ + (("ob_item", rffi.CArray(PyObject)),) +cpython_struct("PyTupleObject", PyTupleObjectFields, PyTupleObjectStruct) PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") + at bootstrap_function +def init_intobject(space): + "Type description of PyTupleObject" + setup_class_for_cpyext(W_AbstractTupleObject, + basestruct=PyTupleObject.TO, + ) + #fill_pyobj=int_fill_pyobj, + #fill_pypy=int_fill_pypy, + #realize_subclass_of=W_IntObject) + @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): + ZZZ return W_TupleObject([space.w_None] * size) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) @@ -30,10 +47,13 @@ assert isinstance(w_t, W_TupleObject) w_t.wrappeditems[pos] = w_obj - at cpython_api([PyObject, Py_ssize_t], PyObject) + at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) def PyTuple_GetItem(space, w_t, pos): - if not PyTuple_Check(space, w_t): + if not isinstance(w_t, W_AbstractTupleObject): PyErr_BadInternalCall(space) + #if w_t.cpyext_returned_items_can_be_borrowed: + ZZZ.x.x.x + xxxxxxx w_obj = space.getitem(w_t, space.wrap(pos)) return borrow_from(w_t, w_obj) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -2,6 +2,7 @@ from rpython.rlib import jit from 
rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rstring import rsplit from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype @@ -22,7 +23,7 @@ from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, get_typedescr, from_pyobj, as_pyobj, setup_class_for_cpyext, get_pyobj_and_incref, get_pyobj_and_xincref, - track_reference, RefcountState, borrow_from, Py_DecRef) + track_reference, RefcountState, borrow_from, Py_DecRef, RRC_PERMANENT) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State @@ -200,11 +201,9 @@ w_subtype = args_w[0] w_args = space.newtuple(args_w[1:]) - subtype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_subtype)) - try: - w_obj = generic_cpy_call(space, tp_new, subtype, w_args, w_kwds) - finally: - Py_DecRef(space, w_subtype) + subtype = rffi.cast(PyTypeObjectPtr, as_pyobj(space, w_subtype)) + w_obj = generic_cpy_call(space, tp_new, subtype, w_args, w_kwds) + keepalive_until_here(w_subtype) return w_obj @specialize.memo() @@ -463,7 +462,7 @@ pto = lltype.malloc(PyTypeObject, flavor='raw', zero=True, track_allocation=False) pto.c_tp_flags |= Py_TPFLAGS_READYING - return pto, False + return pto, RRC_PERMANENT def type_fill_pyobj(space, w_type, pto): """ @@ -561,7 +560,7 @@ w_metatype = from_pyobj(space, pto.c_ob_type) w_type = space.allocate_instance(W_TypeObject, w_metatype) - return w_type, False + return w_type, RRC_PERMANENT def type_fill_pypy(space, w_type, py_obj): pto = rffi.cast(PyTypeObjectPtr, py_obj) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -132,6 +132,8 @@ Cls_oo = make_specialised_class((object, object)) Cls_ff = 
make_specialised_class((float, float)) +Cls_oo.cpyext_returned_items_can_be_borrowed = True + def makespecialisedtuple(space, list_w): from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.floatobject import W_FloatObject diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -32,6 +32,7 @@ class W_AbstractTupleObject(W_Root): __slots__ = () + cpyext_returned_items_can_be_borrowed = False def __repr__(self): """representation for debugging purposes""" @@ -248,6 +249,7 @@ class W_TupleObject(W_AbstractTupleObject): _immutable_fields_ = ['wrappeditems[*]'] + cpyext_returned_items_can_be_borrowed = True def __init__(self, wrappeditems): make_sure_not_resized(wrappeditems) diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -112,8 +112,11 @@ else: ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt >= REFCNT_FROM_PYPY_LIGHT: - assert ob.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT - lltype.free(ob, flavor='raw', track_allocation=track_allocation) + ob.c_ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + ob.c_ob_pypy_link = 0 + if ob.c_ob_refcnt == 0: + lltype.free(ob, flavor='raw', + track_allocation=track_allocation) else: assert ob.c_ob_refcnt >= REFCNT_FROM_PYPY assert ob.c_ob_refcnt < int(REFCNT_FROM_PYPY_LIGHT * 0.99) From noreply at buildbot.pypy.org Tue Oct 20 18:31:19 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 18:31:19 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Performance Message-ID: <20151020163119.AC5061C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80358:02459f7a6d8f Date: 2015-10-20 18:31 +0200 http://bitbucket.org/pypy/pypy/changeset/02459f7a6d8f/ Log: Performance diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ 
b/pypy/module/cpyext/intobject.py @@ -5,7 +5,7 @@ cpython_api, cpython_struct, build_type_checkers3, bootstrap_function, PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t) from pypy.module.cpyext.pyobject import (setup_class_for_cpyext, new_pyobj, - from_pyobj) + from_pyobj, get_pyobj_and_incref) from rpython.rlib.rarithmetic import r_uint, intmask, LONG_TEST, r_ulonglong from pypy.objspace.std.intobject import W_IntObject import sys @@ -65,19 +65,23 @@ intval = rffi.cast(lltype.Signed, py_int.c_ob_ival) W_IntObject.__init__(w_obj, intval) + @cpython_api([], lltype.Signed, error=CANNOT_FAIL) def PyInt_GetMax(space): """Return the system's idea of the largest integer it can handle (LONG_MAX, as defined in the system header files).""" return sys.maxint +def new_pyint(space, ival): + py_int = new_pyobj(PyIntObjectStruct, _PyInt_Type(space)) + py_int.c_ob_ival = ival + return rffi.cast(PyObject, py_int) + @cpython_api([lltype.Signed], PyObject) def PyInt_FromLong(space, ival): """Create a new integer object with a value of ival. """ - py_int = new_pyobj(PyIntObjectStruct, _PyInt_Type(space)) - py_int.c_ob_ival = ival - return rffi.cast(PyObject, py_int) + return new_pyint(space, ival) @cpython_api([PyObject], lltype.Signed, error=-1) def PyInt_AsLong(space, py_obj): @@ -90,14 +94,15 @@ space.wrap("an integer is required, got NULL")) if PyInt_Check(space, py_obj): return PyInt_AS_LONG(space, py_obj) - w_obj = from_pyobj(space, py_obj) - return space.int_w(space.int(w_obj)) + else: + w_obj = from_pyobj(space, py_obj) + return space.int_w(space.int(w_obj)) # XXX win64: check range @cpython_api([PyObject], lltype.Unsigned, error=-1) def PyInt_AsUnsignedLong(space, w_obj): """Return a C unsigned long representation of the contents of pylong. If pylong is greater than ULONG_MAX, an OverflowError is - raised.""" + raised. 
(NOT ON CPYTHON)""" if w_obj is None: raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) @@ -105,34 +110,39 @@ @cpython_api([PyObject], rffi.ULONG, error=-1) -def PyInt_AsUnsignedLongMask(space, w_obj): +def PyInt_AsUnsignedLongMask(space, py_obj): """Will first attempt to cast the object to a PyIntObject or PyLongObject, if it is not already one, and then return its value as unsigned long. This function does not check for overflow. """ - w_int = space.int(w_obj) - if space.isinstance_w(w_int, space.w_int): - num = space.int_w(w_int) - return r_uint(num) + if not py_obj: + raise OperationError(space.w_TypeError, + space.wrap("an integer is required, got NULL")) + if PyInt_Check(space, py_obj): + return rffi.cast(rffi.ULONG, PyInt_AS_LONG(space, py_obj)) else: - num = space.bigint_w(w_int) + w_obj = from_pyobj(space, py_obj) + num = space.bigint_w(space.int(w_obj)) return num.uintmask() @cpython_api([PyObject], rffi.ULONGLONG, error=-1) -def PyInt_AsUnsignedLongLongMask(space, w_obj): +def PyInt_AsUnsignedLongLongMask(space, py_obj): """Will first attempt to cast the object to a PyIntObject or PyLongObject, if it is not already one, and then return its value as unsigned long long, without checking for overflow. """ - w_int = space.int(w_obj) - if space.isinstance_w(w_int, space.w_int): - num = space.int_w(w_int) - return r_ulonglong(num) + if not py_obj: + raise OperationError(space.w_TypeError, + space.wrap("an integer is required, got NULL")) + if PyInt_Check(space, py_obj): + return rffi.cast(rffi.ULONGLONG, PyInt_AS_LONG(space, py_obj)) else: - num = space.bigint_w(w_int) + w_obj = from_pyobj(space, py_obj) + num = space.bigint_w(space.int(w_obj)) return num.ulonglongmask() + @cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL) def PyInt_AS_LONG(space, py_obj): """Return the value of the object w_int. 
No error checking is performed.""" @@ -140,15 +150,19 @@ return py_int.c_ob_ival @cpython_api([PyObject], Py_ssize_t, error=-1) -def PyInt_AsSsize_t(space, w_obj): +def PyInt_AsSsize_t(space, py_obj): """Will first attempt to cast the object to a PyIntObject or PyLongObject, if it is not already one, and then return its value as Py_ssize_t. """ - if w_obj is None: + if not py_obj: raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) - return space.int_w(w_obj) # XXX this is wrong on win64 + if PyInt_Check(space, py_obj): + return rffi.cast(Py_ssize_t, PyInt_AS_LONG(space, py_obj)) + else: + w_obj = from_pyobj(space, py_obj) + return space.int_w(space.int(w_obj)) LONG_MAX = int(LONG_TEST - 1) @@ -158,8 +172,9 @@ LONG_MAX, a long integer object is returned. """ if ival <= LONG_MAX: - return space.wrap(intmask(ival)) - return space.wrap(ival) + return new_pyint(rffi.cast(rffi.LONG, ival)) + else: + return get_pyobj_and_incref(space.wrap(ival)) @cpython_api([Py_ssize_t], PyObject) def PyInt_FromSsize_t(space, ival): @@ -167,7 +182,8 @@ than LONG_MAX or smaller than LONG_MIN, a long integer object is returned. 
""" - return space.wrap(ival) + # XXX win64 + return new_pyint(ival) @cpython_api([CONST_STRING, rffi.CCHARPP, rffi.INT_real], PyObject) def PyInt_FromString(space, str, pend, base): From noreply at buildbot.pypy.org Tue Oct 20 20:00:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 20:00:08 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Work on tuples Message-ID: <20151020180008.4A0611C1359@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80359:c9dbf734d8a9 Date: 2015-10-20 20:00 +0200 http://bitbucket.org/pypy/pypy/changeset/c9dbf734d8a9/ Log: Work on tuples diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -593,9 +593,10 @@ ZZZ _PyXxx_Type = func_with_new_name(_PyXxx_Type, '_' + py_type_name) - def check(space, w_obj): + def check(space, py_obj): "Implements the Py_Xxx_Check function" - w_obj_type = space.type(w_obj) + from pypy.module.cpyext.pyobject import from_pyobj + w_obj_type = from_pyobj(space, py_obj.c_ob_type) w_type = get_w_type(space) return (space.is_w(w_obj_type, w_type) or space.is_true(space.issubtype(w_obj_type, w_type))) diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -7,11 +7,16 @@ extern "C" { #endif + typedef struct { PyObject_HEAD long ob_ival; } PyIntObject; +/* Macro, trading safety for speed */ +#define PyInt_AS_LONG(op) (((PyIntObject *)(op))->ob_ival) + + #ifdef __cplusplus } #endif diff --git a/pypy/module/cpyext/include/tupleobject.h b/pypy/module/cpyext/include/tupleobject.h --- a/pypy/module/cpyext/include/tupleobject.h +++ b/pypy/module/cpyext/include/tupleobject.h @@ -15,8 +15,13 @@ /* defined in varargswrapper.c */ PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...); -#define PyTuple_SET_ITEM PyTuple_SetItem -#define PyTuple_GET_ITEM 
PyTuple_GetItem + +/* Macro, trading safety for speed */ +#define PyTuple_GET_ITEM(op, i) (((PyTupleObject *)(op))->ob_item[i]) +#define PyTuple_GET_SIZE(op) Py_SIZE(op) + +/* Macro, *only* to be used to fill in brand new tuples */ +#define PyTuple_SET_ITEM(op, i, v) (((PyTupleObject *)(op))->ob_item[i] = v) #ifdef __cplusplus diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -30,11 +30,12 @@ # --the structure type derived from PyObject-- basestruct=PyIntObjectStruct, - # --after a PyIntObject is allocated, we call this function to - # fill it. It gets attached as RRC_PERMANENT_LIGHT by default, - # which means the association is permanent (the PyIntObject is - # alive and won't appear to move as long as the W_IntObject is - # alive) and light (the PyIntObject can be freed with free()).-- + # --from a W_IntObject, we allocate a PyIntObject and then we + # call this function to fill it. 
It gets attached as + # RRC_PERMANENT_LIGHT by default, which means the + # association is permanent (the PyIntObject is alive and + # won't appear to move as long as the W_IntObject is alive) + # and light (the PyIntObject can be freed with free()).-- fill_pyobj=int_fill_pyobj, # --reverse direction: from a PyIntObject, we make a W_IntObject @@ -93,7 +94,7 @@ raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) if PyInt_Check(space, py_obj): - return PyInt_AS_LONG(space, py_obj) + return _PyInt_AS_LONG(py_obj) else: w_obj = from_pyobj(space, py_obj) return space.int_w(space.int(w_obj)) # XXX win64: check range @@ -119,7 +120,7 @@ raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) if PyInt_Check(space, py_obj): - return rffi.cast(rffi.ULONG, PyInt_AS_LONG(space, py_obj)) + return rffi.cast(rffi.ULONG, _PyInt_AS_LONG(py_obj)) else: w_obj = from_pyobj(space, py_obj) num = space.bigint_w(space.int(w_obj)) @@ -136,15 +137,14 @@ raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) if PyInt_Check(space, py_obj): - return rffi.cast(rffi.ULONGLONG, PyInt_AS_LONG(space, py_obj)) + return rffi.cast(rffi.ULONGLONG, _PyInt_AS_LONG(py_obj)) else: w_obj = from_pyobj(space, py_obj) num = space.bigint_w(space.int(w_obj)) return num.ulonglongmask() - at cpython_api([PyObject], lltype.Signed, error=CANNOT_FAIL) -def PyInt_AS_LONG(space, py_obj): +def _PyInt_AS_LONG(py_obj): """Return the value of the object w_int. 
No error checking is performed.""" py_int = rffi.cast(PyIntObject, py_obj) return py_int.c_ob_ival @@ -159,7 +159,7 @@ raise OperationError(space.w_TypeError, space.wrap("an integer is required, got NULL")) if PyInt_Check(space, py_obj): - return rffi.cast(Py_ssize_t, PyInt_AS_LONG(space, py_obj)) + return rffi.cast(Py_ssize_t, _PyInt_AS_LONG(py_obj)) else: w_obj = from_pyobj(space, py_obj) return space.int_w(space.int(w_obj)) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -88,8 +88,6 @@ if tp_alloc_pyobj or tp_fill_pyobj or realize_subclass_of: if realize_subclass_of is None: realize_subclass_of = W_Class - assert 'typedef' in realize_subclass_of.__dict__, ( - "no 'typedef' exactly on %s" % (realize_subclass_of,)) # if not tp_alloc_pypy: W_CPyExtPlaceHolder = get_cpyextplaceholder_subclass( @@ -290,6 +288,9 @@ return lltype.nullptr(PyObject.TO) +def pyobj_has_w_obj(pyobj): + return rawrefcount.to_obj(W_Root, pyobj) is not None + @specialize.ll() def from_pyobj(space, pyobj): assert is_pyobj(pyobj) @@ -369,8 +370,13 @@ @specialize.ll() -def new_pyobj(PYOBJ_TYPE, ob_type): - ob = lltype.malloc(PYOBJ_TYPE, flavor='raw', track_allocation=False) +def new_pyobj(PYOBJ_TYPE, ob_type, length=None): + if length is None: + ob = lltype.malloc(PYOBJ_TYPE, flavor='raw', track_allocation=False) + else: + ob = lltype.malloc(PYOBJ_TYPE, length, flavor='raw', + track_allocation=False) + ob.c_ob_size = length ob.c_ob_refcnt = 1 ob.c_ob_type = ob_type ob.c_ob_pypy_link = 0 diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -1,6 +1,8 @@ import py -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref +from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT +from pypy.module.cpyext.pyobject import 
PyObject, PyObjectP, from_pyobj +from pypy.module.cpyext.pyobject import pyobj_has_w_obj, get_pyobj_and_incref from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi, lltype @@ -9,13 +11,66 @@ def test_tupleobject(self, space, api): assert not api.PyTuple_Check(space.w_None) - assert api.PyTuple_SetItem(space.w_None, 0, space.w_None) == -1 - atuple = space.newtuple([0, 1, 'yay']) + py_none = get_pyobj_and_incref(space, space.w_None) + assert api.PyTuple_SetItem(space.w_None, 0, py_none) == -1 + atuple = space.newtuple([space.wrap(0), space.wrap(1), + space.wrap('yay')]) assert api.PyTuple_Size(atuple) == 3 - assert api.PyTuple_GET_SIZE(atuple) == 3 + #assert api.PyTuple_GET_SIZE(atuple) == 3 --- a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tupleobject_spec_ii(self, space, api): + atuple = space.newtuple([space.wrap(10), space.wrap(11)]) + assert api.PyTuple_Size(atuple) == 2 + w_obj1 = from_pyobj(space, api.PyTuple_GetItem(atuple, 0)) + w_obj2 = from_pyobj(space, api.PyTuple_GetItem(atuple, 1)) + assert space.eq_w(w_obj1, space.wrap(10)) + assert space.eq_w(w_obj2, space.wrap(11)) + + def test_tupleobject_spec_oo(self, space, api): + w_obj1 = space.newlist([]) + w_obj2 = space.newlist([]) + atuple = space.newtuple([w_obj1, w_obj2]) + assert api.PyTuple_Size(atuple) == 2 + assert from_pyobj(space, api.PyTuple_GetItem(atuple, 0)) is w_obj1 + assert from_pyobj(space, api.PyTuple_GetItem(atuple, 1)) is w_obj2 + + def test_new_setitem(self, space, api): + w_obj1 = space.newlist([]) + pyobj1 = get_pyobj_and_incref(space, w_obj1) + w_obj2 = space.newlist([]) + pyobj2 = get_pyobj_and_incref(space, w_obj2) + py_tuple = api.PyTuple_New(2) + assert not pyobj_has_w_obj(py_tuple) + + assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + api.PyTuple_SetItem(py_tuple, 0, pyobj1) + api.PyTuple_SetItem(py_tuple, 1, pyobj2) 
+ assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + + assert api.PyTuple_GetItem(py_tuple, 0) == pyobj1 + assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + + api.PyTuple_SetItem(py_tuple, 0, pyobj2) + assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 0 + assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + + assert not pyobj_has_w_obj(py_tuple) + w_tup = from_pyobj(space, py_tuple) + assert w_tup is from_pyobj(space, py_tuple) + assert api.PyTuple_GetItem(py_tuple, 1) == pyobj2 + assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 0 + assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + + assert space.getitem(w_tup, space.wrap(0)) is w_obj2 + assert space.getitem(w_tup, space.wrap(1)) is w_obj2 + + assert api.PyTuple_SetItem(py_tuple, 0, pyobj1) == -1 + api.PyErr_Clear() + def test_tuple_resize(self, space, api): py_tuple = api.PyTuple_New(3) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') @@ -31,18 +86,6 @@ api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') - def test_setitem(self, space, api): - atuple = space.newtuple([space.wrap(0), space.wrap("hello")]) - assert api.PyTuple_Size(atuple) == 2 - assert space.eq_w(space.getitem(atuple, space.wrap(0)), space.wrap(0)) - assert space.eq_w(space.getitem(atuple, space.wrap(1)), space.wrap("hello")) - w_obj = space.wrap(1) - api.Py_IncRef(w_obj) - api.PyTuple_SetItem(atuple, 1, w_obj) - assert api.PyTuple_Size(atuple) == 2 - assert space.eq_w(space.getitem(atuple, space.wrap(0)), space.wrap(0)) - assert space.eq_w(space.getitem(atuple, space.wrap(1)), space.wrap(1)) - def test_getslice(self, space, api): w_tuple = space.newtuple([space.wrap(i) for i in range(10)]) w_slice = api.PyTuple_GetSlice(w_tuple, 3, -3) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,79 +1,114 @@ -from pypy.interpreter.error import 
OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, - cpython_struct, PyObjectFields, build_type_checkers, bootstrap_function) + cpython_struct, PyVarObjectFields, build_type_checkers3, bootstrap_function) from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, - setup_class_for_cpyext) + setup_class_for_cpyext, as_pyobj, get_pyobj_and_incref, from_pyobj, + pyobj_has_w_obj, RRC_PERMANENT, RRC_PERMANENT_LIGHT, new_pyobj) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject, W_AbstractTupleObject PyTupleObjectStruct = lltype.ForwardReference() PyTupleObject = lltype.Ptr(PyTupleObjectStruct) -PyTupleObjectFields = PyObjectFields + \ +PyTupleObjectFields = PyVarObjectFields + \ (("ob_item", rffi.CArray(PyObject)),) cpython_struct("PyTupleObject", PyTupleObjectFields, PyTupleObjectStruct) -PyTuple_Check, PyTuple_CheckExact = build_type_checkers("Tuple") +PyTuple_Check, PyTuple_CheckExact, _PyTuple_Type = build_type_checkers3("Tuple") + @bootstrap_function def init_intobject(space): "Type description of PyTupleObject" - setup_class_for_cpyext(W_AbstractTupleObject, - basestruct=PyTupleObject.TO, - ) - #fill_pyobj=int_fill_pyobj, - #fill_pypy=int_fill_pypy, - #realize_subclass_of=W_IntObject) + setup_class_for_cpyext( + W_AbstractTupleObject, + basestruct=PyTupleObjectStruct, + + # --from a W_TupleObject, we call this function to allocate and + # fill a PyTupleObject -- + alloc_pyobj=tuple_alloc_pyobj, + + # --reverse direction: from a PyTupleObject, we make a W_TupleObject + # by instantiating a custom subclass of W_TupleObject-- + realize_subclass_of=W_TupleObject, + + # --and then we call this function to initialize the W_TupleObject-- + fill_pypy=tuple_fill_pypy, + ) + +def tuple_alloc_pyobj(space, w_obj): + """ + Makes a PyTupleObject from a 
W_AbstractTupleObject. + """ + assert isinstance(w_obj, W_AbstractTupleObject) + lst_w = w_obj.tolist() + ob = lltype.malloc(PyTupleObjectStruct, len(lst_w), flavor='raw', + track_allocation=False) + ob.c_ob_size = len(lst_w) + if w_obj.cpyext_returned_items_can_be_borrowed: + for i in range(len(lst_w)): + ob.c_ob_item[i] = as_pyobj(space, lst_w[i]) + return ob, RRC_PERMANENT_LIGHT + else: + for i in range(len(lst_w)): + ob.c_ob_item[i] = get_pyobj_and_incref(space, lst_w[i]) + return ob, RRC_PERMANENT + +def tuple_fill_pypy(space, w_obj, py_obj): + """ + Fills in a W_TupleObject from a PyTupleObject. + """ + py_tuple = rffi.cast(PyTupleObject, py_obj) + objects_w = [from_pyobj(space, py_tuple.c_ob_item[i]) + for i in range(py_tuple.c_ob_size)] + W_TupleObject.__init__(w_obj, objects_w) + @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, size): - ZZZ - return W_TupleObject([space.w_None] * size) + py_tuple = new_pyobj(PyTupleObjectStruct, _PyTuple_Type(space), size) + for i in range(size): + py_tuple.c_ob_item[i] = lltype.nullptr(PyObject.TO) + return py_tuple @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) -def PyTuple_SetItem(space, w_t, pos, w_obj): - if not PyTuple_Check(space, w_t): - # XXX this should also steal a reference, test it!!! +def PyTuple_SetItem(space, py_t, pos, py_obj): + if not PyTuple_Check(space, py_t) or py_t.c_ob_refcnt != 1: + Py_DecRef(space, py_obj) PyErr_BadInternalCall(space) - _setitem_tuple(w_t, pos, w_obj) - Py_DecRef(space, w_obj) # SetItem steals a reference! + py_tuple = rffi.cast(PyTupleObject, py_t) + if pos < 0 or pos >= py_tuple.c_ob_size: + raise oefmt(w_IndexError, "tuple assignment index out of range") + + olditem = py_tuple.c_ob_item[pos] + py_tuple.c_ob_item[pos] = py_obj + + if olditem: + Py_DecRef(space, olditem) return 0 -def _setitem_tuple(w_t, pos, w_obj): - # this function checks that w_t is really a W_TupleObject. 
It - # should only ever be called with a freshly built tuple from - # PyTuple_New(), which always return a W_TupleObject, even if there - # are also other implementations of tuples. - assert isinstance(w_t, W_TupleObject) - w_t.wrappeditems[pos] = w_obj + at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) +def PyTuple_GetItem(space, py_t, pos): + if not PyTuple_Check(space, py_t): + PyErr_BadInternalCall(space) + py_tuple = rffi.cast(PyTupleObject, py_t) + if pos < 0 or pos >= py_tuple.c_ob_size: + raise oefmt(w_IndexError, "tuple assignment index out of range") - at cpython_api([PyObject, Py_ssize_t], PyObject, result_borrowed=True) -def PyTuple_GetItem(space, w_t, pos): - if not isinstance(w_t, W_AbstractTupleObject): - PyErr_BadInternalCall(space) - #if w_t.cpyext_returned_items_can_be_borrowed: - ZZZ.x.x.x - xxxxxxx - w_obj = space.getitem(w_t, space.wrap(pos)) - return borrow_from(w_t, w_obj) - - at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) -def PyTuple_GET_SIZE(space, w_t): - """Return the size of the tuple p, which must be non-NULL and point to a tuple; - no error checking is performed. """ - return space.int_w(space.len(w_t)) + return py_tuple.c_ob_item[pos] # borrowed @cpython_api([PyObject], Py_ssize_t, error=-1) -def PyTuple_Size(space, ref): +def PyTuple_Size(space, py_t): """Take a pointer to a tuple object, and return the size of that tuple.""" - if not PyTuple_Check(space, ref): - raise OperationError(space.w_TypeError, - space.wrap("expected tuple object")) - return PyTuple_GET_SIZE(space, ref) + if not PyTuple_Check(space, py_t): + PyErr_BadInternalCall(space) + py_tuple = rffi.cast(PyTupleObject, py_t) + return py_tuple.c_ob_size @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def _PyTuple_Resize(space, ref, newsize): + ZZZ """Can be used to resize a tuple. newsize will be the new length of the tuple. 
Because tuples are supposed to be immutable, this should only be used if there is only one reference to the object. Do not use this if the tuple may already @@ -101,6 +136,7 @@ @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject) def PyTuple_GetSlice(space, w_obj, low, high): + ZZZ """Take a slice of the tuple pointed to by p from low to high and return it as a new tuple. """ diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -515,8 +515,10 @@ struct_use_ctypes_storage(struct_container, struct_storage) struct_container._setparentstructure(container, field_name) elif isinstance(FIELDTYPE, lltype.Array): - assert FIELDTYPE._hints.get('nolength', False) == False - arraycontainer = _array_of_known_length(FIELDTYPE) + if FIELDTYPE._hints.get('nolength', False): + arraycontainer = _array_of_unknown_length(FIELDTYPE) + else: + arraycontainer = _array_of_known_length(FIELDTYPE) arraycontainer._storage = ctypes.pointer( getattr(ctypes_storage.contents, field_name)) arraycontainer._setparentstructure(container, field_name) @@ -934,7 +936,8 @@ REAL_TYPE = T.TO if T.TO._arrayfld is not None: carray = getattr(cobj.contents, T.TO._arrayfld) - container = lltype._struct(T.TO, carray.length) + length = getattr(carray, 'length', 9999) # XXX + container = lltype._struct(T.TO, length) else: # special treatment of 'OBJECT' subclasses if get_rtyper() and lltype._castdepth(REAL_TYPE, OBJECT) >= 0: From noreply at buildbot.pypy.org Tue Oct 20 20:10:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 20:10:07 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: finish tuples Message-ID: <20151020181007.212E71C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80360:d6427723a4a6 Date: 2015-10-20 20:10 +0200 http://bitbucket.org/pypy/pypy/changeset/d6427723a4a6/ Log: finish 
tuples diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -74,16 +74,16 @@ def test_tuple_resize(self, space, api): py_tuple = api.PyTuple_New(3) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - ar[0] = rffi.cast(PyObject, make_ref(space, py_tuple)) + ar[0] = py_tuple api._PyTuple_Resize(ar, 2) - py_tuple = from_ref(space, ar[0]) - assert space.int_w(space.len(py_tuple)) == 2 - + py_tuple = ar[0] + assert api.PyTuple_Size(py_tuple) == 2 + api._PyTuple_Resize(ar, 10) - py_tuple = from_ref(space, ar[0]) - assert space.int_w(space.len(py_tuple)) == 10 - - api.Py_DecRef(ar[0]) + py_tuple = ar[0] + assert api.PyTuple_Size(py_tuple) == 10 + + api.Py_DecRef(py_tuple) lltype.free(ar, flavor='raw') def test_getslice(self, space, api): diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -69,7 +69,7 @@ py_tuple = new_pyobj(PyTupleObjectStruct, _PyTuple_Type(space), size) for i in range(size): py_tuple.c_ob_item[i] = lltype.nullptr(PyObject.TO) - return py_tuple + return rffi.cast(PyObject, py_tuple) @cpython_api([PyObject, Py_ssize_t, PyObject], rffi.INT_real, error=-1) def PyTuple_SetItem(space, py_t, pos, py_obj): @@ -108,7 +108,6 @@ @cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) def _PyTuple_Resize(space, ref, newsize): - ZZZ """Can be used to resize a tuple. newsize will be the new length of the tuple. Because tuples are supposed to be immutable, this should only be used if there is only one reference to the object. Do not use this if the tuple may already @@ -119,24 +118,29 @@ this function. If the object referenced by *p is replaced, the original *p is destroyed. 
On failure, returns -1 and sets *p to NULL, and raises MemoryError or SystemError.""" - py_tuple = from_ref(space, ref[0]) - if not PyTuple_Check(space, py_tuple): + py_t = ref[0] + if not PyTuple_Check(space, py_t) or py_t.c_ob_refcnt != 1: PyErr_BadInternalCall(space) - py_newtuple = PyTuple_New(space, newsize) - - to_cp = newsize - oldsize = space.int_w(space.len(py_tuple)) - if oldsize < newsize: - to_cp = oldsize - for i in range(to_cp): - _setitem_tuple(py_newtuple, i, space.getitem(py_tuple, space.wrap(i))) - Py_DecRef(space, ref[0]) - ref[0] = make_ref(space, py_newtuple) + + py_oldtuple = rffi.cast(PyTupleObject, py_t) + py_newtuple = rffi.cast(PyTupleObject, PyTuple_New(space, newsize)) + + oldsize = py_oldtuple.c_ob_size + if oldsize > newsize: + to_copy = newsize + for i in range(to_copy, oldsize): + Py_DecRef(space, py_oldtuple.c_ob_item[i]) + else: + to_copy = oldsize + for i in range(to_copy): + py_newtuple.c_ob_item[i] = py_oldtuple.c_ob_item[i] + + ref[0] = rffi.cast(PyObject, py_newtuple) + Py_DecRef(space, py_oldtuple) return 0 @cpython_api([PyObject, Py_ssize_t, Py_ssize_t], PyObject) def PyTuple_GetSlice(space, w_obj, low, high): - ZZZ """Take a slice of the tuple pointed to by p from low to high and return it as a new tuple. 
""" From noreply at buildbot.pypy.org Tue Oct 20 22:06:46 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 20 Oct 2015 22:06:46 +0200 (CEST) Subject: [pypy-commit] pypy default: backout 65422948292c now that the buildbot reports xpasses correctly Message-ID: <20151020200646.1C0C51C0036@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r80361:001457fc6b64 Date: 2015-10-20 21:06 +0100 http://bitbucket.org/pypy/pypy/changeset/001457fc6b64/ Log: backout 65422948292c now that the buildbot reports xpasses correctly diff --git a/rpython/rlib/test/test_rweakkeydict.py b/rpython/rlib/test/test_rweakkeydict.py --- a/rpython/rlib/test/test_rweakkeydict.py +++ b/rpython/rlib/test/test_rweakkeydict.py @@ -121,6 +121,8 @@ f(1) interpret(f, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") def test_rpython_merge_RWeakKeyDictionary3(): def g(x): if x: @@ -129,10 +131,11 @@ d = RWeakKeyDictionary(KY, VX) d.set(KX(), VX()) - # may fail with AssertionError, depending on annotation order - with py.test.raises((UnionError, AssertionError)): + with py.test.raises(UnionError): interpret(g, [1]) + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") def test_rpython_merge_RWeakKeyDictionary4(): def g(x): if x: @@ -141,8 +144,7 @@ d = RWeakKeyDictionary(KX, VY) d.set(KX(), VX()) - # may fail with AssertionError, depending on annotation order - with py.test.raises((UnionError, AssertionError)): + with py.test.raises(UnionError): interpret(g, [1]) @py.test.mark.xfail(reason="not implemented, messy") From noreply at buildbot.pypy.org Tue Oct 20 23:10:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 20 Oct 2015 23:10:55 +0200 (CEST) Subject: [pypy-commit] pypy default: add merged branch here Message-ID: <20151020211055.1A2AD1C06F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80362:2030b6ea3b82 Date: 2015-10-20 23:10 +0200 
http://bitbucket.org/pypy/pypy/changeset/2030b6ea3b82/ Log: add merged branch here diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -8,3 +8,5 @@ .. branch: ppc-updated-backend The PowerPC JIT backend is merged. + +.. branch: osx-libffi From noreply at buildbot.pypy.org Wed Oct 21 01:19:25 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:25 +0200 (CEST) Subject: [pypy-commit] pypy ndarray-promote: use jit.promote for arr.implementation and other attributes Message-ID: <20151020231925.D21821C1359@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: ndarray-promote Changeset: r80363:085cd1fc6e97 Date: 2015-10-19 17:04 +0800 http://bitbucket.org/pypy/pypy/changeset/085cd1fc6e97/ Log: use jit.promote for arr.implementation and other attributes diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -150,7 +150,7 @@ chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1, arr.get_shape()[axis]) view = new_view(space, res, chunks) - view.implementation.setslice(space, arr) + view.get_implementation().setslice(space, arr) axis_start += arr.get_shape()[axis] return res @@ -166,7 +166,7 @@ for i in range(repeats): chunks = [Chunk(i, shape[0] - repeats + i, repeats, orig_size)] view = new_view(space, w_res, chunks) - view.implementation.setslice(space, arr) + view.get_implementation().setslice(space, arr) else: axis = space.int_w(w_axis) shape = arr.get_shape()[:] @@ -178,7 +178,7 @@ chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, orig_size) view = new_view(space, w_res, chunks) - view.implementation.setslice(space, arr) + view.get_implementation().setslice(space, arr) return w_res diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ 
b/pypy/module/micronumpy/base.py @@ -1,6 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from rpython.tool.pairtype import extendabletype +from rpython.rlib import jit from rpython.rlib.rarithmetic import ovfcheck from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -122,8 +123,8 @@ def new_slice(space, offset, strides, backstrides, shape, parent, w_arr, dtype=None): from pypy.module.micronumpy import concrete w_base = w_arr - if w_arr.implementation.base() is not None: - w_base = w_arr.implementation.base() + if w_arr.get_implementation().base() is not None: + w_base = w_arr.get_implementation().base() impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, w_base, dtype) return wrap_impl(space, space.type(w_arr), w_arr, impl) @@ -145,19 +146,30 @@ return w_arr def get_shape(self): - return self.implementation.get_shape() + return self.get_implementation().get_shape() + + def get_implementation(self): + implementation = self.implementation + jit.hint(implementation, promote=True) + return implementation def get_dtype(self, space=None): - return self.implementation.dtype + dtype = self.get_implementation().dtype + jit.hint(dtype, promote=True) + return dtype def get_order(self): - return self.implementation.order + order = self.get_implementation().order + jit.hint(order, promote=True) + return order def get_start(self): - return self.implementation.start + start = self.get_implementation().start + jit.hint(start, promote=True) + return start def get_flags(self): - return self.implementation.flags + return self.get_implementation().get_flags() def ndims(self): return len(self.get_shape()) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -57,7 +57,9 @@ return backstrides def get_flags(self): - return self.flags + flags = 
self.flags + jit.hint(flags, promote=True) + return flags def getitem(self, index): return self.dtype.read(self, index, 0) @@ -84,7 +86,7 @@ ','.join([str(x) for x in self.get_shape()]), ) shape = shape_agreement(space, self.get_shape(), arr) - impl = arr.implementation + impl = arr.get_implementation() if impl.storage == self.storage: impl = impl.copy(space) loop.setslice(space, shape, self, impl) @@ -293,7 +295,7 @@ w_value = convert_to_array(space, w_value) chunks = self._prepare_slice_args(space, w_index) view = new_view(space, orig_arr, chunks) - view.implementation.setslice(space, w_value) + view.get_implementation().setslice(space, w_value) def transpose(self, orig_array, axes=None): if len(self.get_shape()) < 2: @@ -347,7 +349,7 @@ nd = len(self.get_shape()) or 1 w_res = W_NDimArray.from_shape(space, [s, nd], index_type) loop.nonzero(w_res, self, box) - w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) + w_res = w_res.get_implementation().swapaxes(space, w_res, 0, 1) l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in range(nd)] return space.newtuple(l_w) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -77,7 +77,7 @@ shape = w_res.get_shape() if len(shape) < ndmin: shape = [1] * (ndmin - len(shape)) + shape - impl = w_res.implementation.set_shape(space, w_res, shape) + impl = w_res.get_implementation().set_shape(space, w_res, shape) if w_res is w_object: return W_NDimArray(impl) else: @@ -127,12 +127,12 @@ w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, - w_object.implementation.getitem(0))) + w_object.get_implementation().getitem(0))) else: - loop.setslice(space, shape, w_arr.implementation, w_object.implementation) + loop.setslice(space, shape, w_arr.get_implementation(), w_object.implementation) return w_arr else: - imp = 
w_object.implementation + imp = w_object.get_implementation() w_base = w_object if imp.base() is not None: w_base = imp.base() @@ -302,7 +302,7 @@ dtype = descriptor.variable_dtype(space, dtype.char + '1') if npy_order in (NPY.KEEPORDER, NPY.ANYORDER): # Try to copy the stride pattern - impl = w_a.implementation.astype(space, dtype, NPY.KEEPORDER) + impl = w_a.get_implementation().astype(space, dtype, NPY.KEEPORDER) if subok: w_type = space.type(w_a) else: diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -15,7 +15,7 @@ self._base = base self.dtype = base.get_dtype() self.shape = [base.get_size()] - self.storage = self._base.implementation.storage + self.storage = self._base.get_implementation().storage def base(self): return self._base diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -358,7 +358,7 @@ out_iter.track_index = False shape = w_arr.get_shape() shapelen = len(shape) - inner_iter, outer_iter = split_iter(w_arr.implementation, axis_flags) + inner_iter, outer_iter = split_iter(w_arr.get_implementation(), axis_flags) assert outer_iter.size == out_iter.size if identity is not None: @@ -423,7 +423,7 @@ arr_shape = w_arr.get_shape() temp_shape = arr_shape[:axis] + arr_shape[axis + 1:] temp = W_NDimArray.from_shape(space, temp_shape, calc_dtype, w_instance=w_arr) - temp_iter = AxisIter(temp.implementation, w_arr.get_shape(), axis) + temp_iter = AxisIter(temp.get_implementation(), w_arr.get_shape(), axis) temp_state = temp_iter.reset() arr_iter, arr_state = w_arr.create_iter() arr_iter.track_index = False @@ -521,7 +521,7 @@ shapelen = len(w_arr.get_shape()) axis_flags = [False] * shapelen axis_flags[axis] = True - inner_iter, outer_iter = split_iter(w_arr.implementation, axis_flags) + inner_iter, outer_iter = 
split_iter(w_arr.get_implementation(), axis_flags) outer_state = outer_iter.reset() out_iter, out_state = w_out.create_iter() while not outer_iter.done(outer_state): @@ -590,8 +590,8 @@ ''' left_shape = left.get_shape() right_shape = right.get_shape() - left_impl = left.implementation - right_impl = right.implementation + left_impl = left.get_implementation() + right_impl = right.get_implementation() assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype outi, outs = result.create_iter() @@ -644,7 +644,7 @@ if arr.is_scalar(): return arr.get_dtype().itemtype.bool(arr.get_scalar_value()) else: - return count_all_true_concrete(arr.implementation) + return count_all_true_concrete(arr.get_implementation()) nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', greens = ['shapelen', 'dims', 'dtype'], @@ -784,11 +784,11 @@ iter, state = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype()) itemsize = arr.get_dtype().elsize - with w_res_str.implementation as storage: + with w_res_str.get_implementation() as storage: res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), support.get_storage_as_int(storage)) while not iter.done(state): - w_res_str.implementation.setitem(0, iter.getitem(state)) + w_res_str.get_implementation().setitem(0, iter.getitem(state)) for i in range(itemsize): builder.append(res_str_casted[i]) state = iter.next(state) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -57,7 +57,7 @@ def descr_set_shape(self, space, w_new_shape): shape = get_shape_from_iterable(space, self.get_size(), w_new_shape) - self.implementation = self.implementation.set_shape(space, self, shape) + self.implementation = self.get_implementation().set_shape(space, self, shape) w_cls = space.type(self) if not space.is_w(w_cls, space.gettypefor(W_NDimArray)): # numpy madness - allow 
__array_finalize__(self, obj) @@ -65,11 +65,11 @@ wrap_impl(space, w_cls, self, self.implementation) def descr_get_strides(self, space): - strides = self.implementation.get_strides() + strides = self.get_implementation().get_strides() return space.newtuple([space.wrap(i) for i in strides]) def descr_get_dtype(self, space): - return self.implementation.dtype + return self.get_dtype() def descr_set_dtype(self, space, w_dtype): dtype = space.interp_w(descriptor.W_Dtype, space.call_function( @@ -104,7 +104,7 @@ order = support.get_order_as_CF(self.get_order(), order) arr = self if order != arr.get_order(): - arr = W_NDimArray(self.implementation.transpose(self, None)) + arr = W_NDimArray(self.get_implementation().transpose(self, None)) return space.wrap(loop.tostring(space, arr)) def getitem_filter(self, space, arr): @@ -186,7 +186,7 @@ self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices, return a view - chunks = self.implementation._prepare_slice_args(space, w_index) + chunks = self.get_implementation()._prepare_slice_args(space, w_index) return new_view(space, self, chunks) shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), @@ -202,9 +202,9 @@ self._prepare_array_index(space, w_index) if iter_shape is None: # w_index is a list of slices - chunks = self.implementation._prepare_slice_args(space, w_index) + chunks = self.get_implementation()._prepare_slice_args(space, w_index) view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + view.get_implementation().setslice(space, val_arr) return if support.product(iter_shape) == 0: return @@ -227,7 +227,7 @@ "interpreted as a valid boolean index") else: try: - w_ret = self.implementation.descr_getitem(space, self, w_idx) + w_ret = self.get_implementation().descr_getitem(space, self, w_idx) except ArrayArgumentException: w_ret = self.getitem_array_int(space, w_idx) if isinstance(w_ret, 
boxes.W_ObjectBox): @@ -236,10 +236,10 @@ return w_ret def getitem(self, space, index_list): - return self.implementation.getitem_index(space, index_list) + return self.get_implementation().getitem_index(space, index_list) def setitem(self, space, index_list, w_value): - self.implementation.setitem_index(space, index_list, w_value) + self.get_implementation().setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): if self.get_dtype().is_record(): @@ -247,10 +247,10 @@ idx = space.str_w(w_idx) view = self.getfield(space, idx) w_value = convert_to_array(space, w_value) - view.implementation.setslice(space, w_value) + view.get_implementation().setslice(space, w_value) return if space.is_w(w_idx, space.w_Ellipsis): - self.implementation.setslice(space, convert_to_array(space, w_value)) + self.get_implementation().setslice(space, convert_to_array(space, w_value)) return # TODO: multiarray/mapping.c calls a subclass's __getitem__ here, which # is a big performance hit but necessary for the matrix class. 
The original @@ -271,7 +271,7 @@ self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return try: - self.implementation.descr_setitem(space, self, w_idx, w_value) + self.get_implementation().descr_setitem(space, self, w_idx, w_value) except ArrayArgumentException: self.setitem_array_int(space, w_idx, w_value) @@ -279,7 +279,7 @@ dtype = self.get_dtype() if field not in dtype.fields: raise oefmt(space.w_ValueError, "field named %s not found", field) - arr = self.implementation + arr = self.get_implementation() ofs, subdtype = arr.dtype.fields[field][:2] # ofs only changes start # create a view of the original array by extending @@ -345,28 +345,28 @@ return s.build() def create_iter(self, shape=None, backward_broadcast=False): - assert isinstance(self.implementation, BaseConcreteArray) - return self.implementation.create_iter( + assert isinstance(self.get_implementation(), BaseConcreteArray) + return self.get_implementation().create_iter( shape=shape, backward_broadcast=backward_broadcast) def is_scalar(self): return self.ndims() == 0 def set_scalar_value(self, w_val): - return self.implementation.setitem(self.implementation.start, w_val) + return self.get_implementation().setitem(self.implementation.start, w_val) def fill(self, space, box): - self.implementation.fill(space, box) + self.get_implementation().fill(space, box) def descr_get_size(self, space): return space.wrap(self.get_size()) def get_size(self): - return self.implementation.get_size() + return self.get_implementation().get_size() def get_scalar_value(self): assert self.get_size() == 1 - return self.implementation.getitem(self.implementation.start) + return self.get_implementation().getitem(self.implementation.start) def descr_copy(self, space, w_order=None): if w_order is None: @@ -375,45 +375,45 @@ order = space.int_w(w_order) else: order = order_converter(space, w_order, NPY.KEEPORDER) - copy = self.implementation.copy(space, order) + copy = self.get_implementation().copy(space, order) 
w_subtype = space.type(self) return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - ret = self.implementation.get_real(space, self) + ret = self.get_implementation().get_real(space, self) return wrap_impl(space, space.type(self), self, ret) def descr_get_imag(self, space): - ret = self.implementation.get_imag(space, self) + ret = self.get_implementation().get_imag(space, self) return wrap_impl(space, space.type(self), self, ret) def descr_set_real(self, space, w_value): # copy (broadcast) values into self - self.implementation.set_real(space, self, w_value) + self.get_implementation().set_real(space, self, w_value) def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self if not self.get_dtype().is_complex(): raise oefmt(space.w_TypeError, 'array does not have imaginary part to set') - self.implementation.set_imag(space, self, w_value) + self.get_implementation().set_imag(space, self, w_value) def reshape(self, space, w_shape, order): new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) - new_impl = self.implementation.reshape(self, new_shape, order) + new_impl = self.get_implementation().reshape(self, new_shape, order) if new_impl is not None: return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space, space.wrap(order)) if arr.get_size() > 0: - new_implementation = arr.implementation.reshape(self, new_shape, order) + new_implementation = arr.get_implementation().reshape(self, new_shape, order) if new_implementation is None: raise oefmt(space.w_ValueError, 'could not reshape array of size %d to shape %s', arr.get_size(), str(new_shape)) arr.implementation = new_implementation else: - arr.implementation.shape = new_shape + arr.get_implementation().shape = new_shape return arr def descr_reshape(self, space, __args__): @@ -449,7 +449,7 @@ return self.reshape(space, w_shape, order) def descr_get_transpose(self, space, axes=None): - 
return W_NDimArray(self.implementation.transpose(self, axes)) + return W_NDimArray(self.get_implementation().transpose(self, axes)) def descr_transpose(self, space, args_w): if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): @@ -501,11 +501,11 @@ raise oefmt(space.w_ValueError, "bad axis1 argument to swapaxes") if axis2 < 0 or axis2 >= n: raise oefmt(space.w_ValueError, "bad axis2 argument to swapaxes") - return self.implementation.swapaxes(space, self, axis1, axis2) + return self.get_implementation().swapaxes(space, self, axis1, axis2) def descr_nonzero(self, space): index_type = get_dtype_cache(space).w_int64dtype - return self.implementation.nonzero(space, index_type) + return self.get_implementation().nonzero(space, index_type) def descr_tolist(self, space): if self.ndims() == 0: @@ -542,7 +542,7 @@ # scalars have no storage return self.reshape(space, space.wrap(1), order) w_res = self.descr_ravel(space, w_order) - if w_res.implementation.storage == self.implementation.storage: + if w_res.get_implementation().storage == self.get_implementation().storage: return w_res.descr_copy(space) return w_res @@ -555,7 +555,7 @@ dtype = self.get_dtype() w_arr = convert_to_array(space, w_obj) if dtype.is_record(): - return self.implementation.setslice(space, w_arr) + return self.get_implementation().setslice(space, w_arr) loop.flatiter_setitem(space, dtype, w_arr, iter, state, 1, iter.size) def descr_get_flatiter(self, space): @@ -607,7 +607,7 @@ # sz cannot overflow since self is valid sz = support.product(self.get_shape()) * self.get_dtype().elsize return W_NDimArray.from_shape_and_storage( - space, self.get_shape(), self.implementation.storage, + space, self.get_shape(), self.get_implementation().storage, self.get_dtype(), storage_bytes=sz, w_base=self) def descr_array_iface(self, space): @@ -615,7 +615,7 @@ Note: arr.__array__.data[0] is a pointer so arr must be kept alive while it is in use ''' - with self.implementation as storage: + with 
self.get_implementation() as storage: addr = support.get_storage_as_int(storage, self.get_start()) # will explode if it can't w_d = space.newdict() @@ -623,7 +623,7 @@ space.newtuple([space.wrap(addr), space.w_False])) space.setitem_str(w_d, 'shape', self.descr_get_shape(space)) space.setitem_str(w_d, 'typestr', self.get_dtype().descr_get_str(space)) - if self.implementation.order == NPY.CORDER: + if self.get_order() == NPY.CORDER: # Array is contiguous, no strides in the interface. strides = space.w_None else: @@ -657,7 +657,7 @@ if self.is_scalar(): return space.wrap(0) dtype = self.get_dtype().descr_newbyteorder(space, NPY.NATIVE) - contig = self.implementation.astype(space, dtype, self.get_order()) + contig = self.get_implementation().astype(space, dtype, self.get_order()) return contig.argsort(space, w_axis) @unwrap_spec(order=str, casting=str, subok=bool, copy=bool) @@ -673,7 +673,7 @@ elsize = 0 itype = cur_dtype.itemtype for i in range(self.get_size()): - elsize = max(elsize, len(itype.str_format(self.implementation.getitem(i), add_quotes=False))) + elsize = max(elsize, len(itype.str_format(self.get_implementation().getitem(i), add_quotes=False))) new_dtype = descriptor.variable_dtype( space, 'S' + str(elsize)) @@ -687,7 +687,7 @@ and (order in (NPY.KEEPORDER, NPY.ANYORDER) or order == self.get_order()) and (subok or type(self) is W_NDimArray)): return self - impl = self.implementation + impl = self.get_implementation() new_impl = impl.astype(space, new_dtype, order) if new_impl is None: return self @@ -698,7 +698,7 @@ return wrap_impl(space, w_type, self, new_impl) def descr_get_base(self, space): - impl = self.implementation + impl = self.get_implementation() ret = impl.base() if ret is None: return space.w_None @@ -707,7 +707,7 @@ @unwrap_spec(inplace=bool) def descr_byteswap(self, space, inplace=False): if inplace: - loop.byteswap(self.implementation, self.implementation) + loop.byteswap(self.get_implementation(), self.get_implementation()) return self 
else: w_res = W_NDimArray.from_shape(space, self.get_shape(), @@ -748,19 +748,19 @@ return w_result def buffer_w(self, space, flags): - return self.implementation.get_buffer(space, True) + return self.get_implementation().get_buffer(space, True) def readbuf_w(self, space): - return self.implementation.get_buffer(space, True) + return self.get_implementation().get_buffer(space, True) def writebuf_w(self, space): - return self.implementation.get_buffer(space, False) + return self.get_implementation().get_buffer(space, False) def charbuf_w(self, space): - return self.implementation.get_buffer(space, True).as_str() + return self.get_implementation().get_buffer(space, True).as_str() def descr_get_data(self, space): - return space.newbuffer(self.implementation.get_buffer(space, False)) + return space.newbuffer(self.get_implementation().get_buffer(space, False)) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_diagonal(self, space, offset=0, axis1=0, axis2=1): @@ -775,7 +775,7 @@ if axis1 == axis2: raise OperationError(space.w_ValueError, space.wrap( "axis1 and axis2 cannot be the same")) - return arrayops.diagonal(space, self.implementation, offset, axis1, axis2) + return arrayops.diagonal(space, self.get_implementation(), offset, axis1, axis2) @unwrap_spec(offset=int, axis1=int, axis2=int) def descr_trace(self, space, offset=0, axis1=0, axis2=1, @@ -884,7 +884,7 @@ # modify the array in-place if self.is_scalar(): return - return self.implementation.sort(space, w_axis, w_order) + return self.get_implementation().sort(space, w_axis, w_order) def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() @@ -905,7 +905,7 @@ return self # XXX need to call __array_wrap__ return wrap_impl(space, space.type(self), self, - self.implementation.get_view( + self.get_implementation().get_view( space, self, self.get_dtype(), new_shape)) def descr_strides(self, space): @@ -935,7 +935,7 @@ dtype = self.get_dtype() old_itemsize = self.get_dtype().elsize new_itemsize 
= dtype.elsize - impl = self.implementation + impl = self.get_implementation() if new_itemsize == 0: raise OperationError(space.w_TypeError, space.wrap( "data-type must not be 0-sized")) @@ -1129,7 +1129,7 @@ matches = True if dtype != out.get_dtype(): matches = False - elif not out.implementation.order == NPY.CORDER: + elif not out.get_order() == NPY.CORDER: matches = False elif out.ndims() != len(out_shape): matches = False @@ -1306,16 +1306,16 @@ if self.get_dtype().is_object(): raise oefmt(space.w_NotImplementedError, "reduce for 'object' dtype not supported yet") - if isinstance(self.implementation, SliceArray): - iter, state = self.implementation.create_iter() + if isinstance(self.get_implementation(), SliceArray): + iter, state = self.get_implementation().create_iter() while not iter.done(state): box = iter.getitem(state) builder.append(box.raw_str()) state = iter.next(state) else: - with self.implementation as storage: + with self.get_implementation() as storage: builder.append_charpsize(storage, - self.implementation.get_storage_size()) + self.get_implementation().get_storage_size()) state = space.newtuple([ space.wrap(1), # version @@ -1349,7 +1349,7 @@ self.implementation = W_NDimArray.from_shape_and_storage( space, [space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), - dtype, storage_bytes=space.len_w(storage), owning=True).implementation + dtype, storage_bytes=space.len_w(storage), owning=True).get_implementation() def descr___array_finalize__(self, space, w_obj): pass diff --git a/pypy/module/micronumpy/nditer.py b/pypy/module/micronumpy/nditer.py --- a/pypy/module/micronumpy/nditer.py +++ b/pypy/module/micronumpy/nditer.py @@ -460,7 +460,7 @@ if self.tracked_index != "": order = self.order if order == NPY.KEEPORDER: - order = self.seq[0].implementation.order + order = self.seq[0].get_order() if self.tracked_index == "multi": backward = False else: @@ -477,7 +477,7 @@ if not self_d: 
self.dtypes[i] = seq_d elif self_d != seq_d: - impl = self.seq[i].implementation + impl = self.seq[i].get_implementation() if self.buffered or 'r' in self.op_flags[i].tmp_copy: if not can_cast_array( space, self.seq[i], self_d, self.casting): @@ -527,7 +527,7 @@ def get_iter(self, space, i): arr = self.seq[i] - imp = arr.implementation + imp = arr.get_implementation() if arr.is_scalar(): return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self) shape = self.shape diff --git a/pypy/module/micronumpy/selection.py b/pypy/module/micronumpy/selection.py --- a/pypy/module/micronumpy/selection.py +++ b/pypy/module/micronumpy/selection.py @@ -134,7 +134,7 @@ # create array of indexes dtype = descriptor.get_dtype_cache(space).w_longdtype index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) - with index_arr.implementation as storage, arr as arr_storage: + with index_arr.get_implementation() as storage, arr as arr_storage: if len(arr.get_shape()) == 1: for i in range(arr.get_size()): raw_storage_setitem(storage, i * INT_SIZE, i) @@ -149,7 +149,7 @@ raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) arr_state = arr_iter.reset() - index_impl = index_arr.implementation + index_impl = index_arr.get_implementation() index_iter = AllButAxisIter(index_impl, axis) index_state = index_iter.reset() stride_size = arr.strides[axis] diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -79,7 +79,7 @@ def new_view(space, w_arr, chunks): - arr = w_arr.implementation + arr = w_arr.get_implementation() r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -305,12 +305,12 @@ 
if keepdims: shape = [1] * len(obj_shape) out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) - out.implementation.setitem(0, res) + out.get_implementation().setitem(0, res) res = out elif not space.is_w(space.type(w_obj), space.gettypefor(W_NDimArray)): # subtypes return a ndarray subtype, not a scalar out = W_NDimArray.from_shape(space, [1], dtype, w_instance=obj) - out.implementation.setitem(0, res) + out.get_implementation().setitem(0, res) res = out if call__array_wrap__: res = space.call_method(obj, '__array_wrap__', res, space.w_None) @@ -869,7 +869,7 @@ _arg = allargs[i] assert isinstance(_arg, W_NDimArray) start_dim = len(iter_shape) - steps += _arg.implementation.strides[start_dim:] + steps += _arg.get_implementation().strides[start_dim:] func.set_dims_and_steps(space, dims, steps) else: # it is a function, ready to be called by the iterator, @@ -1042,7 +1042,7 @@ if len(arg_shapes[i]) != curarg.ndims(): # reshape sz = product(curarg.get_shape()) * curarg.get_dtype().elsize - with curarg.implementation as storage: + with curarg.get_implementation() as storage: inargs[i] = W_NDimArray.from_shape_and_storage( space, arg_shapes[i], storage, curarg.get_dtype(), storage_bytes=sz, w_base=curarg) @@ -1056,7 +1056,7 @@ elif len(arg_shapes[i]) != curarg.ndims(): # reshape sz = product(curarg.get_shape()) * curarg.get_dtype().elsize - with curarg.implementation as storage: + with curarg.get_implementation() as storage: outargs[i] = W_NDimArray.from_shape_and_storage( space, arg_shapes[i], storage, curarg.get_dtype(), storage_bytes=sz, w_base=curarg) @@ -1541,7 +1541,7 @@ raise OperationError(space.w_NotImplementedError, space.wrap("cannot mix ndarray and %r (arg %d) in call to ufunc" % ( arg_i, i))) - with arg_i.implementation as storage: + with arg_i.get_implementation() as storage: addr = get_storage_as_int(storage, arg_i.get_start()) raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, addr)) #This assumes we iterate over the whole 
array (it should be a view...) @@ -1551,7 +1551,7 @@ for i in range(len(args_w)): arg_i = args_w[i] assert isinstance(arg_i, W_NDimArray) - with arg_i.implementation as storage: + with arg_i.get_implementation() as storage: addr = get_storage_as_int(storage, arg_i.get_start()) raw_storage_setitem(dataps, CCHARP_SIZE * i, rffi.cast(rffi.CCHARP, addr)) try: From noreply at buildbot.pypy.org Wed Oct 21 01:19:28 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:28 +0200 (CEST) Subject: [pypy-commit] pypy default: copy logic from getitem to setitem for int idexing of record ndarrays Message-ID: <20151020231928.051581C135C@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80364:1f1f0b0f5f07 Date: 2015-10-21 08:49 +1100 http://bitbucket.org/pypy/pypy/changeset/1f1f0b0f5f07/ Log: copy logic from getitem to setitem for int idexing of record ndarrays diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -591,6 +591,14 @@ def descr_setitem(self, space, w_item, w_value): if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) + elif space.isinstance_w(w_item, space.w_int): + indx = space.int_w(w_item) + try: + item = self.dtype.names[indx][0] + except IndexError: + if indx < 0: + indx += len(self.dtype.names) + raise oefmt(space.w_IndexError, "invalid index (%d)", indx) else: raise oefmt(space.w_IndexError, "invalid index") try: diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -645,6 +645,12 @@ for i in xrange(5): assert a[i] == i + def test_setitem_record(self): + from numpy import zeros + trie = zeros(200, dtype= [ ("A","uint32"),("C","uint32"), ]) + trie[0][0] = 1 + assert trie[0]['A'] == 1 + def test_setitem_array(self): import numpy as np a = 
np.array((-1., 0, 1))/0. From noreply at buildbot.pypy.org Wed Oct 21 01:19:30 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:30 +0200 (CEST) Subject: [pypy-commit] pypy default: change new release name to 4.0.0, update for latest blog posts and branches Message-ID: <20151020231930.2A91F1C1359@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80365:9397d7c6f5aa Date: 2015-10-21 10:01 +1100 http://bitbucket.org/pypy/pypy/changeset/9397d7c6f5aa/ Log: change new release name to 4.0.0, update for latest blog posts and branches diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-4.0.0.rst rename from pypy/doc/release-15.11.0.rst rename to pypy/doc/release-4.0.0.rst --- a/pypy/doc/release-15.11.0.rst +++ b/pypy/doc/release-4.0.0.rst @@ -1,15 +1,15 @@ ============ -PyPy 15.11.0 +PyPy 4.0.0 ============ -We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy +We're pleased and proud to unleash PyPy 4.0.0, a major update of the PyPy python2.7.10 compatible interpreter with a Just In Time compiler. We have improved `warmup time and memory overhead used for tracing`_, added `vectorization`_ for numpy and general loops where possible on x86 hardware (disabled by default), refactored rough edges in rpython, and increased functionality of numpy. -You can download the PyPy 15.11 release here: +You can download the PyPy 4.0.0 release here: http://pypy.org/download.html @@ -22,12 +22,19 @@ improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ with making RPython's JIT even better. +New Version Numbering +===================== + +Since the past release, PyPy 2.6.1, we decided to update the PyPy 2.x.x +versioning directly to PyPy 4.x.x, to avoid confusion with CPython 2.7 +and 3.5. Note that this version of PyPy uses the stdlib and implements the +syntax of CPython 2.7.10. 
Vectorization ============= Richard Plangger began work in March and continued over a Google Summer of Code -to add a vectorization step to the trace optimizer. The step recognizes common +to add a `vectorization` step to the trace optimizer. The step recognizes common constructs and emits SIMD code where possible, much as any modern compiler does. This vectorization happens while tracing running code, so it is actually easier at run-time to determine the @@ -41,6 +48,8 @@ drivers (like numpy ufuncs), add `--jit vec=1`, to enable all implemented vectorization add `--jit vec_all=1` +Benchmarks and a summary of this work appear `here`_ + Internal Refactoring and Warmup Time Improvement ================================================ @@ -83,20 +92,25 @@ CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + This release supports **x86** machines on most common operating systems (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. -We also welcome developers of other -`dynamic languages`_ to see what RPython can do for them. +We also introduce `support for the 64 bit PowerPC`_ hardware, specifically +Linux running the big- and little-endian variants of ppc64. .. _`pypy and cpython 2.7.x`: http://speed.pypy.org .. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy .. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ .. _`dynamic languages`: http://pypyjs.org +.. _`support for the 64 bit PowerPC`: http://morepypy.blogspot.com/2015/10/powerpc-backend-for-jit.html +.. 
_`here`: http://morepypy.blogspot.com/2015/10/automatic-simd-vectorization-support-in.html -Highlights (since 2.6.1 release two months ago) -=============================================== +Other Highlights (since 2.6.1 release two months ago) +===================================================== * Bug Fixes diff --git a/pypy/doc/whatsnew-15.11.0.rst b/pypy/doc/whatsnew-4.0.0.rst rename from pypy/doc/whatsnew-15.11.0.rst rename to pypy/doc/whatsnew-4.0.0.rst --- a/pypy/doc/whatsnew-15.11.0.rst +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -1,5 +1,5 @@ ======================== -What's new in PyPy 15.11 +What's new in PyPy 4.0.0 ======================== .. this is a revision shortly after release-2.6.1 From noreply at buildbot.pypy.org Wed Oct 21 01:19:32 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:32 +0200 (CEST) Subject: [pypy-commit] pypy release-4.0.x: start new release numbering Message-ID: <20151020231932.3C0471C1359@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-4.0.x Changeset: r80366:3a8f5481dab4 Date: 2015-10-21 10:05 +1100 http://bitbucket.org/pypy/pypy/changeset/3a8f5481dab4/ Log: start new release numbering diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,7 +29,7 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "2.7.0-alpha0" +#define PYPY_VERSION "4.0.0" /* Subversion Revision number of this file (not of the repository). * Empty since Mercurial migration. 
*/ diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (2, 7, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (4, 0, 0, "final", 0) #XXX # sync patchlevel.h import pypy From noreply at buildbot.pypy.org Wed Oct 21 01:19:34 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:34 +0200 (CEST) Subject: [pypy-commit] pypy default: more doc updates for 4.0.0 Message-ID: <20151020231934.538811C1359@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80367:b8ed720d0260 Date: 2015-10-21 10:11 +1100 http://bitbucket.org/pypy/pypy/changeset/b8ed720d0260/ Log: more doc updates for 4.0.0 diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,7 +6,7 @@ .. toctree:: - release-15.11.0.rst + release-4.0.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,7 +7,7 @@ .. toctree:: whatsnew-head.rst - whatsnew-15.11.0.rst + whatsnew-4.0.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst --- a/pypy/doc/whatsnew-4.0.0.rst +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -3,7 +3,7 @@ ======================== .. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. startrev: 3a8f5481dab4 .. branch: keys_with_hash Improve the performance of dict.update() and a bunch of methods from @@ -85,3 +85,10 @@ .. branch: vecopt-merge A new optimization pass to use emit vectorized loops + +.. 
branch: ppc-updated-backend + +The PowerPC JIT backend is merged. + +.. branch: osx-libffi + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,12 +1,8 @@ ========================= -What's new in PyPy 15.11+ +What's new in PyPy 4.0.+ ========================= -.. this is a revision shortly after release-15.11.0 -.. startrev: d924723d483b +.. this is a revision shortly after release-4.0.0 +.. startrev: 3a8f5481dab4 -.. branch: ppc-updated-backend -The PowerPC JIT backend is merged. - -.. branch: osx-libffi From noreply at buildbot.pypy.org Wed Oct 21 01:19:36 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 01:19:36 +0200 (CEST) Subject: [pypy-commit] pypy release-4.0.x: merge default into branch Message-ID: <20151020231936.6D9A91C1359@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-4.0.x Changeset: r80368:65ac40781e5e Date: 2015-10-21 10:11 +1100 http://bitbucket.org/pypy/pypy/changeset/65ac40781e5e/ Log: merge default into branch diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,7 +6,7 @@ .. toctree:: - release-15.11.0.rst + release-4.0.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,7 +7,7 @@ .. toctree:: whatsnew-head.rst - whatsnew-15.11.0.rst + whatsnew-4.0.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst --- a/pypy/doc/whatsnew-4.0.0.rst +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -3,7 +3,7 @@ ======================== .. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. startrev: 3a8f5481dab4 .. 
branch: keys_with_hash Improve the performance of dict.update() and a bunch of methods from @@ -85,3 +85,10 @@ .. branch: vecopt-merge A new optimization pass to use emit vectorized loops + +.. branch: ppc-updated-backend + +The PowerPC JIT backend is merged. + +.. branch: osx-libffi + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,12 +1,8 @@ ========================= -What's new in PyPy 15.11+ +What's new in PyPy 4.0.+ ========================= -.. this is a revision shortly after release-15.11.0 -.. startrev: d924723d483b +.. this is a revision shortly after release-4.0.0 +.. startrev: 3a8f5481dab4 -.. branch: ppc-updated-backend -The PowerPC JIT backend is merged. - -.. branch: osx-libffi From noreply at buildbot.pypy.org Wed Oct 21 01:28:56 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Wed, 21 Oct 2015 01:28:56 +0200 (CEST) Subject: [pypy-commit] buildbot default: backout 624b0ebfca9a Message-ID: <20151020232856.96D431C1359@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r968:7d750631b5ac Date: 2015-10-21 00:30 +0100 http://bitbucket.org/pypy/buildbot/changeset/7d750631b5ac/ Log: backout 624b0ebfca9a diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,18 @@ -buildbot==0.8.10 -buildbot-slave==0.8.10 -decorator==4.0.4 -Flask==0.10.1 -itsdangerous==0.24 -Jinja2==2.8 -MarkupSafe==0.23 -py==1.4.30 -pytest==2.8.2 +Flask==0.9 +Jinja2==2.7.1 +MarkupSafe==0.18 +SQLAlchemy==0.7.9 +Tempita==0.5.1 +Twisted==13.1.0 +Werkzeug==0.8.3 +argparse==1.2.1 +buildbot==0.8.8 +buildbot-slave==0.8.6p1 +decorator==3.4.0 +mock==1.0.1 +py==1.4.18 +pytest==2.2.4 python-dateutil==1.5 -SQLAlchemy==0.7.10 sqlalchemy-migrate==0.7.2 -Tempita==0.5.2 -Twisted==15.4.0 -Werkzeug==0.10.4 -zope.interface==4.1.3 +wsgiref==0.1.2 +zope.interface==4.0.5 From noreply at buildbot.pypy.org Wed Oct 21 09:37:53 2015 From: noreply at 
buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 09:37:53 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Add some object-management functions of pyobject to INTERPLEVEL_API Message-ID: <20151021073753.599111C13F1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80369:7fd91c0e2266 Date: 2015-10-21 09:33 +0200 http://bitbucket.org/pypy/pypy/changeset/7fd91c0e2266/ Log: Add some object-management functions of pyobject to INTERPLEVEL_API to make them easier to access from tests (useful in pdb to call them even if the current module didn't explicitly import them) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -378,7 +378,7 @@ TYPES[configname] = forward return forward -INTERPLEVEL_API = {} +INTERPLEVEL_API = {} # only for untranslated tests FUNCTIONS = {} def constant_pyobj(space, name): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -5,7 +5,8 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from pypy.module.cpyext.api import ( cpython_api, bootstrap_function, PyObject, PyObjectP, ADDR, - CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject) + CANNOT_FAIL, Py_TPFLAGS_HEAPTYPE, PyTypeObjectPtr, is_PyObject, + INTERPLEVEL_API) from pypy.module.cpyext.state import State from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.objectobject import W_ObjectObject @@ -280,16 +281,19 @@ assert not is_pyobj(w_obj) return w_obj.cpyext_as_pyobj(space) as_pyobj._always_inline_ = True +INTERPLEVEL_API['as_pyobj'] = as_pyobj def as_xpyobj(space, w_obj): if w_obj is not None: return as_pyobj(space, w_obj) else: return lltype.nullptr(PyObject.TO) +INTERPLEVEL_API['as_xpyobj'] = as_xpyobj def pyobj_has_w_obj(pyobj): return rawrefcount.to_obj(W_Root, pyobj) is not None +INTERPLEVEL_API['pyobj_has_w_obj'] = 
staticmethod(pyobj_has_w_obj) @specialize.ll() def from_pyobj(space, pyobj): @@ -301,6 +305,7 @@ w_obj = _create_w_obj_from_pyobj(space, pyobj) return w_obj from_pyobj._always_inline_ = True +INTERPLEVEL_API['from_pyobj'] = from_pyobj @specialize.ll() def from_xpyobj(space, pyobj): @@ -308,6 +313,7 @@ return from_pyobj(space, pyobj) else: return None +INTERPLEVEL_API['from_xpyobj'] = from_xpyobj def is_pyobj(x): @@ -317,6 +323,7 @@ return True else: raise TypeError(repr(type(x))) +INTERPLEVEL_API['is_pyobj'] = staticmethod(is_pyobj) class Entry(ExtRegistryEntry): _about_ = is_pyobj @@ -342,6 +349,7 @@ if not is_pyobj(obj): keepalive_until_here(obj) return pyobj +INTERPLEVEL_API['get_pyobj_and_incref'] = get_pyobj_and_incref @specialize.ll() def get_pyobj_and_xincref(space, obj): @@ -349,6 +357,7 @@ return get_pyobj_and_incref(space, obj) else: return lltype.nullptr(PyObject.TO) +INTERPLEVEL_API['get_pyobj_and_xincref'] = get_pyobj_and_xincref @specialize.ll() def get_w_obj_and_decref(space, obj): @@ -367,6 +376,7 @@ assert pyobj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY keepalive_until_here(w_obj) return w_obj +INTERPLEVEL_API['get_w_obj_and_decref'] = get_w_obj_and_decref @specialize.ll() @@ -381,6 +391,7 @@ ob.c_ob_type = ob_type ob.c_ob_pypy_link = 0 return ob +INTERPLEVEL_API['new_pyobj'] = staticmethod(new_pyobj) def make_ref(space, w_obj): diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py --- a/pypy/module/cpyext/test/test_number.py +++ b/pypy/module/cpyext/test/test_number.py @@ -1,7 +1,6 @@ from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import PyObjectP, as_pyobj -from pypy.module.cpyext.pyobject import get_w_obj_and_decref +from pypy.module.cpyext.pyobject import PyObjectP from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase class TestIterator(BaseApiTest): @@ -44,12 +43,12 @@ w_obj1 = 
space.wrap(123) w_obj2 = space.wrap(456.789) pp1 = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - pp1[0] = as_pyobj(space, w_obj1) + pp1[0] = api.as_pyobj(w_obj1) pp2 = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - pp2[0] = as_pyobj(space, w_obj2) + pp2[0] = api.as_pyobj(w_obj2) assert api.PyNumber_Coerce(pp1, pp2) == 0 - w_res1 = get_w_obj_and_decref(space, pp1[0]) - w_res2 = get_w_obj_and_decref(space, pp2[0]) + w_res1 = api.get_w_obj_and_decref(pp1[0]) + w_res2 = api.get_w_obj_and_decref(pp2[0]) lltype.free(pp1, flavor='raw') lltype.free(pp2, flavor='raw') assert space.str_w(space.repr(w_res1)) == '123.0' @@ -60,14 +59,14 @@ w_objf = space.wrap(42.5) ppl = lltype.malloc(PyObjectP.TO, 1, flavor='raw') ppf = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - ppl[0] = as_pyobj(space, w_objl) - ppf[0] = as_pyobj(space, w_objf) + ppl[0] = api.as_pyobj(w_objl) + ppf[0] = api.as_pyobj(w_objf) ret = api.PyNumber_CoerceEx(ppl, ppf) assert ret == 0 - w_resl = get_w_obj_and_decref(space, ppl[0]) - w_resf = get_w_obj_and_decref(space, ppf[0]) + w_resl = api.get_w_obj_and_decref(ppl[0]) + w_resf = api.get_w_obj_and_decref(ppf[0]) lltype.free(ppl, flavor='raw') lltype.free(ppf, flavor='raw') diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -1,8 +1,7 @@ import py from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, from_pyobj -from pypy.module.cpyext.pyobject import pyobj_has_w_obj, get_pyobj_and_incref +from pypy.module.cpyext.pyobject import PyObject, PyObjectP from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi, lltype @@ -11,7 +10,7 @@ def test_tupleobject(self, space, api): assert not api.PyTuple_Check(space.w_None) - py_none = get_pyobj_and_incref(space, space.w_None) + py_none = 
api.get_pyobj_and_incref(space.w_None) assert api.PyTuple_SetItem(space.w_None, 0, py_none) == -1 atuple = space.newtuple([space.wrap(0), space.wrap(1), space.wrap('yay')]) @@ -23,8 +22,8 @@ def test_tupleobject_spec_ii(self, space, api): atuple = space.newtuple([space.wrap(10), space.wrap(11)]) assert api.PyTuple_Size(atuple) == 2 - w_obj1 = from_pyobj(space, api.PyTuple_GetItem(atuple, 0)) - w_obj2 = from_pyobj(space, api.PyTuple_GetItem(atuple, 1)) + w_obj1 = api.from_pyobj(api.PyTuple_GetItem(atuple, 0)) + w_obj2 = api.from_pyobj(api.PyTuple_GetItem(atuple, 1)) assert space.eq_w(w_obj1, space.wrap(10)) assert space.eq_w(w_obj2, space.wrap(11)) @@ -33,14 +32,14 @@ w_obj2 = space.newlist([]) atuple = space.newtuple([w_obj1, w_obj2]) assert api.PyTuple_Size(atuple) == 2 - assert from_pyobj(space, api.PyTuple_GetItem(atuple, 0)) is w_obj1 - assert from_pyobj(space, api.PyTuple_GetItem(atuple, 1)) is w_obj2 + assert api.from_pyobj(api.PyTuple_GetItem(atuple, 0)) is w_obj1 + assert api.from_pyobj(api.PyTuple_GetItem(atuple, 1)) is w_obj2 def test_new_setitem(self, space, api): w_obj1 = space.newlist([]) - pyobj1 = get_pyobj_and_incref(space, w_obj1) + pyobj1 = api.get_pyobj_and_incref(w_obj1) w_obj2 = space.newlist([]) - pyobj2 = get_pyobj_and_incref(space, w_obj2) + pyobj2 = api.get_pyobj_and_incref(w_obj2) py_tuple = api.PyTuple_New(2) assert not pyobj_has_w_obj(py_tuple) @@ -59,8 +58,8 @@ assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 assert not pyobj_has_w_obj(py_tuple) - w_tup = from_pyobj(space, py_tuple) - assert w_tup is from_pyobj(space, py_tuple) + w_tup = api.from_pyobj(py_tuple) + assert w_tup is api.from_pyobj(py_tuple) assert api.PyTuple_GetItem(py_tuple, 1) == pyobj2 assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 0 assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -289,55 
+289,6 @@ fill_pypy=type_fill_pypy) #dealloc=type_dealloc) - # some types are difficult to create because of cycles. - # - object.ob_type = type - # - type.ob_type = type - # - tuple.ob_type = type - # - type.tp_base = object - # - tuple.tp_base = object - # - type.tp_bases is a tuple - # - object.tp_bases is a tuple - # - tuple.tp_bases is a tuple - - return # ZZZ - - # insert null placeholders to please create_ref() - track_reference(space, lltype.nullptr(PyObject.TO), space.w_type) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_object) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_tuple) - track_reference(space, lltype.nullptr(PyObject.TO), space.w_str) - - # create the objects - py_type = create_ref(space, space.w_type) - py_object = create_ref(space, space.w_object) - py_tuple = create_ref(space, space.w_tuple) - py_str = create_ref(space, space.w_str) - - # form cycles - pto_type = rffi.cast(PyTypeObjectPtr, py_type) - py_type.c_ob_type = pto_type - py_object.c_ob_type = pto_type - py_tuple.c_ob_type = pto_type - - pto_object = rffi.cast(PyTypeObjectPtr, py_object) - pto_type.c_tp_base = pto_object - pto_tuple = rffi.cast(PyTypeObjectPtr, py_tuple) - pto_tuple.c_tp_base = pto_object - - pto_type.c_tp_bases.c_ob_type = pto_tuple - pto_object.c_tp_bases.c_ob_type = pto_tuple - pto_tuple.c_tp_bases.c_ob_type = pto_tuple - - for typ in (py_type, py_object, py_tuple, py_str): - heaptype = rffi.cast(PyHeapTypeObject, typ) - heaptype.c_ht_name.c_ob_type = pto_type - - # Restore the mapping - track_reference(space, py_type, space.w_type, replace=True) - track_reference(space, py_object, space.w_object, replace=True) - track_reference(space, py_tuple, space.w_tuple, replace=True) - track_reference(space, py_str, space.w_str, replace=True) - @cpython_api([PyObject], lltype.Void, external=False) def subtype_dealloc(space, obj): diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ 
b/rpython/rlib/rawrefcount.py @@ -82,7 +82,7 @@ global _p_list, _o_list wr_p_list = [] new_p_list = [] - for ob in _p_list: + for ob in reversed(_p_list): if ob.c_ob_refcnt not in (REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT): new_p_list.append(ob) else: @@ -93,7 +93,7 @@ _p_list = Ellipsis wr_o_list = [] - for ob in _o_list: + for ob in reversed(_o_list): detach(ob, wr_o_list) _o_list = Ellipsis From noreply at buildbot.pypy.org Wed Oct 21 10:03:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 10:03:55 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: tweaks tweaks Message-ID: <20151021080355.250A21C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80370:4df1301d6ea2 Date: 2015-10-21 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/4df1301d6ea2/ Log: tweaks tweaks diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -372,7 +372,17 @@ config = CConfig else: config = CConfig2 - setattr(config, configname, rffi_platform.Struct(name, fields)) + adtmeths = {} + if ("ob_type", PyTypeObjectPtr) in fields: + def my_str(ob): + r = name + tp = ob.c_ob_type + if tp: + r += " of type '%s'" % (rffi.charp2str(tp.c_tp_name),) + return r + adtmeths['__str__'] = my_str + setattr(config, configname, rffi_platform.Struct(name, fields, + adtmeths=adtmeths)) if forward is None: forward = lltype.ForwardReference() TYPES[configname] = forward @@ -790,7 +800,7 @@ # Do not call this more than once per process def build_bridge(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import setup_prebuilt_pyobj + from pypy.module.cpyext.pyobject import setup_prebuilt_pyobj, _Py_Dealloc from rpython.rlib import rawrefcount export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) @@ -850,7 +860,7 @@ space.fromcache(State).install_dll(eci) - rawrefcount.init(lambda ob: ZZZ) + rawrefcount.init(lambda ob: _Py_Dealloc(space, ob)) # 
populate static data to_fill = [] @@ -1214,20 +1224,15 @@ @specialize.ll() def generic_cpy_call(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, False)(space, func, *args) - - at specialize.ll() -def generic_cpy_call_dont_decref(space, func, *args): - FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, False, False)(space, func, *args) + return make_generic_cpy_call(FT, False)(space, func, *args) @specialize.ll() def generic_cpy_call_expect_null(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, True)(space, func, *args) + return make_generic_cpy_call(FT, True)(space, func, *args) @specialize.memo() -def make_generic_cpy_call(FT, decref_args, expect_null): +def make_generic_cpy_call(FT, expect_null): from pypy.module.cpyext.pyobject import is_pyobj, as_xpyobj from pypy.module.cpyext.pyobject import get_w_obj_and_decref from pypy.module.cpyext.pyerrors import PyErr_Occurred @@ -1254,8 +1259,6 @@ # don't inline, as a hack to guarantee that no GC pointer is alive # anywhere in call_external_function - assert decref_args #ZZZ - @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h --- a/pypy/module/cpyext/include/object.h +++ b/pypy/module/cpyext/include/object.h @@ -48,12 +48,12 @@ #else /* Fast version */ #define Py_INCREF(ob) (((PyObject *)ob)->ob_refcnt++) -#define Py_DECREF(ob) \ +#define Py_DECREF(op) \ do { \ - if (((PyObject *)ob)->ob_refcnt > 1) \ - ((PyObject *)ob)->ob_refcnt--; \ + if (--((PyObject*)(op))->ob_refcnt != 0) \ + ; \ else \ - Py_DecRef((PyObject *)ob); \ + _Py_Dealloc((PyObject *)(op)); \ } while (0) #define Py_XINCREF(op) do { if ((op) == NULL) ; else Py_INCREF(op); } while (0) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -120,6 +120,7 @@ def 
rawrefcount_init_link(w_obj, ob, strength): + assert lltype.typeOf(ob) == PyObject if strength == RRC_PERMANENT: ob.c_ob_refcnt += rawrefcount.REFCNT_FROM_PYPY rawrefcount.create_link_pypy(w_obj, ob) @@ -141,7 +142,6 @@ def setup_prebuilt_pyobj(w_obj, py_obj): - assert lltype.typeOf(py_obj) == PyObject rawrefcount_init_link(w_obj, py_obj, RRC_PERMANENT) if isinstance(w_obj, W_TypeObject): w_obj.cpyext_c_type_object = rffi.cast(PyTypeObjectPtr, py_obj) @@ -394,6 +394,36 @@ INTERPLEVEL_API['new_pyobj'] = staticmethod(new_pyobj) + at specialize.ll() +def incref(space, obj): + get_pyobj_and_incref(space, obj) +INTERPLEVEL_API['incref'] = incref + + at specialize.ll() +def xincref(space, obj): + get_pyobj_and_xincref(space, obj) +INTERPLEVEL_API['xincref'] = xincref + + at specialize.ll() +def decref(space, obj): + if is_pyobj(obj): + obj = rffi.cast(PyObject, obj) + assert obj.c_ob_refcnt > 0 + obj.c_ob_refcnt -= 1 + if obj.c_ob_refcnt == 0: + _Py_Dealloc(space, obj) + else: + get_w_obj_and_decref(space, obj) +INTERPLEVEL_API['decref'] = decref + + at specialize.ll() +def xdecref(space, obj): + if obj: + decref(space, obj) +INTERPLEVEL_API['xdecref'] = xdecref + +# ---------- + def make_ref(space, w_obj): ZZZ @@ -424,62 +454,30 @@ return get_typedescr(w_type.instancetypedef).realize(space, ref) -# XXX Optimize these functions and put them into macro definitions + at cpython_api([PyObject], lltype.Void) +def Py_IncRef(space, obj): + xincref(space, obj) + @cpython_api([PyObject], lltype.Void) def Py_DecRef(space, obj): - if not obj: - return - assert lltype.typeOf(obj) == PyObject + xdecref(space, obj) - obj.c_ob_refcnt -= 1 - if DEBUG_REFCOUNT: - debug_refcount("DECREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) - if obj.c_ob_refcnt == 0: - return #ZZZ - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, obj) - if ptr not in state.py_objects_r2w: - # this is a half-allocated object, lets call the deallocator - # without modifying the r2w/w2r dicts - 
_Py_Dealloc(space, obj) - else: - w_obj = state.py_objects_r2w[ptr] - del state.py_objects_r2w[ptr] - w_type = space.type(w_obj) - if not w_type.is_cpytype(): - _Py_Dealloc(space, obj) - del state.py_objects_w2r[w_obj] - # if the object was a container for borrowed references - state.delete_borrower(w_obj) - else: - if not we_are_translated() and obj.c_ob_refcnt < 0: - message = "Negative refcount for obj %s with type %s" % ( - obj, rffi.charp2str(obj.c_ob_type.c_tp_name)) - print >>sys.stderr, message - assert False, message - - at cpython_api([PyObject], lltype.Void) -def Py_IncRef(space, obj): - if not obj: - return - obj.c_ob_refcnt += 1 - assert obj.c_ob_refcnt > 0 - if DEBUG_REFCOUNT: - debug_refcount("INCREF", obj, obj.c_ob_refcnt, frame_stackdepth=3) @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): + ZZZ obj.c_ob_refcnt = 1 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) assert isinstance(w_type, W_TypeObject) get_typedescr(w_type.instancetypedef).realize(space, obj) + at cpython_api([PyObject], lltype.Void) def _Py_Dealloc(space, obj): - from pypy.module.cpyext.api import generic_cpy_call_dont_decref + from pypy.module.cpyext.api import generic_cpy_call pto = obj.c_ob_type #print >>sys.stderr, "Calling dealloc slot", pto.c_tp_dealloc, "of", obj, \ # "'s type which is", rffi.charp2str(pto.c_tp_name) - generic_cpy_call_dont_decref(space, pto.c_tp_dealloc, obj) + generic_cpy_call(space, pto.c_tp_dealloc, obj) #___________________________________________________________ # Support for "lifelines" diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -107,6 +107,7 @@ spaceconfig = dict(usemodules=['cpyext', 'thread', '_rawffi', 'array', 'itertools', 'time', 'binascii', 'micronumpy']) spaceconfig['std.withmethodcache'] = True + spaceconfig['std.withspecialisedtuple'] = True enable_leak_checking = 
True diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -41,7 +41,7 @@ w_obj2 = space.newlist([]) pyobj2 = api.get_pyobj_and_incref(w_obj2) py_tuple = api.PyTuple_New(2) - assert not pyobj_has_w_obj(py_tuple) + assert not api.pyobj_has_w_obj(py_tuple) assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 @@ -57,7 +57,7 @@ assert pyobj1.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 0 assert pyobj2.c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 - assert not pyobj_has_w_obj(py_tuple) + assert not api.pyobj_has_w_obj(py_tuple) w_tup = api.from_pyobj(py_tuple) assert w_tup is api.from_pyobj(py_tuple) assert api.PyTuple_GetItem(py_tuple, 1) == pyobj2 diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -615,11 +615,14 @@ return object.__hash__(self) def __repr__(self): + if '__str__' in self._TYPE._adtmeths: + r = self._TYPE._adtmeths['__str__'](self) + else: + r = 'C object %s' % (self._TYPE,) if self._storage is None: - return '' % (self._TYPE,) + return '' % (r,) else: - return '' % (self._TYPE, - fixid(self._addressof_storage())) + return '<%s at 0x%x>' % (r, fixid(self._addressof_storage())) def __str__(self): return repr(self) diff --git a/rpython/rtyper/tool/rffi_platform.py b/rpython/rtyper/tool/rffi_platform.py --- a/rpython/rtyper/tool/rffi_platform.py +++ b/rpython/rtyper/tool/rffi_platform.py @@ -263,10 +263,11 @@ """An entry in a CConfig class that stands for an externally defined structure. 
""" - def __init__(self, name, interesting_fields, ifdef=None): + def __init__(self, name, interesting_fields, ifdef=None, adtmeths={}): self.name = name self.interesting_fields = interesting_fields self.ifdef = ifdef + self.adtmeths = adtmeths def prepare_code(self): if self.ifdef is not None: @@ -355,7 +356,7 @@ name = name[7:] else: hints['typedef'] = True - kwds = {'hints': hints} + kwds = {'hints': hints, 'adtmeths': self.adtmeths} return rffi.CStruct(name, *fields, **kwds) class SimpleType(CConfigEntry): From noreply at buildbot.pypy.org Wed Oct 21 10:50:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 10:50:55 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Deallocators Message-ID: <20151021085055.CDC8B1C13E6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80371:59c2d992bc0e Date: 2015-10-21 10:48 +0200 http://bitbucket.org/pypy/pypy/changeset/59c2d992bc0e/ Log: Deallocators diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -262,6 +262,7 @@ rffi.cast(restype, 0) == 0) def decorate(func): + func._always_inline_ = 'try' func_name = func.func_name if external: c_name = None @@ -355,7 +356,6 @@ return res unwrapper.func = func unwrapper.api_func = api_function - unwrapper._always_inline_ = 'try' return unwrapper unwrapper_catch = make_unwrapper(True) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -38,7 +38,7 @@ fill_pyobj: called to fill the PyObject after attaching is done alloc_pypy: function called to create a PyPy object from a PyObject fill_pypy: called to fill the PyPy object after attaching is done - dealloc : a cpython_api(external=False), similar to PyObject_dealloc + dealloc: function called with the pyobj when the refcount drops to zero """ tp_basestruct = kw.pop('basestruct', PyObject.TO) 
@@ -46,11 +46,11 @@ tp_fill_pyobj = kw.pop('fill_pyobj', None) tp_alloc_pypy = kw.pop('alloc_pypy', None) tp_fill_pypy = kw.pop('fill_pypy', None) + tp_dealloc = kw.pop('dealloc', None) force_create_pyobj = kw.pop('force_create_pyobj', False) realize_subclass_of = kw.pop('realize_subclass_of', None) alloc_pypy_light_if = kw.pop('alloc_pypy_light_if', None) - #tp_dealloc = kw.pop('dealloc', None) - assert not kw, "Extra arguments to make_typedescr: %s" % kw.keys() + assert not kw, "Extra arguments to setup_class_for_cpyext: %s" % kw.keys() assert 'cpyext_basestruct' not in W_Class.__dict__ # double set @@ -86,7 +86,7 @@ keepalive_until_here(self) W_Class.cpyext_fill_prebuilt_pyobj = cpyext_fill_prebuilt_pyobj - if tp_alloc_pyobj or tp_fill_pyobj or realize_subclass_of: + if tp_alloc_pyobj or tp_fill_pyobj or realize_subclass_of or tp_dealloc: if realize_subclass_of is None: realize_subclass_of = W_Class # @@ -116,6 +116,19 @@ assert 'cpyext_create_pypy' not in typedef.__dict__ typedef.cpyext_create_pypy = cpyext_create_pypy + if tp_dealloc: + @cpython_api([PyObject], lltype.Void, + external=False, error=CANNOT_FAIL) + def dealloc(space, py_obj): + tp_dealloc(space, rffi.cast(lltype.Ptr(tp_basestruct), py_obj)) + # + def cpyext_get_dealloc(space): + return llhelper(dealloc.api_func.functype, + dealloc.api_func.get_wrapper(space)) + # + assert 'cpyext_get_dealloc' not in typedef.__dict__ + typedef.cpyext_get_dealloc = cpyext_get_dealloc + W_Class.cpyext_basestruct = tp_basestruct @@ -157,10 +170,17 @@ @bootstrap_function def init_pyobject(space): setup_class_for_cpyext(W_Root, force_create_pyobj=True, - realize_subclass_of=W_ObjectObject) + realize_subclass_of=W_ObjectObject, + dealloc=_default_dealloc) # use this cpyext_create_pypy as the default for all other TypeDefs from pypy.interpreter.typedef import TypeDef - TypeDef.cpyext_create_pypy = W_ObjectObject.typedef.cpyext_create_pypy + TypeDef.cpyext_create_pypy = staticmethod( + 
W_ObjectObject.typedef.cpyext_create_pypy) + TypeDef.cpyext_get_dealloc = staticmethod( + W_ObjectObject.typedef.cpyext_get_dealloc) + +def _default_dealloc(space, py_obj): + lltype.free(py_obj, flavor='raw', track_allocation=False) #________________________________________________________ @@ -479,24 +499,6 @@ # "'s type which is", rffi.charp2str(pto.c_tp_name) generic_cpy_call(space, pto.c_tp_dealloc, obj) -#___________________________________________________________ -# Support for "lifelines" -# -# Object structure must stay alive even when not referenced -# by any C code. - -class PyOLifeline(object): - def __init__(self, space, pyo): - ZZZ - self.pyo = pyo - self.space = space - - def __del__(self): - if self.pyo: - assert self.pyo.c_ob_refcnt == 0 - _Py_Dealloc(self.space, self.pyo) - self.pyo = lltype.nullptr(PyObject.TO) - # XXX handle borrowed objects here #___________________________________________________________ # Support for borrowed references diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -1,7 +1,7 @@ import py from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT -from pypy.module.cpyext.pyobject import PyObject, PyObjectP +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, debug_collect from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi, lltype @@ -26,6 +26,14 @@ w_obj2 = api.from_pyobj(api.PyTuple_GetItem(atuple, 1)) assert space.eq_w(w_obj1, space.wrap(10)) assert space.eq_w(w_obj2, space.wrap(11)) + # + # one reference from the PyTupleObject + assert api.as_pyobj(w_obj1).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + assert api.as_pyobj(w_obj2).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + 1 + del atuple + debug_collect() + assert api.as_pyobj(w_obj1).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + assert api.as_pyobj(w_obj2).c_ob_refcnt == 
REFCNT_FROM_PYPY_LIGHT def test_tupleobject_spec_oo(self, space, api): w_obj1 = space.newlist([]) @@ -34,6 +42,14 @@ assert api.PyTuple_Size(atuple) == 2 assert api.from_pyobj(api.PyTuple_GetItem(atuple, 0)) is w_obj1 assert api.from_pyobj(api.PyTuple_GetItem(atuple, 1)) is w_obj2 + # + # no reference from the PyTupleObject: it is borrowed + assert api.as_pyobj(w_obj1).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + assert api.as_pyobj(w_obj2).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + del atuple + debug_collect() + assert api.as_pyobj(w_obj1).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT + assert api.as_pyobj(w_obj2).c_ob_refcnt == REFCNT_FROM_PYPY_LIGHT def test_new_setitem(self, space, api): w_obj1 = space.newlist([]) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -4,7 +4,7 @@ cpython_struct, PyVarObjectFields, build_type_checkers3, bootstrap_function) from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, setup_class_for_cpyext, as_pyobj, get_pyobj_and_incref, from_pyobj, - pyobj_has_w_obj, RRC_PERMANENT, RRC_PERMANENT_LIGHT, new_pyobj) + pyobj_has_w_obj, RRC_PERMANENT, RRC_PERMANENT_LIGHT, new_pyobj, xdecref) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.tupleobject import W_TupleObject, W_AbstractTupleObject @@ -34,6 +34,10 @@ # --and then we call this function to initialize the W_TupleObject-- fill_pypy=tuple_fill_pypy, + + # --deallocator, *not* called if tuple_alloc_pyobj() made a + # PyTupleObject of borrowed items-- + dealloc=tuple_dealloc, ) def tuple_alloc_pyobj(space, w_obj): @@ -63,6 +67,11 @@ for i in range(py_tuple.c_ob_size)] W_TupleObject.__init__(w_obj, objects_w) +def tuple_dealloc(space, py_tup): + for i in range(py_tup.c_ob_size): + xdecref(space, py_tup.c_ob_item[i]) + lltype.free(py_tup, flavor='raw', track_allocation=False) + @cpython_api([Py_ssize_t], PyObject) def PyTuple_New(space, 
size): diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -428,7 +428,8 @@ w_type.cpyext_c_type_object = pto # dealloc - #pto.c_tp_dealloc = typedescr.get_dealloc(space) ZZZ + pto.c_tp_dealloc = w_type.instancetypedef.cpyext_get_dealloc(space) + # buffer protocol if space.is_w(w_type, space.w_str): setup_string_buffer_procs(space, pto) From noreply at buildbot.pypy.org Wed Oct 21 10:53:18 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 21 Oct 2015 10:53:18 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: added BRAS (short version), load from memory 64 bit (LG), testing constant pool access Message-ID: <20151021085318.A12231C13F1@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80372:81756cc1745e Date: 2015-10-21 10:50 +0200 http://bitbucket.org/pypy/pypy/changeset/81756cc1745e/ Log: added BRAS (short version), load from memory 64 bit (LG), testing constant pool access diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -40,6 +40,10 @@ self.writechar(chr((word >> 8) & 0xFF)) self.writechar(chr(word & 0xFF)) + def write(self, bytestr): + for char in bytestr: + self.writechar(char) + build_instr_codes(AbstractZARCHBuilder) class InstrBuilder(BlockBuilderMixin, AbstractZARCHBuilder): @@ -78,7 +82,7 @@ def copy_to_raw_memory(self, addr): self._copy_to_raw_memory(addr) self.clear_cache(addr) - self._dump(addr, "jit-backend-dump", 'arm') + self._dump(addr, "jit-backend-dump", "s390x") def currpos(self): return self.get_relative_pos() diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -1,7 
+1,15 @@ from rpython.jit.backend.zarch.instructions import (all_mnemonic_codes,) from rpython.rtyper.lltypesystem.rbuilder import always_inline from rpython.rlib.unroll import unrolling_iterable +from rpython.jit.backend.zarch import locations as loc +def dummy_argument(arg): + """ NOT_RPYTHON """ + if arg == 'r' or arg == 'r/m': + return 0 + if arg.startswith('i') or arg.startswith('u'): + return 0 + return loc.addr(0) class builder(object): """ NOT_RPYTHON """ @@ -23,8 +31,22 @@ note that a suffix 'l' means long, and a prefix length """ + class Counter(object): + def __init__(self): + self.counter = 0 + def writechar(self, char): + self.counter += 1 + def write_i16(self, _): + self.counter += 2 + def write_i32(self, _): + self.counter += 4 def impl(func): func._arguments_ = args_str.split(',') + args = [dummy_argument(a) for a in func._arguments_] + c = Counter() + # invoke it once and get the amount of bytes + func(c, *args) + func._byte_count = c.counter return func return impl @@ -270,6 +292,7 @@ newargs[i] = args[i] return func(self, *newargs) function.__name__ = mnemonic + function._byte_count = func._byte_count return function def is_branch_relative(name): @@ -277,11 +300,7 @@ def build_instr_codes(clazz): for mnemonic, params in all_mnemonic_codes.items(): - options = {} - if len(params) == 2: - (instrtype, args) = params - else: - (instrtype, args, options) = params + (instrtype, args) = params builder = globals()['build_' + instrtype] func = builder(mnemonic, args) name = mnemonic + "_" + instrtype diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -1,6 +1,7 @@ branch_mnemonic_codes = { 'BRASL': ('ril', ['\xC0','\x05']), + 'BRAS': ('ri', ['\xA7','\x05']), 'BCR': ('rr', ['\x07']), 'BC': ('rx', ['\x47']), 'BRC': ('ri', ['\xA7','\x04']), @@ -14,6 +15,11 @@ 'A': ('rx', ['\x5A']), 'SR': ('rr', ['\x1B']), 'SGR': 
('rre', ['\xB9','\x09']), + + 'AY': ('rxy', ['\xE3','\x5A']), + 'AG': ('rxy', ['\xE3','\x08']), + 'AGF': ('rxy', ['\xE3','\x18']), + 'AHI': ('ri', ['\xA7','\x0A']), } logic_mnemonic_codes = { @@ -23,6 +29,7 @@ # and one byte and store it back at the op2 position 'NI': ('si', ['\x94']), 'NIY': ('siy', ['\xEB','\x54']), + 'NC': ('ssa', ['\xD4']), # AND immediate 'NIHH': ('ri_u', ['\xA5', '\x04']), @@ -58,10 +65,6 @@ } all_mnemonic_codes = { - 'AY': ('rxy', ['\xE3','\x5A']), - 'AG': ('rxy', ['\xE3','\x08']), - 'AGF': ('rxy', ['\xE3','\x18']), - 'AHI': ('ri', ['\xA7','\x0A']), # 'BXH': ('rs', ['\x86']), 'BXHG': ('rsy', ['\xEB','\x44']), @@ -70,7 +73,6 @@ # 'NI': ('si', ['\x94']), 'NIY': ('siy', ['\xEB','\x54']), - 'NC': ('ssa', ['\xD4']), 'AP': ('ssb', ['\xFA']), 'SRP': ('ssc', ['\xF0']), 'MVCK': ('ssd', ['\xD9']), @@ -81,6 +83,7 @@ 'LGHI': ('ri', ['\xA7','\x09']), 'LR': ('rr', ['\x18']), 'LGR': ('rre', ['\xB9','\x04']), + 'LG': ('rxy', ['\xE3','\x04']), 'PKA': ('ssf', ['\xE9']), 'STMG': ('rsy', ['\xEB','\x24']), diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -170,18 +170,21 @@ class AddressLocation(AssemblerLocation): _immutable_ = True - def __init__(self, basereg, indexreg, displace): + def __init__(self, basereg, indexreg, displace, length): self.displace = displace # designates the absense of an index/base register! 
self.base = 0 self.index = 0 + self.length = 0 if basereg: self.base = basereg.value if indexreg: self.index = indexreg.value + if length: + self.length = length.value -def addr(displace, basereg=None, indexreg=None): - return AddressLocation(basereg, indexreg, displace) +def addr(displace, basereg=None, indexreg=None, length=None): + return AddressLocation(basereg, indexreg, displace, length) def imm(i): return ImmLocation(i) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -18,6 +18,9 @@ CPU = getcpuclass() +def byte_count(func): + return func._byte_count + class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) @@ -28,6 +31,7 @@ clt.allgcrefs = [] token.compiled_loop_token = clt self.a.setup(token) + self.mc = self.a.mc def test_make_operation_list(self): i = rop.INT_ADD @@ -35,6 +39,10 @@ assert assembler.asm_operations[i] \ is AssemblerZARCH.emit_op_int_add.im_func + def test_byte_count_instr(self): + byte_count(self.mc.BRC) == 4 + byte_count(self.mc.LG) == 6 + def test_load_small_int_to_reg(self): self.a.mc.LGHI(reg.r2, loc.imm(123)) self.a.jmpto(reg.r14) @@ -93,3 +101,13 @@ self.a.mc.XGR(reg.r2, reg.r2) self.a.jmpto(reg.r14) assert run_asm(self.a) == 0 + + def test_literal_pool(self): + self.a.gen_func_prolog() + self.a.mc.BRAS(reg.r13, loc.imm(8 + byte_count(self.mc.BRAS))) + self.a.mc.write('\x08\x07\x06\x05\x04\x03\x02\x01') + self.a.mc.LG(reg.r2, loc.addr(0, reg.r13)) + self.a.gen_func_epilog() + assert run_asm(self.a) == 0x0807060504030201 + + From noreply at buildbot.pypy.org Wed Oct 21 10:56:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 10:56:11 +0200 (CEST) Subject: [pypy-commit] pypy default: Fix this too Message-ID: <20151021085611.C86FC1C13F1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: 
r80373:7889231eaf0f Date: 2015-10-21 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/7889231eaf0f/ Log: Fix this too diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -138,6 +138,10 @@ f(1) interpret(f, [1]) + + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakValueDictionary3(): def g(x): if x: d = RWeakValueDictionary(str, X) From noreply at buildbot.pypy.org Wed Oct 21 11:02:21 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 11:02:21 +0200 (CEST) Subject: [pypy-commit] pypy default: Issue #2168: fix error message Message-ID: <20151021090221.4E5EE1C13F1@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80374:5540d6ac9103 Date: 2015-10-21 10:57 +0200 http://bitbucket.org/pypy/pypy/changeset/5540d6ac9103/ Log: Issue #2168: fix error message diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -100,7 +100,7 @@ ) if binary and encoding is not None: raise OperationError(space.w_ValueError, - space.wrap("binary mode doesn't take an errors argument") + space.wrap("binary mode doesn't take an encoding argument") ) if binary and newline is not None: raise OperationError(space.w_ValueError, From noreply at buildbot.pypy.org Wed Oct 21 11:26:30 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 11:26:30 +0200 (CEST) Subject: [pypy-commit] cffi default: Backed out changeset fbe55ed7e5e2 Message-ID: <20151021092630.20D161C1328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2353:613c9d67d629 Date: 2015-10-21 11:04 +0200 http://bitbucket.org/cffi/cffi/changeset/613c9d67d629/ Log: Backed out changeset fbe55ed7e5e2 It's probably better written in the docs instead. 
diff --git a/cffi/cparser.py b/cffi/cparser.py --- a/cffi/cparser.py +++ b/cffi/cparser.py @@ -29,8 +29,6 @@ _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") -_r_SAL = re.compile(r"([(,]\s*)(_In_|_Inout_|_Out_|_Outptr_|" - r"_In_opt_|_Inout_opt_|_Out_opt_|_Outptr_opt_)\b") def _get_parser(): global _parser_cache @@ -57,8 +55,6 @@ csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) - if sys.platform == 'win32': - csource = _r_SAL.sub(r'\1 ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". This construction should diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -32,12 +32,6 @@ which had unwanted side-effects. Try saying ``import setuptools`` first, which patches distutils... -* Windows: basic SAL annotations can be given in the cdef() and are - ignored. More precisely, ``_In_``, ``_Inout_``, ``_Out_``, - ``_Outptr_``, ``In_opt_``, ``_Inout_opt_``, ``_Out_opt_`` and - ``_Outptr_opt_`` are ignored if they are following a ``(`` or a - ``,`` (which is where function parameters are). - .. _`ffi.memmove()`: using.html#memmove .. __: https://bugs.python.org/issue23246 .. 
__: https://bitbucket.org/cffi/cffi/pull-requests/65/remove-_hack_at_distutils-which-imports/diff diff --git a/testing/cffi0/test_parsing.py b/testing/cffi0/test_parsing.py --- a/testing/cffi0/test_parsing.py +++ b/testing/cffi0/test_parsing.py @@ -384,10 +384,3 @@ "" % (stdcall, stdcall)) - -def test_basic_SAL_annotations_on_windows(): - if sys.platform != 'win32': - py.test.skip("Only for Windows") - ffi = FFI() - tp = ffi.typeof("int(*)(_In_ int *abc, _Out_opt_ int *bcd)") - assert str(tp) == "" From noreply at buildbot.pypy.org Wed Oct 21 11:26:32 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 11:26:32 +0200 (CEST) Subject: [pypy-commit] cffi default: Document the trick of re.sub() to remove SAL annotations on Windows Message-ID: <20151021092632.1FF791C1328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2354:134c16dc7d9b Date: 2015-10-21 11:14 +0200 http://bitbucket.org/cffi/cffi/changeset/134c16dc7d9b/ Log: Document the trick of re.sub() to remove SAL annotations on Windows diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -209,6 +209,31 @@ Also, this has no effect on structs declared with ``"...;"``---more about it later in `Letting the C compiler fill the gaps`_.) +Note that you can use the type-qualifiers ``const`` and ``restrict`` +(but not ``__restrict`` or ``__restrict__``) in the ``cdef()``, but +this has no effect on the cdata objects that you get at run-time (they +are never ``const``). The effect is limited to knowing if a global +variable is meant to be a constant or not. Also, *new in version +1.3:* when using ``set_source()`` or ``verify()``, these two +qualifiers are copied from the cdef to the generated C code; this +fixes warnings by the C compiler. + +Note a trick if you copy-paste code from sources in which there are +extra macros (for example, the Windows documentation uses SAL +annotations like ``_In_`` or ``_Out_``). 
These hints must be removed +in the string given to cdef(), but it can be done programmatically +like this:: + + ffi.cdef(re.sub(r"\b(_In_|_Inout_|_Out_|_Outptr_)(opt_)?\b", " ", + """ + DWORD WINAPI GetModuleFileName( + _In_opt_ HMODULE hModule, + _Out_ LPTSTR lpFilename, + _In_ DWORD nSize + ); + """)) + + .. _`ffi.set_unicode()`: **ffi.set_unicode(enabled_flag)**: Windows: if ``enabled_flag`` is @@ -232,15 +257,6 @@ ``TCHAR`` and friends where hard-coded as unicode, but ``UNICODE`` was, inconsistently, not defined by default.) -Note that you can use the type-qualifiers ``const`` and ``restrict`` -(but not ``__restrict`` or ``__restrict__``) in the ``cdef()``, but -this has no effect on the cdata objects that you get at run-time (they -are never ``const``). The effect is limited to knowing if a global -variable is meant to be a constant or not. Also, *new in version -1.3:* when using ``set_source()`` or ``verify()``, these two -qualifiers are copied from the cdef to the generated C code; this -fixes warnings by the C compiler. - .. _loading-libraries: From noreply at buildbot.pypy.org Wed Oct 21 12:19:00 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 12:19:00 +0200 (CEST) Subject: [pypy-commit] cffi default: Argh. Message-ID: <20151021101900.8440D1C130A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2355:ad45ea3e4614 Date: 2015-10-21 11:55 +0200 http://bitbucket.org/cffi/cffi/changeset/ad45ea3e4614/ Log: Argh. diff --git a/c/_cffi_backend.c b/c/_cffi_backend.c --- a/c/_cffi_backend.c +++ b/c/_cffi_backend.c @@ -3664,6 +3664,11 @@ Py_DECREF(key); goto error; } + /* Haaaack for our reference count hack: gcmodule.c must not see this + dictionary. The problem is that any PyDict_SetItem() notices that + 'x' is tracked and re-tracks the unique_cache dictionary. So here + we re-untrack it again... 
*/ + PyObject_GC_UnTrack(unique_cache); assert(x->ct_unique_key == NULL); x->ct_unique_key = key; /* the key will be freed in ctypedescr_dealloc() */ From noreply at buildbot.pypy.org Wed Oct 21 13:31:17 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 13:31:17 +0200 (CEST) Subject: [pypy-commit] pypy default: import cffi/ad45ea3e4614 (version 1.3.0) Message-ID: <20151021113117.1B09F1C0036@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r80375:2b91e218456f Date: 2015-10-21 12:55 +0200 http://bitbucket.org/pypy/pypy/changeset/2b91e218456f/ Log: import cffi/ad45ea3e4614 (version 1.3.0) diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -86,9 +86,13 @@ # but should be fine for all the common types. look_for_words = set(COMMON_TYPES) look_for_words.add(';') + look_for_words.add(',') + look_for_words.add('(') + look_for_words.add(')') look_for_words.add('typedef') words_used = set() is_typedef = False + paren = 0 previous_word = '' for word in _r_words.findall(csource): if word in look_for_words: @@ -99,6 +103,15 @@ is_typedef = False elif word == 'typedef': is_typedef = True + paren = 0 + elif word == '(': + paren += 1 + elif word == ')': + paren -= 1 + elif word == ',': + if is_typedef and paren == 0: + words_used.discard(previous_word) + look_for_words.discard(previous_word) else: # word in COMMON_TYPES words_used.add(word) previous_word = word diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -434,6 +434,7 @@ py.test.skip("Windows-only test") if self.Backend is CTypesBackend: py.test.skip("not with the ctypes backend") + win64 = (sys.maxsize > 2**32) # ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -457,8 +458,11 @@ """) m = 
ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) - assert tps is not tpc - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert tps is not tpc + assert str(tps) == "" # ffi = FFI(backend=self.Backend()) ffi.cdef("typedef int (__cdecl *fnc_t)(int);") @@ -466,21 +470,27 @@ tpc = ffi.typeof("fnc_t") tps = ffi.typeof("fns_t") assert str(tpc) == "" - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert str(tps) == "" # fnc = ffi.cast("fnc_t", 0) fns = ffi.cast("fns_t", 0) ffi.new("fnc_t[]", [fnc]) - py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) - py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + if not win64: + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) ffi.new("fns_t[]", [fns]) def test_stdcall_only_on_windows(self): - if sys.platform == 'win32': - py.test.skip("not-Windows-only test") ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - assert "double(*)(double)" in str(ffi.typeof(m.sin)) + if (sys.platform == 'win32' and sys.maxint < 2**32 and + self.Backend is not CTypesBackend): + assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) + else: + assert "double(*)(double)" in str(ffi.typeof(m.sin)) x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -259,6 +259,12 @@ assert repr(ffi.cast("FILE", 123)) == "" % prefix ffi.cdef("typedef char int32_t;") assert repr(ffi.cast("int32_t", 123)) == "" % prefix + ffi = FFI() + ffi.cdef("typedef int bool, *FILE;") + assert repr(ffi.cast("bool", 123)) == "" + assert repr(ffi.cast("FILE", 123)) == "" + ffi = FFI() + ffi.cdef("typedef bool (*fn_t)(bool, bool);") # 
"bool," but within "( )" def test_bool(): ffi = FFI() @@ -371,7 +377,7 @@ tp = ffi.typeof("int(*)(int __stdcall x(int)," " long (__cdecl*y)(void)," " short(WINAPI *z)(short))") - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: stdcall = '__stdcall ' else: stdcall = '' diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -2286,7 +2286,7 @@ #print '...' assert res == -500*999*3 #print 'done' - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -2412,7 +2412,7 @@ return result; } """) - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: py.test.raises(TypeError, lib.call1, lib.cb2) py.test.raises(TypeError, lib.call2, lib.cb1) pt = lib.call1(lib.cb1) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py @@ -54,7 +54,7 @@ ffi = _cffi_backend.FFI() ct = ffi.typeof(ffi.callback(input, lambda: None)) assert isinstance(ct, ffi.CType) - if sys.platform != 'win32': + if sys.platform != 'win32' or sys.maxsize > 2**32: expected_output = expected_output.replace('__stdcall *', '*') assert ct.cname == expected_output diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -22,7 +22,7 @@ 
kwds.setdefault('undef_macros', ['NDEBUG']) module_name = '_CFFI_' + module_name ffi.set_source(module_name, source) - if 1: # test the .cpp mode too + if not os.environ.get('NO_CPP'): # test the .cpp mode too kwds.setdefault('source_extension', '.cpp') source = 'extern "C" {\n%s\n}' % (source,) else: @@ -199,7 +199,7 @@ vals = ['42', '-42', '0x80000000', '-2147483648', '0', '9223372036854775809ULL', '-9223372036854775807LL'] - if sys.maxsize <= 2**32: + if sys.maxsize <= 2**32 or sys.platform == 'win32': vals.remove('-2147483648') ffi = FFI() cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) @@ -459,7 +459,7 @@ ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxsize) lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2', "typedef enum { AA=%d } e1;" % sys.maxsize) - assert lib.AA == sys.maxsize + assert lib.AA == int(ffi.cast("long", sys.maxsize)) assert ffi.sizeof("e1") == ffi.sizeof("long") def test_unique_types(): @@ -1321,7 +1321,7 @@ res = lib.call2(cb2) assert res == -500*999*3 assert res == ffi.addressof(lib, 'call2')(cb2) - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -1409,7 +1409,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) @@ -1465,7 +1465,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) 
py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) From noreply at buildbot.pypy.org Wed Oct 21 13:34:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 13:34:55 +0200 (CEST) Subject: [pypy-commit] cffi release-1.3: make release branch Message-ID: <20151021113455.81B351C00F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-1.3 Changeset: r2356:e79c5d1b43f8 Date: 2015-10-21 12:58 +0200 http://bitbucket.org/cffi/cffi/changeset/e79c5d1b43f8/ Log: make release branch From noreply at buildbot.pypy.org Wed Oct 21 13:34:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 13:34:57 +0200 (CEST) Subject: [pypy-commit] cffi release-1.3: md5/sha1 Message-ID: <20151021113457.6638C1C00F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-1.3 Changeset: r2357:fb4ef4551c6c Date: 2015-10-21 12:59 +0200 http://bitbucket.org/cffi/cffi/changeset/fb4ef4551c6c/ Log: md5/sha1 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.3.0.tar.gz - - MD5: ... + - MD5: a40ed8c8ac653c8fc7d5603711b06eaf - - SHA: ... 
+ - SHA: 54a0b2dbbc2f5d99131aa337e217b636652641a9 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` From noreply at buildbot.pypy.org Wed Oct 21 13:44:36 2015 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 21 Oct 2015 13:44:36 +0200 (CEST) Subject: [pypy-commit] pypy release-4.0.x: merge default into branch Message-ID: <20151021114436.5E3371C1328@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-4.0.x Changeset: r80376:cede6aa8bd5d Date: 2015-10-21 13:07 +0200 http://bitbucket.org/pypy/pypy/changeset/cede6aa8bd5d/ Log: merge default into branch diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -86,9 +86,13 @@ # but should be fine for all the common types. look_for_words = set(COMMON_TYPES) look_for_words.add(';') + look_for_words.add(',') + look_for_words.add('(') + look_for_words.add(')') look_for_words.add('typedef') words_used = set() is_typedef = False + paren = 0 previous_word = '' for word in _r_words.findall(csource): if word in look_for_words: @@ -99,6 +103,15 @@ is_typedef = False elif word == 'typedef': is_typedef = True + paren = 0 + elif word == '(': + paren += 1 + elif word == ')': + paren -= 1 + elif word == ',': + if is_typedef and paren == 0: + words_used.discard(previous_word) + look_for_words.discard(previous_word) else: # word in COMMON_TYPES words_used.add(word) previous_word = word diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -100,7 +100,7 @@ ) if binary and encoding is not None: raise OperationError(space.w_ValueError, - space.wrap("binary mode doesn't take an errors argument") + space.wrap("binary mode doesn't take an encoding argument") ) if binary and newline is not None: raise OperationError(space.w_ValueError, diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_function.py @@ -434,6 +434,7 @@ py.test.skip("Windows-only test") if self.Backend is CTypesBackend: py.test.skip("not with the ctypes backend") + win64 = (sys.maxsize > 2**32) # ffi = FFI(backend=self.Backend()) ffi.cdef(""" @@ -457,8 +458,11 @@ """) m = ffi.dlopen("Kernel32.dll") tps = ffi.typeof(m.QueryPerformanceFrequency) - assert tps is not tpc - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert tps is not tpc + assert str(tps) == "" # ffi = FFI(backend=self.Backend()) ffi.cdef("typedef int (__cdecl *fnc_t)(int);") @@ -466,21 +470,27 @@ tpc = ffi.typeof("fnc_t") tps = ffi.typeof("fns_t") assert str(tpc) == "" - assert str(tps) == "" + if win64: + assert tps is tpc + else: + assert str(tps) == "" # fnc = ffi.cast("fnc_t", 0) fns = ffi.cast("fns_t", 0) ffi.new("fnc_t[]", [fnc]) - py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) - py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) + if not win64: + py.test.raises(TypeError, ffi.new, "fnc_t[]", [fns]) + py.test.raises(TypeError, ffi.new, "fns_t[]", [fnc]) ffi.new("fns_t[]", [fns]) def test_stdcall_only_on_windows(self): - if sys.platform == 'win32': - py.test.skip("not-Windows-only test") ffi = FFI(backend=self.Backend()) ffi.cdef("double __stdcall sin(double x);") # stdcall ignored m = ffi.dlopen(lib_m) - assert "double(*)(double)" in str(ffi.typeof(m.sin)) + if (sys.platform == 'win32' and sys.maxint < 2**32 and + self.Backend is not CTypesBackend): + assert "double(__stdcall *)(double)" in str(ffi.typeof(m.sin)) + else: + assert "double(*)(double)" in str(ffi.typeof(m.sin)) x = m.sin(1.23) assert x == math.sin(1.23) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- 
a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -259,6 +259,12 @@ assert repr(ffi.cast("FILE", 123)) == "" % prefix ffi.cdef("typedef char int32_t;") assert repr(ffi.cast("int32_t", 123)) == "" % prefix + ffi = FFI() + ffi.cdef("typedef int bool, *FILE;") + assert repr(ffi.cast("bool", 123)) == "" + assert repr(ffi.cast("FILE", 123)) == "" + ffi = FFI() + ffi.cdef("typedef bool (*fn_t)(bool, bool);") # "bool," but within "( )" def test_bool(): ffi = FFI() @@ -371,7 +377,7 @@ tp = ffi.typeof("int(*)(int __stdcall x(int)," " long (__cdecl*y)(void)," " short(WINAPI *z)(short))") - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: stdcall = '__stdcall ' else: stdcall = '' diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -2286,7 +2286,7 @@ #print '...' 
assert res == -500*999*3 #print 'done' - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -2412,7 +2412,7 @@ return result; } """) - if sys.platform == 'win32': + if sys.platform == 'win32' and sys.maxsize < 2**32: py.test.raises(TypeError, lib.call1, lib.cb2) py.test.raises(TypeError, lib.call2, lib.cb1) pt = lib.call1(lib.cb1) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_realize_c_type.py @@ -54,7 +54,7 @@ ffi = _cffi_backend.FFI() ct = ffi.typeof(ffi.callback(input, lambda: None)) assert isinstance(ct, ffi.CType) - if sys.platform != 'win32': + if sys.platform != 'win32' or sys.maxsize > 2**32: expected_output = expected_output.replace('__stdcall *', '*') assert ct.cname == expected_output diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -22,7 +22,7 @@ kwds.setdefault('undef_macros', ['NDEBUG']) module_name = '_CFFI_' + module_name ffi.set_source(module_name, source) - if 1: # test the .cpp mode too + if not os.environ.get('NO_CPP'): # test the .cpp mode too kwds.setdefault('source_extension', '.cpp') source = 'extern "C" {\n%s\n}' % (source,) else: @@ -199,7 +199,7 @@ vals = ['42', '-42', '0x80000000', '-2147483648', '0', '9223372036854775809ULL', '-9223372036854775807LL'] - if sys.maxsize <= 2**32: + if sys.maxsize <= 2**32 or sys.platform == 'win32': vals.remove('-2147483648') ffi = FFI() cdef_lines = ['#define FOO_%d_%d %s' % (i, j, vals[i]) @@ 
-459,7 +459,7 @@ ffi.cdef("typedef enum { AA=%d } e1;" % sys.maxsize) lib = verify(ffi, 'test_verify_anonymous_enum_with_typedef2', "typedef enum { AA=%d } e1;" % sys.maxsize) - assert lib.AA == sys.maxsize + assert lib.AA == int(ffi.cast("long", sys.maxsize)) assert ffi.sizeof("e1") == ffi.sizeof("long") def test_unique_types(): @@ -1321,7 +1321,7 @@ res = lib.call2(cb2) assert res == -500*999*3 assert res == ffi.addressof(lib, 'call2')(cb2) - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: assert '__stdcall' in str(ffi.typeof(cb2)) assert '__stdcall' not in str(ffi.typeof(cb1)) py.test.raises(TypeError, lib.call1, cb2) @@ -1409,7 +1409,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) @@ -1465,7 +1465,7 @@ """) ptr_call1 = ffi.addressof(lib, 'call1') ptr_call2 = ffi.addressof(lib, 'call2') - if sys.platform == 'win32': + if sys.platform == 'win32' and not sys.maxsize > 2**32: py.test.raises(TypeError, lib.call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, ptr_call1, ffi.addressof(lib, 'cb2')) py.test.raises(TypeError, lib.call2, ffi.addressof(lib, 'cb1')) diff --git a/rpython/rlib/test/test_rweakvaldict.py b/rpython/rlib/test/test_rweakvaldict.py --- a/rpython/rlib/test/test_rweakvaldict.py +++ b/rpython/rlib/test/test_rweakvaldict.py @@ -138,6 +138,10 @@ f(1) interpret(f, [1]) + + at py.test.mark.xfail( + reason="may fail with AssertionError, depending on annotation order") +def test_rpython_merge_RWeakValueDictionary3(): def g(x): if x: d = RWeakValueDictionary(str, X) From noreply at buildbot.pypy.org Wed Oct 21 21:35:30 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 21 Oct 2015 
21:35:30 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: syscall write working properly. string put into literal pool Message-ID: <20151021193530.0FB0D1C0290@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80380:6ad3bdfddaa3 Date: 2015-10-21 14:42 +0200 http://bitbucket.org/pypy/pypy/changeset/6ad3bdfddaa3/ Log: syscall write working properly. string put into literal pool diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -84,6 +84,13 @@ byte = displace >> 12 & 0xff mc.writechar(chr(byte)) +def build_i(mnemonic, (opcode,)): + @builder.arguments('u8') + def encode_i(self, imm): + self.writechar(opcode) + self.writechar(chr(imm)) + return encode_i + def build_rr(mnemonic, (opcode,)): @builder.arguments('r,r') def encode_rr(self, reg1, reg2): diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -77,6 +77,7 @@ 'SRP': ('ssc', ['\xF0']), 'MVCK': ('ssd', ['\xD9']), + 'LA': ('rx', ['\x41']), 'LAY': ('rxy', ['\xE3','\x71']), 'LMD': ('sse', ['\xEF']), 'LMG': ('rsy', ['\xEB','\x04']), @@ -87,6 +88,8 @@ 'PKA': ('ssf', ['\xE9']), 'STMG': ('rsy', ['\xEB','\x24']), + + 'SVC': ('i', ['\x0A']), } all_mnemonic_codes.update(arith_mnemonic_codes) all_mnemonic_codes.update(logic_mnemonic_codes) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -123,7 +123,6 @@ return ctxmgr() def patch_branch_imm16(self, base, imm): - print "branch to", imm, "base", base, self.cur(), self.pos('lit.end'), self.pos('lit') imm = (imm & 0xffff) >> 1 self.mc.overwrite(base, 
chr((imm >> 8) & 0xFF)) self.mc.overwrite(base+1, chr(imm & 0xFF)) @@ -141,7 +140,6 @@ def jump_to(self, reg, label): val = (self.pos(label) - self.cur()) - print "val", val self.mc.BRAS(reg, loc.imm(val)) def test_stmg(self): @@ -172,3 +170,17 @@ self.a.jmpto(reg.r14) assert run_asm(self.a) == 120 + def test_printf(self): + with self.label('func', func=True): + with self.label('lit'): + self.mc.BRAS(reg.r13, loc.imm(0)) + for c in "hello syscall\n": + self.mc.writechar(c) + self.jump_here(self.mc.BRAS, 'lit') + self.mc.LGHI(reg.r2, loc.imm(1)) # stderr + self.mc.LA(reg.r3, loc.addr(0, reg.r13)) # char* + self.mc.LGHI(reg.r4, loc.imm(14)) # length + # write sys call + self.mc.SVC(loc.imm(4)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 14 From noreply at buildbot.pypy.org Wed Oct 21 21:28:11 2015 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 21 Oct 2015 21:28:11 +0200 (CEST) Subject: [pypy-commit] pypy default: better document cffi 1.3 Message-ID: <20151021192811.8FBFA1C0036@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80378:fda97fe4a9d5 Date: 2015-10-21 23:16 +1100 http://bitbucket.org/pypy/pypy/changeset/fda97fe4a9d5/ Log: better document cffi 1.3 diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst --- a/pypy/doc/release-4.0.0.rst +++ b/pypy/doc/release-4.0.0.rst @@ -73,7 +73,8 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. Armin Rigo continued improving it, and PyPy reaps the benefits of cffi-1.3: improved manangement of object -lifetimes, __stdcall on Win32, ffi.memmove(), ... +lifetimes, __stdcall on Win32, ffi.memmove(), and percolate ``const``, +``restrict`` keywords from cdef to C code. .. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 .. 
_`vectorization`: http://pypyvecopt.blogspot.co.at/ From noreply at buildbot.pypy.org Thu Oct 22 02:07:55 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 02:07:55 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: assembler recursion: testing if it the assembler can correctly invoke a recursive function. added helper functions to label positions (in the test suite) Message-ID: <20151022000755.DA48C1C130A@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80377:aa2a464734d2 Date: 2015-10-21 13:58 +0200 http://bitbucket.org/pypy/pypy/changeset/aa2a464734d2/ Log: assembler recursion: testing if it the assembler can correctly invoke a recursive function. added helper functions to label positions (in the test suite) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,14 +50,17 @@ return clt.asmmemmgr_blocks def gen_func_prolog(self): - self.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.sp)) - self.mc.AHI(reg.sp, loc.imm(-96)) + STACK_FRAME_SIZE = 40 + self.mc.STMG(reg.r11, reg.r15, loc.addr(-STACK_FRAME_SIZE, reg.sp)) + self.mc.AHI(reg.sp, loc.imm(-STACK_FRAME_SIZE)) def gen_func_epilog(self): self.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.sp)) self.jmpto(reg.r14) def jmpto(self, register): + # TODO, manual says this is a performance killer, there + # might be another operation for unconditional JMP? self.mc.BCR_rr(0xf, register.value) def _build_failure_recovery(self, exc, withfloats=False): diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -76,9 +76,9 @@ | ... | base | length[0:11] | length[12:20] | ... 
| +-------------------------------------------------+ """ - displace = basedisp.displace & 0xfffff + displace = basedisp.displace & BIT_MASK_20 base = basedisp.base & 0xf - byte = displace >> 8 & 0xf | base << 4 + byte = (displace >> 8) & 0xf | base << 4 mc.writechar(chr(byte)) mc.writechar(chr(displace & 0xff)) byte = displace >> 12 & 0xff diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -29,7 +29,7 @@ # and one byte and store it back at the op2 position 'NI': ('si', ['\x94']), 'NIY': ('siy', ['\xEB','\x54']), - 'NC': ('ssa', ['\xD4']), + 'NC': ('ssa', ['\xD4']), # AND immediate 'NIHH': ('ri_u', ['\xA5', '\x04']), diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -110,4 +110,65 @@ self.a.gen_func_epilog() assert run_asm(self.a) == 0x0807060504030201 + def label(self, name, func=False): + self.mc.mark_op(name) + class ctxmgr(object): + def __enter__(_self): + if func: + self.a.gen_func_prolog() + def __exit__(_self, a, b, c): + if func: + self.a.gen_func_epilog() + self.mc.mark_op(name + '.end') + return ctxmgr() + def patch_branch_imm16(self, base, imm): + print "branch to", imm, "base", base, self.cur(), self.pos('lit.end'), self.pos('lit') + imm = (imm & 0xffff) >> 1 + self.mc.overwrite(base, chr((imm >> 8) & 0xFF)) + self.mc.overwrite(base+1, chr(imm & 0xFF)) + + def pos(self, name): + return self.mc.ops_offset[name] + def cur(self): + return self.mc.get_relative_pos() + + def jump_here(self, func, name): + if func.__name__ == 'BRAS': + self.patch_branch_imm16(self.pos(name)+2, self.cur() - self.pos(name)) + else: + raise NotImplementedError + + def jump_to(self, reg, label): + val = (self.pos(label) - self.cur()) + print "val", val + 
self.mc.BRAS(reg, loc.imm(val)) + + def test_stmg(self): + self.mc.LGR(reg.r2, reg.r15) + self.a.jmpto(reg.r14) + print hex(run_asm(self.a)) + + def test_recursion(self): + with self.label('func', func=True): + with self.label('lit'): + self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.write('\x00\x00\x00\x00\x00\x00\x00\x00') + self.jump_here(self.mc.BRAS, 'lit') + # recurse X times + self.mc.XGR(reg.r2, reg.r2) + self.mc.LGHI(reg.r9, loc.imm(15)) + with self.label('L1'): + self.mc.BRAS(reg.r14, loc.imm(0)) + with self.label('rec', func=True): + self.mc.AGR(reg.r2, reg.r9) + self.mc.AHI(reg.r9, loc.imm(-1)) + # if not entered recursion, return from activation record + # implicitly generated here by with statement + self.mc.BRC(con.GT, loc.imm(self.pos('rec') - self.cur())) + self.jump_here(self.mc.BRAS, 'L1') + # call rec... recursivly + self.jump_to(reg.r14, 'rec') + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 120 + From noreply at buildbot.pypy.org Thu Oct 22 02:32:05 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 Oct 2015 02:32:05 +0200 (CEST) Subject: [pypy-commit] buildbot default: try to start fresh virtualenv each time (untested) Message-ID: <20151022003205.5BEB31C1365@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r969:0a79c55367c5 Date: 2015-10-21 23:21 +1100 http://bitbucket.org/pypy/buildbot/changeset/0a79c55367c5/ Log: try to start fresh virtualenv each time (untested) diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -919,7 +919,7 @@ self.addStep(ShellCmd( description='create virtualenv', haltOnFailure=True, - command='virtualenv ../venv')) + command='virtualenv --clear ../venv')) # install deps self.addStep(ShellCmd( description="install dependencies", From noreply at buildbot.pypy.org Thu Oct 22 02:57:07 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 22 Oct 2015 02:57:07 +0200 (CEST) Subject: [pypy-commit] pypy 
default: tweak release document Message-ID: <20151022005707.4A2B81C13E6@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80379:b86e4b638d20 Date: 2015-10-21 23:40 +1100 http://bitbucket.org/pypy/pypy/changeset/b86e4b638d20/ Log: tweak release document diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst --- a/pypy/doc/release-4.0.0.rst +++ b/pypy/doc/release-4.0.0.rst @@ -3,7 +3,7 @@ ============ We're pleased and proud to unleash PyPy 4.0.0, a major update of the PyPy -python2.7.10 compatible interpreter with a Just In Time compiler. +python 2.7.10 compatible interpreter with a Just In Time compiler. We have improved `warmup time and memory overhead used for tracing`_, added `vectorization`_ for numpy and general loops where possible on x86 hardware (disabled by default), @@ -72,16 +72,17 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. Armin Rigo continued improving it, -and PyPy reaps the benefits of cffi-1.3: improved manangement of object +and PyPy reaps the benefits of `cffi-1.3`_: improved manangement of object lifetimes, __stdcall on Win32, ffi.memmove(), and percolate ``const``, ``restrict`` keywords from cdef to C code. -.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 +.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10/pypy-memory-and-warmup-improvements-2.html .. _`vectorization`: http://pypyvecopt.blogspot.co.at/ .. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.3`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-3-0 .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. 
_`numpy`: https://bitbucket.org/pypy/numpy From noreply at buildbot.pypy.org Thu Oct 22 03:08:08 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 03:08:08 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: catchup with default Message-ID: <20151022010808.CAB571C1CD1@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80381:a239179e43b6 Date: 2015-10-21 15:02 +0200 http://bitbucket.org/pypy/pypy/changeset/a239179e43b6/ Log: catchup with default diff too long, truncating to 2000 out of 27093 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -86,9 +86,13 @@ # but should be fine for all the common types. 
look_for_words = set(COMMON_TYPES) look_for_words.add(';') + look_for_words.add(',') + look_for_words.add('(') + look_for_words.add(')') look_for_words.add('typedef') words_used = set() is_typedef = False + paren = 0 previous_word = '' for word in _r_words.findall(csource): if word in look_for_words: @@ -99,6 +103,15 @@ is_typedef = False elif word == 'typedef': is_typedef = True + paren = 0 + elif word == '(': + paren += 1 + elif word == ')': + paren -= 1 + elif word == ',': + if is_typedef and paren == 0: + words_used.discard(previous_word) + look_for_words.discard(previous_word) else: # word in COMMON_TYPES words_used.add(word) previous_word = word diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -22,6 +22,16 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for 
side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +122,7 @@ return basename def get_extension(self): + _hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -76,6 +76,11 @@ if "cppyy" in working_modules: working_modules.remove("cppyy") # depends on ctypes +if sys.platform.startswith("linux"): + _mach = os.popen('uname -m', 'r').read().strip() + if _mach.startswith('ppc'): + working_modules.remove("_continuation") + module_dependencies = { '_multiprocessing': [('objspace.usemodules.time', True), diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-4.0.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-4.0.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-4.0.0.rst @@ -0,0 +1,210 @@ +============ +PyPy 4.0.0 +============ + +We're pleased and proud to unleash PyPy 4.0.0, a major update of the PyPy +python 2.7.10 compatible interpreter with a Just In Time compiler. +We have improved `warmup time and memory overhead used for tracing`_, added +`vectorization`_ for numpy and general loops where possible on x86 hardware +(disabled by default), +refactored rough edges in rpython, and increased functionality of numpy. 
+ +You can download the PyPy 4.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +New Version Numbering +===================== + +Since the past release, PyPy 2.6.1, we decided to update the PyPy 2.x.x +versioning directly to PyPy 4.x.x, to avoid confusion with CPython 2.7 +and 3.5. Note that this version of PyPy uses the stdlib and implements the +syntax of CPython 2.7.10. + +Vectorization +============= + +Richard Plangger began work in March and continued over a Google Summer of Code +to add a `vectorization` step to the trace optimizer. The step recognizes common +constructs and emits SIMD code where possible, much as any modern compiler does. +This vectorization happens while tracing running code, so it is actually easier +at run-time to determine the +availability of possible vectorization than it is for ahead-of-time compilers. + +Availability of SIMD hardware is detected at run time, without needing to +precompile various code paths into the executable. + +The first version of the vectorization has been merged in this release, since +it is so new it is off by default. To enable the vectorization in built-in JIT +drivers (like numpy ufuncs), add `--jit vec=1`, to enable all implemented +vectorization add `--jit vec_all=1` + +Benchmarks and a summary of this work appear `here`_ + +Internal Refactoring and Warmup Time Improvement +================================================ + +Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow +PyPy to more efficiently use `guards`_ in jitted code. 
They also rewrote unrolling, +leading to a warmup time improvement of 20% or so. + +Numpy +===== + +Our implementation of `numpy`_ continues to improve. ndarray and the numeric dtypes +are very close to feature-complete; record, string and unicode dtypes are mostly +supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 +modules that call out to the same underlying libraries that upstream numpy uses. +Please try it out, especially using the new vectorization (via `--jit vec=1` on the +command line) and let us know what is missing for your code. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. Armin Rigo continued improving it, +and PyPy reaps the benefits of `cffi-1.3`_: improved manangement of object +lifetimes, __stdcall on Win32, ffi.memmove(), and percolate ``const``, +``restrict`` keywords from cdef to C code. + +.. _`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10/pypy-memory-and-warmup-improvements-2.html +.. _`vectorization`: http://pypyvecopt.blogspot.co.at/ +.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.3`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-3-0 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also introduce `support for the 64 bit PowerPC`_ hardware, specifically +Linux running the big- and little-endian variants of ppc64. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org +.. _`support for the 64 bit PowerPC`: http://morepypy.blogspot.com/2015/10/powerpc-backend-for-jit.html +.. _`here`: http://morepypy.blogspot.com/2015/10/automatic-simd-vectorization-support-in.html + +Other Highlights (since 2.6.1 release two months ago) +===================================================== + +* Bug Fixes + + * Applied OPENBSD downstream fixes + + * Fix a crash on non-linux when running more than 20 threads + + * In cffi, ffi.new_handle() is more cpython compliant + + * Accept unicode in functions inside the _curses cffi backend exactly like cpython + + * Fix a segfault in itertools.islice() + + * Use gcrootfinder=shadowstack by default, asmgcc on linux only + + * Fix ndarray.copy() for upstream compatability when copying non-contiguous arrays + + * Fix assumption that lltype.UniChar is unsigned + + * Fix a subtle bug with stacklets on shadowstack + + * Improve support for the cpython capi in cpyext (our capi compatibility + layer). 
Fixing these issues inspired some thought about cpyext in general, + stay tuned for more improvements + + * When loading dynamic libraries, in case of a certain loading error, retry + loading the library assuming it is actually a linker script, like on Arch + and Gentoo + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * Add an optimization pass to vectorize loops using x86 SIMD intrinsics. + + * Support __stdcall on Windows in CFFI + + * Improve debug logging when using PYPYLOG=??? + + * Deal with platforms with no RAND_egd() in OpenSSL + +* Numpy: + + * Add support for ndarray.ctypes + + * Fast path for mixing numpy scalars and floats + + * Add support for creating Fortran-ordered ndarrays + + * Fix casting failures in linalg (by extending ufunc casting) + + * Recognize and disallow (for now) pickling of ndarrays with objects + embedded in them + +* Performance improvements and refactorings: + + * Reuse hashed keys across dictionaries and sets + + * Refactor JIT interals to improve warmup time by 20% or so at the cost of a + minor regression in JIT speed + + * Recognize patterns of common sequences in the JIT backends and optimize them + + * Make the garbage collecter more incremental over external_malloc() calls + + * Share guard resume data where possible which reduces memory usage + + * Fast path for zip(list, list) + + * Reduce the number of checks in the JIT for lst[a:] + + * Move the non-optimizable part of callbacks outside the JIT + + * Factor in field immutability when invalidating heap information + + * Unroll itertools.izip_longest() with two sequences + + * Minor optimizations after analyzing output from `vmprof`_ and trace logs + + * Remove many class attributes in rpython classes + + * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + +.. _`vmprof`: https://vmprof.readthedocs.org +.. 
_resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -0,0 +1,94 @@ +======================== +What's new in PyPy 4.0.0 +======================== + +.. this is a revision shortly after release-2.6.1 +.. startrev: 3a8f5481dab4 + +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. + +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. Should improve warmup time +by 20% or so. + +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends + +.. branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports + +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. + +.. branch: remember-tracing-counts +Reenable jithooks + +.. branch: detect_egd2 + +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. + +.. 
branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). + +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions + +.. branch: vecopt +.. branch: vecopt-merge + +A new optimization pass to use emit vectorized loops + +.. branch: ppc-updated-backend + +The PowerPC JIT backend is merged. + +.. branch: osx-libffi + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,76 +1,8 @@ -======================= -What's new in PyPy 2.6+ -======================= +========================= +What's new in PyPy 4.0.+ +========================= -.. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. this is a revision shortly after release-4.0.0 +.. startrev: 3a8f5481dab4 -.. branch: keys_with_hash -Improve the performance of dict.update() and a bunch of methods from -sets, by reusing the hash value stored in one dict when inspecting -or changing another dict with that key. -.. branch: optresult-unroll -A major refactoring of the ResOperations that kills Box. 
Also rewrote -unrolling to enable future enhancements. Should improve warmup time -by 20% or so. - -.. branch: optimize-cond-call -Optimize common sequences of operations like -``int_lt/cond_call`` in the JIT backends - -.. branch: missing_openssl_include -Fix for missing headers in OpenBSD, already applied in downstream ports - -.. branch: gc-more-incremental -Remove a source of non-incremental-ness in the GC: now -external_malloc() no longer runs gc_step_until() any more. If there -is a currently-running major collection, we do only so many steps -before returning. This number of steps depends on the size of the -allocated object. It is controlled by tracking the general progress -of these major collection steps and the size of old objects that -keep adding up between them. - -.. branch: remember-tracing-counts -Reenable jithooks - -.. branch: detect_egd2 - -.. branch: shadowstack-no-move-2 -Issue #2141: fix a crash on Windows and OS/X and ARM when running -at least 20 threads. - -.. branch: numpy-ctypes - -Add support for ndarray.ctypes property. - -.. branch: share-guard-info - -Share guard resume data between consecutive guards that have only -pure operations and guards in between. - -.. branch: issue-2148 - -Fix performance regression on operations mixing numpy scalars and Python -floats, cf. issue #2148. - -.. branch: cffi-stdcall -Win32: support '__stdcall' in CFFI. - -.. branch: callfamily - -Refactorings of annotation and rtyping of function calls. - -.. branch: fortran-order - -Allow creation of fortran-ordered ndarrays - -.. branch: type_system-cleanup - -Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. - -.. branch: cffi-handle-lifetime - -ffi.new_handle() returns handles that work more like CPython's: they -remain valid as long as the target exists (unlike the previous -version, where handles become invalid *before* the __del__ is called). 
diff --git a/pypy/module/_file/readinto.py b/pypy/module/_file/readinto.py --- a/pypy/module/_file/readinto.py +++ b/pypy/module/_file/readinto.py @@ -9,7 +9,7 @@ os_read = rffi.llexternal(UNDERSCORE_ON_WIN32 + 'read', [rffi.INT, rffi.CCHARP, rffi.SIZE_T], - rffi.SIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) + rffi.SSIZE_T, save_err=rffi.RFFI_SAVE_ERRNO) def direct_readinto(self, w_rwbuffer): @@ -61,6 +61,7 @@ stream.flush() while True: got = os_read(fd, rffi.ptradd(target_address, target_pos), size) + got = rffi.cast(lltype.Signed, got) if got > 0: target_pos += got size -= got diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -572,6 +572,17 @@ assert len(a) == 10 assert a.tostring() == 'foobar6789' + @py.test.mark.skipif("os.name != 'posix'") + def test_readinto_error(self): + import _socket, posix, array + s = _socket.socket() + buff = array.array("c", "X" * 65) + fh = posix.fdopen(posix.dup(s.fileno()), 'rb') + # "Transport endpoint is not connected" + raises(IOError, fh.readinto, buff) + fh.close() + s.close() + def test_weakref(self): """Files are weakrefable.""" import weakref diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -100,7 +100,7 @@ ) if binary and encoding is not None: raise OperationError(space.w_ValueError, - space.wrap("binary mode doesn't take an errors argument") + space.wrap("binary mode doesn't take an encoding argument") ) if binary and newline is not None: raise OperationError(space.w_ValueError, diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -66,6 +66,7 @@ import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile import pypy.module.cpyext.pystrtod +import pypy.module.cpyext.pytraceback # now 
that all rffi_platform.Struct types are registered, configure them api.configure_types() diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -126,6 +126,7 @@ #include "fileobject.h" #include "pysignals.h" #include "pythread.h" +#include "traceback.h" /* Missing definitions */ #include "missing.h" diff --git a/pypy/module/cpyext/include/frameobject.h b/pypy/module/cpyext/include/frameobject.h --- a/pypy/module/cpyext/include/frameobject.h +++ b/pypy/module/cpyext/include/frameobject.h @@ -4,7 +4,7 @@ extern "C" { #endif -typedef struct { +typedef struct _frame { PyObject_HEAD PyCodeObject *f_code; PyObject *f_globals; diff --git a/pypy/module/cpyext/include/traceback.h b/pypy/module/cpyext/include/traceback.h --- a/pypy/module/cpyext/include/traceback.h +++ b/pypy/module/cpyext/include/traceback.h @@ -4,7 +4,15 @@ extern "C" { #endif -typedef PyObject PyTracebackObject; +struct _frame; + +typedef struct _traceback { + PyObject_HEAD + struct _traceback *tb_next; + struct _frame *tb_frame; + int tb_lasti; + int tb_lineno; +} PyTracebackObject; #ifdef __cplusplus } diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/pytraceback.py @@ -0,0 +1,50 @@ +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, + cpython_api, bootstrap_function, cpython_struct, build_type_checkers) +from pypy.module.cpyext.pyobject import ( + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) +from pypy.module.cpyext.frameobject import PyFrameObject +from rpython.rlib.unroll import unrolling_iterable +from pypy.interpreter.error import OperationError +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter import pycode + + 
+PyTracebackObjectStruct = lltype.ForwardReference() +PyTracebackObject = lltype.Ptr(PyTracebackObjectStruct) +PyTracebackObjectFields = PyObjectFields + ( + ("tb_next", PyTracebackObject), + ("tb_frame", PyFrameObject), + ("tb_lasti", rffi.INT), + ("tb_lineno", rffi.INT), +) +cpython_struct("PyTracebackObject", PyTracebackObjectFields, PyTracebackObjectStruct) + + at bootstrap_function +def init_traceback(space): + make_typedescr(PyTraceback.typedef, + basestruct=PyTracebackObject.TO, + attach=traceback_attach, + dealloc=traceback_dealloc) + + +def traceback_attach(space, py_obj, w_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + traceback = space.interp_w(PyTraceback, w_obj) + if traceback.next is None: + w_next_traceback = None + else: + w_next_traceback = space.wrap(traceback.next) + py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) + py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) + rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) + rffi.setintfield(py_traceback, 'c_tb_lineno',traceback.get_lineno()) + + at cpython_api([PyObject], lltype.Void, external=False) +def traceback_dealloc(space, py_obj): + py_traceback = rffi.cast(PyTracebackObject, py_obj) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_next)) + Py_DecRef(space, rffi.cast(PyObject, py_traceback.c_tb_frame)) + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -61,6 +61,28 @@ args_w = space.fixedview(w_args) return generic_cpy_call(space, func_binary, w_self, args_w[0]) +def wrap_binaryfunc_l(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not 
space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + return space.w_NotImplemented + + return generic_cpy_call(space, func_binary, w_self, args_w[0]) + +def wrap_binaryfunc_r(space, w_self, w_args, func): + func_binary = rffi.cast(binaryfunc, func) + check_num_args(space, w_args, 1) + args_w = space.fixedview(w_args) + + if not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self))): + return space.w_NotImplemented + + return generic_cpy_call(space, func_binary, args_w[0], w_self) + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/test/test_traceback.py @@ -0,0 +1,40 @@ +from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.cpyext.test.test_api import BaseApiTest +from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pytraceback import PyTracebackObject +from pypy.interpreter.pytraceback import PyTraceback +from pypy.interpreter.pyframe import PyFrame + +class TestPyTracebackObject(BaseApiTest): + def test_traceback(self, space, api): + w_traceback = space.appexec([], """(): + import sys + try: + 1/0 + except: + return sys.exc_info()[2] + """) + py_obj = make_ref(space, w_traceback) + py_traceback = rffi.cast(PyTracebackObject, py_obj) + assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + space.gettypeobject(PyTraceback.typedef)) + + traceback = space.interp_w(PyTraceback, w_traceback) + assert traceback.lasti == py_traceback.c_tb_lasti + assert traceback.get_lineno() == py_traceback.c_tb_lineno + assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), + space.wrap(py_traceback.c_tb_lasti)) + assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), + from_ref(space, rffi.cast(PyObject, 
+ py_traceback.c_tb_frame))) + + while not space.is_w(w_traceback, space.w_None): + assert space.is_w( + w_traceback, + from_ref(space, rffi.cast(PyObject, py_traceback))) + w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) + py_traceback = py_traceback.c_tb_next + + assert lltype.normalizeptr(py_traceback) is None + + api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -589,6 +589,48 @@ assert bool(module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) + def test_binaryfunc(self): + module = self.import_extension('foo', [ + ("new_obj", "METH_NOARGS", + """ + FooObject *fooObj; + + Foo_Type.tp_as_number = &foo_as_number; + foo_as_number.nb_add = foo_nb_add_call; + if (PyType_Ready(&Foo_Type) < 0) return NULL; + fooObj = PyObject_New(FooObject, &Foo_Type); + if (!fooObj) { + return NULL; + } + + return (PyObject *)fooObj; + """)], + """ + typedef struct + { + PyObject_HEAD + } FooObject; + + static PyObject * + foo_nb_add_call(PyObject *self, PyObject *other) + { + return PyInt_FromLong(42); + } + + PyTypeObject Foo_Type = { + PyObject_HEAD_INIT(0) + /*ob_size*/ 0, + /*tp_name*/ "Foo", + /*tp_basicsize*/ sizeof(FooObject), + }; + static PyNumberMethods foo_as_number; + """) + a = module.new_obj() + b = module.new_obj() + c = 3 + assert (a + b) == 42 + raises(TypeError, "b + c") + def test_tp_new_in_subclass_of_type(self): skip("BROKEN") module = self.import_module(name='foo3') diff --git a/pypy/module/cpyext/test/test_version.py b/pypy/module/cpyext/test/test_version.py --- a/pypy/module/cpyext/test/test_version.py +++ b/pypy/module/cpyext/test/test_version.py @@ -1,6 +1,16 @@ +import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase +def test_pragma_version(): + from pypy.module.sys.version import CPYTHON_VERSION + rootdir = 
py.path.local(__file__).join('..', '..') + pyconfig_h = rootdir.join('include', 'pyconfig.h') + version = '%d%d' % (CPYTHON_VERSION[0], CPYTHON_VERSION[1]) + pragma = 'pragma comment(lib,"python%s.lib")' % version + assert pragma in pyconfig_h.read() + + class AppTestVersion(AppTestCpythonExtensionBase): def test_versions(self): diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -591,6 +591,14 @@ def descr_setitem(self, space, w_item, w_value): if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) + elif space.isinstance_w(w_item, space.w_int): + indx = space.int_w(w_item) + try: + item = self.dtype.names[indx][0] + except IndexError: + if indx < 0: + indx += len(self.dtype.names) + raise oefmt(space.w_IndexError, "invalid index (%d)", indx) else: raise oefmt(space.w_IndexError, "invalid index") try: diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py --- a/pypy/module/micronumpy/casting.py +++ b/pypy/module/micronumpy/casting.py @@ -38,7 +38,9 @@ dtypes_w.append(dtype) return find_result_type(space, arrays_w, dtypes_w) - + at jit.look_inside_iff(lambda space, arrays_w, dtypes_w: + jit.loop_unrolling_heuristic(arrays_w, len(arrays_w)) and + jit.loop_unrolling_heuristic(dtypes_w, len(dtypes_w))) def find_result_type(space, arrays_w, dtypes_w): # equivalent to PyArray_ResultType if len(arrays_w) == 1 and not dtypes_w: @@ -89,6 +91,9 @@ NPY.STRINGLTR: 3, NPY.STRINGLTR2: 3, UnicodeType.kind: 3, VoidType.kind: 3, ObjectType.kind: 3} +# this is safe to unroll since it'll only be seen if we look inside +# the find_result_type + at jit.unroll_safe def _use_min_scalar(arrays_w, dtypes_w): """Helper for find_result_type()""" if not arrays_w: diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -2,6 +2,7 
@@ It should not be imported by the module itself """ import re +import py from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root, ObjSpace from pypy.interpreter.error import OperationError @@ -12,6 +13,10 @@ from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache +from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary +from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, + UserDelAction) +from pypy.interpreter.pyframe import PyFrame class BogusBytecode(Exception): @@ -32,12 +37,11 @@ class BadToken(Exception): pass - SINGLE_ARG_FUNCTIONS = ["sum", "prod", "max", "min", "all", "any", "unegative", "flat", "tostring", "count_nonzero", "argsort", "cumsum", "logical_xor_reduce"] -TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted'] -TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype'] +TWO_ARG_FUNCTIONS = ["dot", 'take', 'searchsorted', 'multiply'] +TWO_ARG_FUNCTIONS_OR_NONE = ['view', 'astype', 'reshape'] THREE_ARG_FUNCTIONS = ['where'] class W_TypeObject(W_Root): @@ -57,6 +61,10 @@ w_OverflowError = W_TypeObject("OverflowError") w_NotImplementedError = W_TypeObject("NotImplementedError") w_AttributeError = W_TypeObject("AttributeError") + w_StopIteration = W_TypeObject("StopIteration") + w_KeyError = W_TypeObject("KeyError") + w_SystemExit = W_TypeObject("SystemExit") + w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt") w_None = None w_bool = W_TypeObject("bool") @@ -72,13 +80,26 @@ w_dict = W_TypeObject("dict") w_object = W_TypeObject("object") w_buffer = W_TypeObject("buffer") + w_type = W_TypeObject("type") - def __init__(self): + def __init__(self, config=None): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild self.w_Ellipsis = special.Ellipsis() self.w_NotImplemented = special.NotImplemented() + if config is None: + from 
pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(translating=False) + self.config = config + + self.interned_strings = make_weak_value_dictionary(self, str, W_Root) + self.builtin = DictObject({}) + self.FrameClass = PyFrame + self.threadlocals = ThreadLocals() + self.actionflag = ActionFlag() # changed by the signal module + self.check_signal_action = None # changed by the signal module + def _freeze_(self): return True @@ -89,12 +110,17 @@ return isinstance(w_obj, ListObject) or isinstance(w_obj, W_NDimArray) def len(self, w_obj): - assert isinstance(w_obj, ListObject) - return self.wrap(len(w_obj.items)) + if isinstance(w_obj, ListObject): + return self.wrap(len(w_obj.items)) + elif isinstance(w_obj, DictObject): + return self.wrap(len(w_obj.items)) + raise NotImplementedError def getattr(self, w_obj, w_attr): assert isinstance(w_attr, StringObject) - return w_obj.getdictvalue(self, w_attr.v) + if isinstance(w_obj, DictObject): + return w_obj.getdictvalue(self, w_attr) + return None def isinstance_w(self, w_obj, w_tp): try: @@ -102,6 +128,22 @@ except AttributeError: return False + def iter(self, w_iter): + if isinstance(w_iter, ListObject): + raise NotImplementedError + #return IterObject(space, w_iter.items) + elif isinstance(w_iter, DictObject): + return IterDictObject(self, w_iter) + + def next(self, w_iter): + return w_iter.next() + + def contains(self, w_iter, w_key): + if isinstance(w_iter, DictObject): + return self.wrap(w_key in w_iter.items) + + raise NotImplementedError + def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): return (self.int_w(w_idx), 0, 0, 1) @@ -123,6 +165,10 @@ lgt = (stop - start - 1) / step + 1 return (start, stop, step, lgt) + def unicode_from_object(self, w_item): + # XXX + return StringObject("") + @specialize.argtype(1) def wrap(self, obj): if isinstance(obj, float): @@ -145,7 +191,55 @@ def newcomplex(self, r, i): return ComplexObject(r, i) + def newfloat(self, f): + return 
self.float(f) + + def le(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_le(self, w_obj2) + + def lt(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_lt(self, w_obj2) + + def ge(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_ge(self, w_obj2) + + def add(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_add(self, w_obj2) + + def sub(self, w_obj1, w_obj2): + return self.wrap(1) + + def mul(self, w_obj1, w_obj2): + assert isinstance(w_obj1, boxes.W_GenericBox) + assert isinstance(w_obj2, boxes.W_GenericBox) + return w_obj1.descr_mul(self, w_obj2) + + def pow(self, w_obj1, w_obj2, _): + return self.wrap(1) + + def neg(self, w_obj1): + return self.wrap(0) + + def repr(self, w_obj1): + return self.wrap('fake') + def getitem(self, obj, index): + if isinstance(obj, DictObject): + w_dict = obj.getdict(self) + if w_dict is not None: + try: + return w_dict[index] + except KeyError, e: + raise OperationError(self.w_KeyError, self.wrap("key error")) + assert isinstance(obj, ListObject) assert isinstance(index, IntObject) return obj.items[index.intval] @@ -191,12 +285,24 @@ return w_obj.v raise NotImplementedError + def unicode_w(self, w_obj): + # XXX + if isinstance(w_obj, StringObject): + return unicode(w_obj.v) + raise NotImplementedError + def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj assert isinstance(w_obj, boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) + def long(self, w_obj): + if isinstance(w_obj, LongObject): + return w_obj + assert isinstance(w_obj, boxes.W_GenericBox) + return self.int(w_obj.descr_long(self)) + def str(self, w_obj): if isinstance(w_obj, StringObject): 
return w_obj @@ -240,9 +346,29 @@ def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) - def call_function(self, tp, w_dtype): + def call_function(self, tp, w_dtype, *args): + if tp is self.w_float: + if isinstance(w_dtype, boxes.W_Float64Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Float32Box): + return FloatObject(float(w_dtype.value)) + if isinstance(w_dtype, boxes.W_Int64Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int32Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int16Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, boxes.W_Int8Box): + return FloatObject(float(int(w_dtype.value))) + if isinstance(w_dtype, IntObject): + return FloatObject(float(w_dtype.intval)) + if tp is self.w_int: + if isinstance(w_dtype, FloatObject): + return IntObject(int(w_dtype.floatval)) + return w_dtype + @specialize.arg(2) def call_method(self, w_obj, s, *args): # XXX even the hacks have hacks return getattr(w_obj, 'descr_' + s)(self, *args) @@ -258,21 +384,21 @@ def newtuple(self, list_w): return ListObject(list_w) - def newdict(self): - return {} + def newdict(self, module=True): + return DictObject({}) - def setitem(self, dict, item, value): - dict[item] = value + def newint(self, i): + if isinstance(i, IntObject): + return i + return IntObject(i) - def len_w(self, w_obj): - if isinstance(w_obj, ListObject): - return len(w_obj.items) - # XXX array probably - assert False + def setitem(self, obj, index, value): + obj.items[index] = value def exception_match(self, w_exc_type, w_check_class): - # Good enough for now - raise NotImplementedError + assert isinstance(w_exc_type, W_TypeObject) + assert isinstance(w_check_class, W_TypeObject) + return w_exc_type.name == w_check_class.name class FloatObject(W_Root): tp = FakeSpace.w_float @@ -283,6 +409,9 @@ tp = FakeSpace.w_bool def __init__(self, boolval): self.intval = 
boolval +FakeSpace.w_True = BoolObject(True) +FakeSpace.w_False = BoolObject(False) + class IntObject(W_Root): tp = FakeSpace.w_int @@ -299,6 +428,33 @@ def __init__(self, items): self.items = items +class DictObject(W_Root): + tp = FakeSpace.w_dict + def __init__(self, items): + self.items = items + + def getdict(self, space): + return self.items + + def getdictvalue(self, space, key): + return self.items[key] + +class IterDictObject(W_Root): + def __init__(self, space, w_dict): + self.space = space + self.items = w_dict.items.items() + self.i = 0 + + def __iter__(self): + return self + + def next(self): + space = self.space + if self.i >= len(self.items): + raise OperationError(space.w_StopIteration, space.wrap("stop iteration")) + self.i += 1 + return self.items[self.i-1][0] + class SliceObject(W_Root): tp = FakeSpace.w_slice def __init__(self, start, stop, step): @@ -414,6 +570,15 @@ w_rhs = IntObject(int(w_rhs.floatval)) assert isinstance(w_lhs, W_NDimArray) w_res = w_lhs.descr_getitem(interp.space, w_rhs) + if isinstance(w_rhs, IntObject): + if isinstance(w_res, boxes.W_Float64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Float32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", float(w_res.value) + if isinstance(w_res, boxes.W_Int64Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) + if isinstance(w_res, boxes.W_Int32Box): + print "access", w_lhs, "[", w_rhs.intval, "] => ", int(w_res.value) else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and @@ -425,9 +590,22 @@ def __repr__(self): return '(%r %s %r)' % (self.lhs, self.name, self.rhs) -class FloatConstant(Node): +class NumberConstant(Node): def __init__(self, v): - self.v = float(v) + if isinstance(v, int): + self.v = v + elif isinstance(v, float): + self.v = v + else: + assert isinstance(v, str) + assert len(v) > 0 + c = v[-1] + if c == 'f': + self.v = float(v[:-1]) + elif c == 'i': + 
self.v = int(v[:-1]) + else: + self.v = float(v) def __repr__(self): return "Const(%s)" % self.v @@ -519,8 +697,24 @@ def execute(self, interp): if self.v == 'int': dtype = get_dtype_cache(interp.space).w_int64dtype + elif self.v == 'int8': + dtype = get_dtype_cache(interp.space).w_int8dtype + elif self.v == 'int16': + dtype = get_dtype_cache(interp.space).w_int16dtype + elif self.v == 'int32': + dtype = get_dtype_cache(interp.space).w_int32dtype + elif self.v == 'uint': + dtype = get_dtype_cache(interp.space).w_uint64dtype + elif self.v == 'uint8': + dtype = get_dtype_cache(interp.space).w_uint8dtype + elif self.v == 'uint16': + dtype = get_dtype_cache(interp.space).w_uint16dtype + elif self.v == 'uint32': + dtype = get_dtype_cache(interp.space).w_uint32dtype elif self.v == 'float': dtype = get_dtype_cache(interp.space).w_float64dtype + elif self.v == 'float32': + dtype = get_dtype_cache(interp.space).w_float32dtype else: raise BadToken('unknown v to dtype "%s"' % self.v) return dtype @@ -556,8 +750,13 @@ raise ArgumentMismatch if self.name == "sum": if len(self.args)>1: - w_res = arr.descr_sum(interp.space, + var = self.args[1] + if isinstance(var, DtypeClass): + w_res = arr.descr_sum(interp.space, None, var.execute(interp)) + else: + w_res = arr.descr_sum(interp.space, self.args[1].execute(interp)) + else: w_res = arr.descr_sum(interp.space) elif self.name == "prod": @@ -577,10 +776,10 @@ w_res = logical_xor.reduce(interp.space, arr, None) elif self.name == "unegative": neg = ufuncs.get(interp.space).negative - w_res = neg.call(interp.space, [arr], None, None, None) + w_res = neg.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "cos": cos = ufuncs.get(interp.space).cos - w_res = cos.call(interp.space, [arr], None, None, None) + w_res = cos.call(interp.space, [arr], None, 'unsafe', None) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) elif self.name == "argsort": @@ -598,6 +797,8 @@ raise ArgumentNotAnArray if self.name == 
"dot": w_res = arr.descr_dot(interp.space, arg) + elif self.name == 'multiply': + w_res = arr.descr_mul(interp.space, arg) elif self.name == 'take': w_res = arr.descr_take(interp.space, arg) elif self.name == "searchsorted": @@ -617,7 +818,7 @@ if self.name == "where": w_res = where(interp.space, arr, arg1, arg2) else: - assert False + assert False # unreachable code elif self.name in TWO_ARG_FUNCTIONS_OR_NONE: if len(self.args) != 2: raise ArgumentMismatch @@ -626,6 +827,11 @@ w_res = arr.descr_view(interp.space, arg) elif self.name == 'astype': w_res = arr.descr_astype(interp.space, arg) + elif self.name == 'reshape': + w_arg = self.args[1] + assert isinstance(w_arg, ArrayConstant) + order = -1 + w_res = arr.reshape(interp.space, w_arg.wrap(interp.space), order) else: assert False else: @@ -645,7 +851,7 @@ return W_NDimArray.new_scalar(interp.space, dtype, w_res) _REGEXES = [ - ('-?[\d\.]+', 'number'), + ('-?[\d\.]+(i|f)?', 'number'), ('\[', 'array_left'), (':', 'colon'), ('\w+', 'identifier'), @@ -719,7 +925,7 @@ start = 0 else: if tokens.get(0).name != 'colon': - return FloatConstant(start_tok.v) + return NumberConstant(start_tok.v) start = int(start_tok.v) tokens.pop() if not tokens.get(0).name in ['colon', 'number']: @@ -751,8 +957,30 @@ stack.append(ArrayClass()) elif token.v.strip(' ') == 'int': stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'int8': + stack.append(DtypeClass('int8')) + elif token.v.strip(' ') == 'int16': + stack.append(DtypeClass('int16')) + elif token.v.strip(' ') == 'int32': + stack.append(DtypeClass('int32')) + elif token.v.strip(' ') == 'int64': + stack.append(DtypeClass('int')) + elif token.v.strip(' ') == 'uint': + stack.append(DtypeClass('uint')) + elif token.v.strip(' ') == 'uint8': + stack.append(DtypeClass('uint8')) + elif token.v.strip(' ') == 'uint16': + stack.append(DtypeClass('uint16')) + elif token.v.strip(' ') == 'uint32': + stack.append(DtypeClass('uint32')) + elif token.v.strip(' ') == 'uint64': + 
stack.append(DtypeClass('uint')) elif token.v.strip(' ') == 'float': stack.append(DtypeClass('float')) + elif token.v.strip(' ') == 'float32': + stack.append(DtypeClass('float32')) + elif token.v.strip(' ') == 'float64': + stack.append(DtypeClass('float')) else: stack.append(Variable(token.v.strip(' '))) elif token.name == 'array_left': @@ -805,7 +1033,7 @@ while True: token = tokens.pop() if token.name == 'number': - elems.append(FloatConstant(token.v)) + elems.append(NumberConstant(token.v)) elif token.name == 'array_left': elems.append(ArrayConstant(self.parse_array_const(tokens))) elif token.name == 'paren_left': diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -70,7 +70,10 @@ @jit.unroll_safe def setslice(self, space, arr): - if len(arr.get_shape()) > len(self.get_shape()): + if arr.get_size() == 1: + # we can always set self[:] = scalar + pass + elif len(arr.get_shape()) > len(self.get_shape()): # record arrays get one extra dimension if not self.dtype.is_record() or \ len(arr.get_shape()) > len(self.get_shape()) + 1: diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -86,6 +86,9 @@ def _array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False): + # numpy testing calls array(type(array([]))) and expects a ValueError + if space.isinstance_w(w_object, space.w_type): + raise oefmt(space.w_ValueError, "cannot create ndarray from type instance") # for anything that isn't already an array, try __array__ method first if not isinstance(w_object, W_NDimArray): w_array = try_array_method(space, w_object, w_dtype) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -97,7 +97,7 @@ finally: 
self.iter.reset(self.state, mutate=True) - def descr___array_wrap__(self, space, obj): + def descr___array_wrap__(self, space, obj, w_context=None): return obj W_FlatIterator.typedef = TypeDef("numpy.flatiter", diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -83,6 +83,12 @@ self._indices = indices self.offset = offset + def same(self, other): + if self.offset == other.offset and \ + self.index == other.index and \ + self._indices == other._indices: + return self.iterator.same_shape(other.iterator) + return False class ArrayIter(object): _immutable_fields_ = ['contiguous', 'array', 'size', 'ndim_m1', 'shape_m1[*]', @@ -100,6 +106,7 @@ self.array = array self.size = size self.ndim_m1 = len(shape) - 1 + # self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides @@ -113,6 +120,17 @@ factors[ndim-i-1] = factors[ndim-i] * shape[ndim-i] self.factors = factors + def same_shape(self, other): + """ Iterating over the same element """ + if not self.contiguous or not other.contiguous: + return False + return (self.contiguous == other.contiguous and + self.array.dtype is self.array.dtype and + self.shape_m1 == other.shape_m1 and + self.strides == other.strides and + self.backstrides == other.backstrides and + self.factors == other.factors) + @jit.unroll_safe def reset(self, state=None, mutate=False): index = 0 @@ -138,9 +156,13 @@ indices = state._indices offset = state.offset if self.contiguous: - offset += self.array.dtype.elsize + elsize = self.array.dtype.elsize + jit.promote(elsize) + offset += elsize elif self.ndim_m1 == 0: - offset += self.strides[0] + stride = self.strides[0] + jit.promote(stride) + offset += stride else: for i in xrange(self.ndim_m1, -1, -1): idx = indices[i] @@ -192,7 +214,7 @@ return state.index >= self.size def getitem(self, state): - assert state.iterator is self + # assert state.iterator 
is self return self.array.getitem(state.offset) def getitem_bool(self, state): @@ -203,7 +225,6 @@ assert state.iterator is self self.array.setitem(state.offset, elem) - def AxisIter(array, shape, axis): strides = array.get_strides() backstrides = array.get_backstrides() diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -2,6 +2,7 @@ operations. This is the place to look for all the computations that iterate over all the array elements. """ +import py from pypy.interpreter.error import OperationError from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder @@ -13,11 +14,6 @@ from pypy.interpreter.argument import Arguments -call2_driver = jit.JitDriver( - name='numpy_call2', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') - def call2(space, shape, func, calc_dtype, w_lhs, w_rhs, out): if w_lhs.get_size() == 1: w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype) @@ -38,24 +34,104 @@ out_iter, out_state = out.create_iter(shape) shapelen = len(shape) res_dtype = out.get_dtype() - while not out_iter.done(out_state): - call2_driver.jit_merge_point(shapelen=shapelen, func=func, - calc_dtype=calc_dtype, res_dtype=res_dtype) - if left_iter: - w_left = left_iter.getitem(left_state).convert_to(space, calc_dtype) - left_state = left_iter.next(left_state) - if right_iter: - w_right = right_iter.getitem(right_state).convert_to(space, calc_dtype) - right_state = right_iter.next(right_state) - out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to( - space, res_dtype)) - out_state = out_iter.next(out_state) - return out + call2_func = try_to_share_iterators_call2(left_iter, right_iter, + left_state, right_state, out_state) + params = (space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state) + return call2_func(*params) + 
+def try_to_share_iterators_call2(left_iter, right_iter, left_state, right_state, out_state): + # these are all possible iterator sharing combinations + # left == right == out + # left == right + # left == out + # right == out + right_out_equal = False + if right_iter: + # rhs is not a scalar + if out_state.same(right_state): + right_out_equal = True + # + if not left_iter: + # lhs is a scalar + if right_out_equal: + return call2_advance_out_left + else: + # worst case, nothing can be shared and lhs is a scalar + return call2_advance_out_left_right + else: + # lhs is NOT a scalar + if out_state.same(left_state): + # (2) out and left are the same -> remove left + if right_out_equal: + # the best case + return call2_advance_out + else: + return call2_advance_out_right + else: + if right_out_equal: + # right and out are equal, only advance left and out + return call2_advance_out_left + else: + if right_iter and right_state.same(left_state): + # left and right are equal, but still need to advance out + return call2_advance_out_left_eq_right + else: + # worst case, nothing can be shared + return call2_advance_out_left_right + + assert 0, "logical problem with the selection of the call2 case" + +def generate_call2_cases(name, left_state, right_state): + call2_driver = jit.JitDriver(name='numpy_call2_' + name, + greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) + # + advance_left_state = left_state == "left_state" + advance_right_state = right_state == "right_state" + code = """ + def method(space, shapelen, func, calc_dtype, res_dtype, out, + w_left, w_right, left_iter, right_iter, out_iter, + left_state, right_state, out_state): + while not out_iter.done(out_state): + call2_driver.jit_merge_point(shapelen=shapelen, func=func, + calc_dtype=calc_dtype, res_dtype=res_dtype) + if left_iter: + w_left = left_iter.getitem({left_state}).convert_to(space, calc_dtype) + if right_iter: + w_right = 
right_iter.getitem({right_state}).convert_to(space, calc_dtype) + w_out = func(calc_dtype, w_left, w_right) + out_iter.setitem(out_state, w_out.convert_to(space, res_dtype)) + out_state = out_iter.next(out_state) + if advance_left_state and left_iter: + left_state = left_iter.next(left_state) + if advance_right_state and right_iter: + right_state = right_iter.next(right_state) + # + # if not set to None, the values will be loop carried + # (for the var,var case), forcing the vectorization to unpack + # the vector registers at the end of the loop + if left_iter: + w_left = None + if right_iter: + w_right = None + return out + """ + exec(py.code.Source(code.format(left_state=left_state,right_state=right_state)).compile(), locals()) + method.__name__ = "call2_" + name + return method + +call2_advance_out = generate_call2_cases("inc_out", "out_state", "out_state") +call2_advance_out_left = generate_call2_cases("inc_out_left", "left_state", "out_state") +call2_advance_out_right = generate_call2_cases("inc_out_right", "out_state", "right_state") +call2_advance_out_left_eq_right = generate_call2_cases("inc_out_left_eq_right", "left_state", "left_state") +call2_advance_out_left_right = generate_call2_cases("inc_out_left_right", "left_state", "right_state") call1_driver = jit.JitDriver( name='numpy_call1', - greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'], - reds='auto') + greens=['shapelen', 'share_iterator', 'func', 'calc_dtype', 'res_dtype'], + reds='auto', vectorize=True) def call1(space, shape, func, calc_dtype, w_obj, w_ret): obj_iter, obj_state = w_obj.create_iter(shape) @@ -63,21 +139,32 @@ out_iter, out_state = w_ret.create_iter(shape) shapelen = len(shape) res_dtype = w_ret.get_dtype() + share_iterator = out_state.same(obj_state) while not out_iter.done(out_state): call1_driver.jit_merge_point(shapelen=shapelen, func=func, + share_iterator=share_iterator, calc_dtype=calc_dtype, res_dtype=res_dtype) - elem = obj_iter.getitem(obj_state).convert_to(space, 
calc_dtype) + if share_iterator: + # use out state as param to getitem + elem = obj_iter.getitem(out_state).convert_to(space, calc_dtype) + else: + elem = obj_iter.getitem(obj_state).convert_to(space, calc_dtype) out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype)) - out_state = out_iter.next(out_state) - obj_state = obj_iter.next(obj_state) + if share_iterator: + # only advance out, they share the same iteration space + out_state = out_iter.next(out_state) + else: + out_state = out_iter.next(out_state) + obj_state = obj_iter.next(obj_state) + elem = None return w_ret call_many_to_one_driver = jit.JitDriver( name='numpy_call_many_to_one', - greens=['shapelen', 'nin', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'func', 'in_dtypes', 'res_dtype'], reds='auto') -def call_many_to_one(space, shape, func, res_dtype, in_args, out): +def call_many_to_one(space, shape, func, in_dtypes, res_dtype, in_args, out): # out must hav been built. func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -95,9 +182,9 @@ vals = [None] * nin while not out_iter.done(out_state): call_many_to_one_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin) + in_dtypes=in_dtypes, res_dtype=res_dtype, nin=nin) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_out_val = space.call_args(func, Arguments.frompacked(space, w_arglist)) out_iter.setitem(out_state, res_dtype.coerce(space, w_out_val)) @@ -108,10 +195,10 @@ call_many_to_many_driver = jit.JitDriver( name='numpy_call_many_to_many', - greens=['shapelen', 'nin', 'nout', 'func', 'res_dtype'], + greens=['shapelen', 'nin', 'nout', 'func', 'in_dtypes', 'out_dtypes'], reds='auto') -def call_many_to_many(space, shape, func, res_dtype, in_args, out_args): +def call_many_to_many(space, shape, func, in_dtypes, out_dtypes, in_args, out_args): # out 
must hav been built. func needs no calc_type, is usually an # external ufunc nin = len(in_args) @@ -134,29 +221,34 @@ out_states[i] = out_state shapelen = len(shape) vals = [None] * nin - while not out_iters[0].done(out_states[0]): + test_iter, test_state = in_iters[-1], in_states[-1] + if nout > 0: + test_iter, test_state = out_iters[0], out_states[0] + while not test_iter.done(test_state): call_many_to_many_driver.jit_merge_point(shapelen=shapelen, func=func, - res_dtype=res_dtype, nin=nin, nout=nout) + in_dtypes=in_dtypes, out_dtypes=out_dtypes, + nin=nin, nout=nout) for i in range(nin): - vals[i] = in_iters[i].getitem(in_states[i]) + vals[i] = in_dtypes[i].coerce(space, in_iters[i].getitem(in_states[i])) w_arglist = space.newlist(vals) w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist)) - # w_outvals should be a tuple, but func can return a single value as well + # w_outvals should be a tuple, but func can return a single value as well if space.isinstance_w(w_outvals, space.w_tuple): batch = space.listview(w_outvals) for i in range(len(batch)): - out_iters[i].setitem(out_states[i], res_dtype.coerce(space, batch[i])) + out_iters[i].setitem(out_states[i], out_dtypes[i].coerce(space, batch[i])) out_states[i] = out_iters[i].next(out_states[i]) - else: - out_iters[0].setitem(out_states[0], res_dtype.coerce(space, w_outvals)) + elif nout > 0: + out_iters[0].setitem(out_states[0], out_dtypes[0].coerce(space, w_outvals)) out_states[0] = out_iters[0].next(out_states[0]) for i in range(nin): in_states[i] = in_iters[i].next(in_states[i]) + test_state = test_iter.next(test_state) return space.newtuple([convert_to_array(space, o) for o in out_args]) setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', vectorize=True) def setslice(space, shape, target, source): if not shape: @@ -234,7 +326,8 @@ reduce_flat_driver = jit.JitDriver( name='numpy_reduce_flat', - greens = ['shapelen', 'func', 
'done_func', 'calc_dtype'], reds = 'auto') + greens = ['shapelen', 'func', 'done_func', 'calc_dtype'], reds = 'auto', + vectorize = True) def reduce_flat(space, func, w_arr, calc_dtype, done_func, identity): obj_iter, obj_state = w_arr.create_iter() @@ -255,10 +348,10 @@ obj_state = obj_iter.next(obj_state) return cur_value - reduce_driver = jit.JitDriver( name='numpy_reduce', - greens=['shapelen', 'func', 'dtype'], reds='auto') + greens=['shapelen', 'func', 'dtype'], reds='auto', + vectorize=True) def reduce(space, func, w_arr, axis_flags, dtype, out, identity): out_iter, out_state = out.create_iter() @@ -293,7 +386,7 @@ accumulate_flat_driver = jit.JitDriver( name='numpy_accumulate_flat', greens=['shapelen', 'func', 'dtype', 'out_dtype'], - reds='auto') + reds='auto', vectorize=True) def accumulate_flat(space, func, w_arr, calc_dtype, w_out, identity): arr_iter, arr_state = w_arr.create_iter() @@ -320,7 +413,9 @@ accumulate_driver = jit.JitDriver( name='numpy_accumulate', - greens=['shapelen', 'func', 'calc_dtype'], reds='auto') + greens=['shapelen', 'func', 'calc_dtype'], + reds='auto', + vectorize=True) def accumulate(space, func, w_arr, axis, calc_dtype, w_out, identity): @@ -370,7 +465,8 @@ where_driver = jit.JitDriver(name='numpy_where', greens = ['shapelen', 'dtype', 'arr_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def where(space, out, shape, arr, x, y, dtype): out_iter, out_state = out.create_iter(shape) @@ -411,7 +507,6 @@ state = x_state return out - def _new_argmin_argmax(op_name): arg_driver = jit.JitDriver(name='numpy_' + op_name, greens = ['shapelen', 'dtype'], @@ -476,7 +571,8 @@ dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def multidim_dot(space, left, right, result, dtype, right_critical_dim): ''' assumes left, right are concrete arrays @@ -519,8 +615,8 @@ lval = left_impl.getitem(i1).convert_to(space, dtype) rval = right_impl.getitem(i2).convert_to(space, 
dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += s1 - i2 += s2 + i1 += jit.promote(s1) + i2 += jit.promote(s2) outi.setitem(outs, oval) outs = outi.next(outs) rights = righti.next(rights) @@ -530,7 +626,8 @@ count_all_true_driver = jit.JitDriver(name = 'numpy_count', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def count_all_true_concrete(impl): s = 0 @@ -551,7 +648,8 @@ nonzero_driver = jit.JitDriver(name = 'numpy_nonzero', greens = ['shapelen', 'dims', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def nonzero(res, arr, box): res_iter, res_state = res.create_iter() @@ -573,7 +671,8 @@ getitem_filter_driver = jit.JitDriver(name = 'numpy_getitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def getitem_filter(res, arr, index): res_iter, res_state = res.create_iter() @@ -601,7 +700,8 @@ setitem_filter_driver = jit.JitDriver(name = 'numpy_setitem_bool', greens = ['shapelen', 'arr_dtype', 'index_dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def setitem_filter(space, arr, index, value): arr_iter, arr_state = arr.create_iter() @@ -630,7 +730,8 @@ flatiter_getitem_driver = jit.JitDriver(name = 'numpy_flatiter_getitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_getitem(res, base_iter, base_state, step): ri, rs = res.create_iter() @@ -644,7 +745,8 @@ flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def flatiter_setitem(space, dtype, val, arr_iter, arr_state, step, length): val_iter, val_state = val.create_iter() @@ -753,7 +855,8 @@ byteswap_driver = jit.JitDriver(name='numpy_byteswap_driver', greens = ['dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def byteswap(from_, to): dtype = from_.dtype @@ -768,7 +871,8 @@ choose_driver = 
jit.JitDriver(name='numpy_choose_driver', greens = ['shapelen', 'mode', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def choose(space, arr, choices, shape, dtype, out, mode): shapelen = len(shape) @@ -802,7 +906,8 @@ clip_driver = jit.JitDriver(name='numpy_clip_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def clip(space, arr, shape, min, max, out): assert min or max @@ -837,7 +942,8 @@ round_driver = jit.JitDriver(name='numpy_round_driver', greens = ['shapelen', 'dtype'], - reds = 'auto') + reds = 'auto', + vectorize=True) def round(space, arr, dtype, shape, decimals, out): arr_iter, arr_state = arr.create_iter(shape) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -7,6 +7,7 @@ # structures to describe slicing class BaseChunk(object): + _attrs_ = ['step','out_dim'] pass diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,6 +1,6 @@ import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, - ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, + ArrayConstant, NumberConstant, Operator, Variable, RangeConstant, Execute, FunctionCall, FakeSpace, W_NDimArray) @@ -25,30 +25,30 @@ interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == [FloatConstant(1), FloatConstant(2), - FloatConstant(3)] + assert st.expr.items == [NumberConstant(1), NumberConstant(2), + NumberConstant(3)] def test_array_literal2(self): code = "a = [[1],[2],[3]]" interp = self.compile(code) assert isinstance(interp.code.statements[0].expr, ArrayConstant) st = interp.code.statements[0] - assert st.expr.items == 
[ArrayConstant([FloatConstant(1)]), - ArrayConstant([FloatConstant(2)]), - ArrayConstant([FloatConstant(3)])] + assert st.expr.items == [ArrayConstant([NumberConstant(1)]), + ArrayConstant([NumberConstant(2)]), + ArrayConstant([NumberConstant(3)])] def test_expr_1(self): code = "b = a + 1" interp = self.compile(code) assert (interp.code.statements[0].expr == - Operator(Variable("a"), "+", FloatConstant(1))) From noreply at buildbot.pypy.org Thu Oct 22 09:30:57 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 09:30:57 +0200 (CEST) Subject: [pypy-commit] cffi release-1.3: typo Message-ID: <20151022073057.C8E1F1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: release-1.3 Changeset: r2358:f3e2be21a723 Date: 2015-10-21 13:25 +0200 http://bitbucket.org/cffi/cffi/changeset/f3e2be21a723/ Log: typo diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -686,7 +686,7 @@ Python 2/3 compatibility note: you should avoid using ``str(buf)``, because it gives inconsistent results between Python 2 and Python 3. -This is similar to how ``str()`` gives inconsistent results on regular +(This is similar to how ``str()`` gives inconsistent results on regular byte strings). Use ``buf[:]`` instead. 
**ffi.from_buffer(python_buffer)**: return a ```` that From noreply at buildbot.pypy.org Thu Oct 22 09:30:59 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 09:30:59 +0200 (CEST) Subject: [pypy-commit] cffi default: wchar_t can be signed or not, apparently, even on the same platform (arm Message-ID: <20151022073059.C98421C12C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2359:ebcda8268d47 Date: 2015-10-22 09:31 +0200 http://bitbucket.org/cffi/cffi/changeset/ebcda8268d47/ Log: wchar_t can be signed or not, apparently, even on the same platform (arm linux) diff --git a/testing/cffi0/backend_tests.py b/testing/cffi0/backend_tests.py --- a/testing/cffi0/backend_tests.py +++ b/testing/cffi0/backend_tests.py @@ -756,10 +756,11 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine().startswith(('arm', 'aarch64')): - assert int(p) == 0xffffffff # 4 bytes, unsigned - else: # 4 bytes, signed + elif (sys.platform.startswith('linux') and + platform.machine().startswith('x86')): # known to be signed assert int(p) == -1 + else: # in general, it can be either signed or not + assert int(p) in [-1, 0xffffffff] # e.g. on arm, both cases occur p = ffi.cast("int", u+'\u1234') assert int(p) == 0x1234 diff --git a/testing/cffi1/test_new_ffi_1.py b/testing/cffi1/test_new_ffi_1.py --- a/testing/cffi1/test_new_ffi_1.py +++ b/testing/cffi1/test_new_ffi_1.py @@ -781,10 +781,11 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff - elif platform.machine().startswith(('arm', 'aarch64')): - assert int(p) == 0xffffffff # 4 bytes, unsigned - else: # 4 bytes, signed + elif (sys.platform.startswith('linux') and + platform.machine().startswith('x86')): # known to be signed assert int(p) == -1 + else: # in general, it can be either signed or not + assert int(p) in [-1, 0xffffffff] # e.g. 
on arm, both cases occur p = ffi.cast("int", u+'\u1234') assert int(p) == 0x1234 From noreply at buildbot.pypy.org Thu Oct 22 09:31:01 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 09:31:01 +0200 (CEST) Subject: [pypy-commit] cffi default: hg merge release-1.3 Message-ID: <20151022073101.CCDA71C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2360:7c5809741094 Date: 2015-10-22 09:31 +0200 http://bitbucket.org/cffi/cffi/changeset/7c5809741094/ Log: hg merge release-1.3 diff --git a/doc/source/installation.rst b/doc/source/installation.rst --- a/doc/source/installation.rst +++ b/doc/source/installation.rst @@ -53,9 +53,9 @@ * http://pypi.python.org/packages/source/c/cffi/cffi-1.3.0.tar.gz - - MD5: ... + - MD5: a40ed8c8ac653c8fc7d5603711b06eaf - - SHA: ... + - SHA: 54a0b2dbbc2f5d99131aa337e217b636652641a9 * Or grab the most current version from the `Bitbucket page`_: ``hg clone https://bitbucket.org/cffi/cffi`` diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -686,7 +686,7 @@ Python 2/3 compatibility note: you should avoid using ``str(buf)``, because it gives inconsistent results between Python 2 and Python 3. -This is similar to how ``str()`` gives inconsistent results on regular +(This is similar to how ``str()`` gives inconsistent results on regular byte strings). Use ``buf[:]`` instead. 
**ffi.from_buffer(python_buffer)**: return a ```` that From noreply at buildbot.pypy.org Thu Oct 22 09:36:11 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 09:36:11 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: progress Message-ID: <20151022073611.0E5491C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80384:b641872f318e Date: 2015-10-22 09:36 +0200 http://bitbucket.org/pypy/pypy/changeset/b641872f318e/ Log: progress diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -20,4 +20,4 @@ #ifdef __cplusplus } #endif -#endif /* !Py_BOOLOBJECT_H */ +#endif /* !Py_INTOBJECT_H */ diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -172,9 +172,9 @@ LONG_MAX, a long integer object is returned. """ if ival <= LONG_MAX: - return new_pyint(rffi.cast(rffi.LONG, ival)) + return new_pyint(space, rffi.cast(rffi.LONG, ival)) else: - return get_pyobj_and_incref(space.wrap(ival)) + return get_pyobj_and_incref(space, space.wrap(ival)) @cpython_api([Py_ssize_t], PyObject) def PyInt_FromSsize_t(space, ival): @@ -183,7 +183,7 @@ returned. 
""" # XXX win64 - return new_pyint(ival) + return new_pyint(space, ival) @cpython_api([CONST_STRING, rffi.CCHARPP, rffi.INT_real], PyObject) def PyInt_FromString(space, str, pend, base): diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -1,11 +1,12 @@ -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib.rarithmetic import intmask, ovfcheck from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyVarObject, Py_buffer, Py_TPFLAGS_HEAPTYPE, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, create_ref, from_ref, Py_IncRef, Py_DecRef, + PyObject, PyObjectP, create_ref, from_pyobj, Py_IncRef, Py_DecRef, track_reference, get_typedescr, _Py_NewReference, RefcountState) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall @@ -17,11 +18,12 @@ @cpython_api([Py_ssize_t], rffi.VOIDP) def PyObject_MALLOC(space, size): return lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + track_allocation=False) @cpython_api([rffi.VOIDP], lltype.Void) def PyObject_FREE(space, ptr): - lltype.free(ptr, flavor='raw') + lltype.free(ptr, flavor='raw', track_allocation=False) @cpython_api([PyTypeObjectPtr], PyObject) def _PyObject_New(space, type): @@ -29,16 +31,26 @@ @cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject) def _PyObject_NewVar(space, type, itemcount): - w_type = from_ref(space, rffi.cast(PyObject, type)) - assert isinstance(w_type, W_TypeObject) - typedescr = get_typedescr(w_type.instancetypedef) - py_obj = typedescr.allocate(space, w_type, itemcount=itemcount) - py_obj.c_ob_refcnt = 0 - if type.c_tp_itemsize == 0: - w_obj = PyObject_Init(space, py_obj, type) + 
# XXX lots and lots of speed improvements pending: these kind of functions + # should be written in C in the first place, mostly as copy-pastes of the + # CPython source code + size = intmask(type.c_tp_basicsize) + assert size > 0 + itemsize = intmask(type.c_tp_itemsize) + if itemsize > 0: + try: + varsize = ovfcheck(itemsize * itemcount) + size = ovfcheck(size + varsize) + except OverflowError: + PyErr_NoMemory(space) + mem = lltype.malloc(rffi.VOIDP.TO, size, flavor='raw', zero=True, + track_allocation=False) + py_obj = rffi.cast(PyObject, mem) + if itemsize == 0: + PyObject_Init(space, py_obj, type) else: py_objvar = rffi.cast(PyVarObject, py_obj) - w_obj = PyObject_InitVar(space, py_objvar, type, itemcount) + PyObject_InitVar(space, py_objvar, type, itemcount) return py_obj @cpython_api([rffi.VOIDP], lltype.Void) @@ -193,6 +205,7 @@ if not obj: PyErr_NoMemory(space) obj.c_ob_type = type + obj.c_ob_pypy_link = 0 obj.c_ob_refcnt = 1 return obj diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -207,6 +207,11 @@ def cpyext_as_pyobj(self, space): return self.cpyext_pyobj + def getclass(self, space): + w_type = from_pyobj(space, self.cpyext_pyobj.c_ob_type) + assert isinstance(w_type, W_TypeObject) + return w_type + # ZZZ getclass(), getweakref(), etc.? 
like interpreter/typedef.py W_CPyExtPlaceHolder.__name__ = W_Class.__name__ + '_CPyExtPlaceHolder' @@ -232,6 +237,7 @@ def _create_w_obj_from_pyobj(space, pyobj): w_type = from_pyobj(space, pyobj.c_ob_type) + assert isinstance(w_type, W_TypeObject) return w_type.instancetypedef.cpyext_create_pypy(space, pyobj) #________________________________________________________ diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -5,7 +5,8 @@ from pypy.module.cpyext.api import ADDR, PyObjectP, cpython_api from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred -from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref +from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_pyobj +from pypy.module.cpyext.pyobject import get_pyobj_and_xincref from pypy.module.cpyext.stringobject import ( PyString_FromString, PyString_FromStringAndSize) from pypy.module.cpyext.floatobject import PyFloat_AsDouble @@ -67,13 +68,13 @@ elif member_type == T_OBJECT: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: - w_result = from_ref(space, obj_ptr[0]) + w_result = from_pyobj(space, obj_ptr[0]) else: w_result = space.w_None elif member_type == T_OBJECT_EX: obj_ptr = rffi.cast(PyObjectP, addr) if obj_ptr[0]: - w_result = from_ref(space, obj_ptr[0]) + w_result = from_pyobj(space, obj_ptr[0]) else: w_name = space.wrap(rffi.charp2str(w_member.c_name)) raise OperationError(space.w_AttributeError, w_name) @@ -122,7 +123,7 @@ array = rffi.cast(PyObjectP, addr) if array[0]: Py_DecRef(space, array[0]) - array[0] = make_ref(space, w_value) + array[0] = get_pyobj_and_xincref(space, w_value) else: raise OperationError(space.w_SystemError, space.wrap("bad memberdescr type")) diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py --- 
a/pypy/module/cpyext/test/test_intobject.py +++ b/pypy/module/cpyext/test/test_intobject.py @@ -10,9 +10,7 @@ assert not api.PyInt_Check(space.wrap((1, 2, 3))) for i in [3, -5, -1, -sys.maxint, sys.maxint - 1]: x = api.PyInt_AsLong(space.wrap(i)) - y = api.PyInt_AS_LONG(space.wrap(i)) assert x == i - assert y == i w_x = from_pyobj(space, api.PyInt_FromLong(x + 1)) assert space.type(w_x) is space.w_int assert space.eq_w(w_x, space.wrap(i + 1)) From noreply at buildbot.pypy.org Thu Oct 22 11:40:21 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 11:40:21 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: testing floating point operations, load float and round it to integer Message-ID: <20151022094021.C00321C0334@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80385:05b9b0babd4d Date: 2015-10-22 11:40 +0200 http://bitbucket.org/pypy/pypy/changeset/05b9b0babd4d/ Log: testing floating point operations, load float and round it to integer diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -13,6 +13,7 @@ MODEL_X86 = 'x86' MODEL_X86_NO_SSE2 = 'x86-without-sse2' MODEL_X86_64 = 'x86-64' +MODEL_X86_64_SSE4 = 'x86-64-sse4' MODEL_ARM = 'arm' MODEL_PPC_64 = 'ppc-64' MODEL_S390_64 = 's390x' diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -7,6 +7,8 @@ """ NOT_RPYTHON """ if arg == 'r' or arg == 'r/m': return 0 + if arg == 'f': + return 0 if arg.startswith('i') or arg.startswith('u'): return 0 return loc.addr(0) @@ -18,6 +20,7 @@ """ NOT_RPYTHON """ """ Available names: + f - floating point register r - register r/m - register or mask iX - immediate X bits (signed) @@ -287,11 +290,22 @@ self.writechar(opcode2) return 
encode_ri +def build_rrf(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,r/m,r,r/m') + def encode_rrf(self, r1, rm3, r2, rm4): + self.writechar(opcode1) + self.writechar(opcode2) + byte = (rm3 & BIT_MASK_4) << 4 | (rm4 & BIT_MASK_4) + self.writechar(chr(byte)) + byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4) + self.writechar(chr(byte)) + return encode_rrf + def build_unpack_func(mnemonic, func): def function(self, *args): newargs = [None] * len(args) for i,arg in enumerate(unrolling_iterable(func._arguments_)): - if arg == 'r' or arg == 'r/m': + if arg == 'r' or arg == 'r/m' or arg == 'f': newargs[i] = args[i].value elif arg.startswith('i') or arg.startswith('u'): newargs[i] = args[i].value diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -20,6 +20,8 @@ 'AG': ('rxy', ['\xE3','\x08']), 'AGF': ('rxy', ['\xE3','\x18']), 'AHI': ('ri', ['\xA7','\x0A']), + + # floating point } logic_mnemonic_codes = { @@ -64,6 +66,39 @@ 'OILL': ('ri_u', ['\xA5', '\x0B']), } +memory_mnemonic_codes = { + # load address + 'LA': ('rx', ['\x41']), + 'LAY': ('rxy', ['\xE3','\x71']), + + # load memory + 'LMD': ('sse', ['\xEF']), + 'LMG': ('rsy', ['\xEB','\x04']), + 'LGHI': ('ri', ['\xA7','\x09']), + 'LR': ('rr', ['\x18']), + 'LGR': ('rre', ['\xB9','\x04']), + 'LG': ('rxy', ['\xE3','\x04']), + + # load binary float + # E -> short (32bit), + # D -> long (64bit), + # X -> extended (128bit) + 'LER': ('rr', ['\x38']), + 'LDR': ('rr', ['\x28']), + 'LE': ('rx', ['\x78']), + 'LD': ('rx', ['\x68']), + 'LEY': ('rxy', ['\xED', '\x64']), + 'LDY': ('rxy', ['\xED', '\x65']), +} + +floatingpoint_mnemonic_codes = { + 'FIEBR': ('rrf', ['\xB3','\x57']), + 'FIDBR': ('rrf', ['\xB3','\x5F']), + + 'CGEBR': ('rrf', ['\xB3','\xA8']), + 'CGDBR': ('rrf', ['\xB3','\xA9']), +} + all_mnemonic_codes = { # 'BXH': ('rs', ['\x86']), @@ -77,15 +112,6 @@ 'SRP': ('ssc', 
['\xF0']), 'MVCK': ('ssd', ['\xD9']), - 'LA': ('rx', ['\x41']), - 'LAY': ('rxy', ['\xE3','\x71']), - 'LMD': ('sse', ['\xEF']), - 'LMG': ('rsy', ['\xEB','\x04']), - 'LGHI': ('ri', ['\xA7','\x09']), - 'LR': ('rr', ['\x18']), - 'LGR': ('rre', ['\xB9','\x04']), - 'LG': ('rxy', ['\xE3','\x04']), - 'PKA': ('ssf', ['\xE9']), 'STMG': ('rsy', ['\xEB','\x24']), @@ -93,6 +119,8 @@ } all_mnemonic_codes.update(arith_mnemonic_codes) all_mnemonic_codes.update(logic_mnemonic_codes) +all_mnemonic_codes.update(memory_mnemonic_codes) +all_mnemonic_codes.update(floatingpoint_mnemonic_codes) all_mnemonic_codes.update(branch_mnemonic_codes) diff --git a/rpython/jit/backend/zarch/test/support.py b/rpython/jit/backend/zarch/test/support.py --- a/rpython/jit/backend/zarch/test/support.py +++ b/rpython/jit/backend/zarch/test/support.py @@ -1,9 +1,11 @@ from rpython.rtyper.lltypesystem import lltype, rffi -def run_asm(asm): +def run_asm(asm, return_float=False): BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed) addr = asm.mc.materialize(asm.cpu, [], None) assert addr % 8 == 0 func = rffi.cast(lltype.Ptr(BOOTSTRAP_TP), addr) asm.mc._dump_trace(addr, 'test.asm') + if return_float: + pass return func() diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -1,4 +1,6 @@ +import struct from rpython.jit.backend.zarch import conditions as con +from rpython.jit.backend.zarch import masks as msk from rpython.jit.backend.zarch import registers as reg from rpython.jit.backend.zarch.assembler import AssemblerZARCH from rpython.jit.backend.zarch import locations as loc @@ -15,12 +17,19 @@ from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import specialize from rpython.rlib.debug import ll_assert +from rpython.rlib.longlong2float import float2longlong CPU = getcpuclass() def byte_count(func): return 
func._byte_count +def BFL(value): + #assert 0x0000000000000000 == float2longlong(0.0) + #assert 0x8000000000000000 == abs(float2longlong(-0.0)) + #assert hex(0xc02e000000000000) == hex(abs(float2longlong(-15.0))) + return struct.pack('>q', float2longlong(value)) + class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) @@ -184,3 +193,15 @@ self.mc.SVC(loc.imm(4)) self.a.jmpto(reg.r14) assert run_asm(self.a) == 14 + + def test_float(self): + with self.label('func', func=True): + with self.label('lit'): + self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.write(BFL(-15.0)) + self.jump_here(self.mc.BRAS, 'lit') + self.mc.LD(reg.f0, loc.addr(0, reg.r13)) + self.mc.FIDBR(reg.f1, msk.RND_CURMODE, reg.f0, loc.imm(0)) + self.mc.CGDBR(reg.r2, msk.RND_CURMODE, reg.f1, loc.imm(0)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == -15 diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -123,10 +123,13 @@ break return results -REGS = range(15+1) +REGS = range(16) REGNAMES = ['%%r%d' % i for i in REGS] +FP_REGS = range(16) +FP_REGNAMES = ['%%f%d' % i for i in FP_REGS] TEST_CASE_GENERATE = { 'r': REGS, + 'f': FP_REGS, 'r/m': REGS, 'i4': range_of_bits(4, signed=True), 'i8': range_of_bits(8, signed=True), @@ -157,13 +160,11 @@ func = getattr(AbstractZARCHBuilder, methodname) return func._arguments_ - def assembler_operand_reg(self, regnum): - return REGNAMES[regnum] - def operand_combinations(self, methodname, modes, arguments): mapping = { - 'r': self.assembler_operand_reg, - 'r/m': self.assembler_operand_reg, + 'r': (lambda num: REGNAMES[num]), + 'r/m': (lambda num: REGNAMES[num]), + 'f': (lambda num: FP_REGNAMES[num]), } arg_types = self.get_func_arg_types(methodname) for mode, args in zip(arg_types, arguments): From noreply at buildbot.pypy.org Thu Oct 22 
12:13:25 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 12:13:25 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: made the instruction type RRF more general to support each unsupported parameter Message-ID: <20151022101325.AFBFF1C0036@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80386:535d3fb38323 Date: 2015-10-22 12:13 +0200 http://bitbucket.org/pypy/pypy/changeset/535d3fb38323/ Log: made the instruction type RRF more general to support each unsupported parameter diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -9,6 +9,8 @@ return 0 if arg == 'f': return 0 + if arg == '-': + return 0 if arg.startswith('i') or arg.startswith('u'): return 0 return loc.addr(0) @@ -20,6 +22,7 @@ """ NOT_RPYTHON """ """ Available names: + - - unused f - floating point register r - register r/m - register or mask @@ -290,22 +293,28 @@ self.writechar(opcode2) return encode_ri -def build_rrf(mnemonic, (opcode1,opcode2)): - @builder.arguments('r,r/m,r,r/m') - def encode_rrf(self, r1, rm3, r2, rm4): - self.writechar(opcode1) - self.writechar(opcode2) - byte = (rm3 & BIT_MASK_4) << 4 | (rm4 & BIT_MASK_4) - self.writechar(chr(byte)) - byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4) - self.writechar(chr(byte)) - return encode_rrf +def _build_rrf(args): + def build_rff(mnemonic, (opcode1,opcode2)): + @builder.arguments(args) + def encode_rrf(self, r1, rm3, r2, rm4): + self.writechar(opcode1) + self.writechar(opcode2) + byte = (rm3 & BIT_MASK_4) << 4 | (rm4 & BIT_MASK_4) + self.writechar(chr(byte)) + byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4) + self.writechar(chr(byte)) + return encode_rrf + return build_rff + +build_rrf = _build_rrf('r,u4,r,-') def build_unpack_func(mnemonic, func): def function(self, *args): - newargs = [None] * 
len(args) + newargs = [None] * len(func._arguments_) for i,arg in enumerate(unrolling_iterable(func._arguments_)): - if arg == 'r' or arg == 'r/m' or arg == 'f': + if arg == '-': + newargs[i] = 0 + elif arg == 'r' or arg == 'r/m' or arg == 'f': newargs[i] = args[i].value elif arg.startswith('i') or arg.startswith('u'): newargs[i] = args[i].value diff --git a/rpython/jit/backend/zarch/masks.py b/rpython/jit/backend/zarch/masks.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/masks.py @@ -0,0 +1,8 @@ +from rpython.jit.backend.zarch import locations as loc + +RND_CURMODE = loc.imm(0x0) +RND_BIASED_NEAREST = loc.imm(0x1) +RND_NEARST = loc.imm(0x4) +RND_TOZERO = loc.imm(0x5) +RND_TO_POSINF = loc.imm(0x6) +RND_TO_NEGINF= loc.imm(0x7) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -201,7 +201,6 @@ self.mc.write(BFL(-15.0)) self.jump_here(self.mc.BRAS, 'lit') self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.FIDBR(reg.f1, msk.RND_CURMODE, reg.f0, loc.imm(0)) - self.mc.CGDBR(reg.r2, msk.RND_CURMODE, reg.f1, loc.imm(0)) + self.mc.CGDBR(reg.r2, msk.RND_CURMODE, reg.f0) self.a.jmpto(reg.r14) assert run_asm(self.a) == -15 diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -128,6 +128,7 @@ FP_REGS = range(16) FP_REGNAMES = ['%%f%d' % i for i in FP_REGS] TEST_CASE_GENERATE = { + '-': [], 'r': REGS, 'f': FP_REGS, 'r/m': REGS, From noreply at buildbot.pypy.org Thu Oct 22 13:19:47 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 13:19:47 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: allocated memory, and stored float result into it, correctly read it afterwards 
Message-ID: <20151022111947.CD5F21C04DA@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80387:ec9e69c4ffde Date: 2015-10-22 13:20 +0200 http://bitbucket.org/pypy/pypy/changeset/ec9e69c4ffde/ Log: allocated memory, and stored float result into it, correctly read it afterwards diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -90,6 +90,22 @@ byte = displace >> 12 & 0xff mc.writechar(chr(byte)) + at always_inline +def encode_index_base_displace(mc, reg, idxbasedisp): + """ + +----------------------------------------------------+ + | opcode | reg & index | base & displace[0:11] | ... | + +----------------------------------------------------+ + """ + index = idxbasedisp.index + byte = (reg & 0x0f) << 4 | index & 0xf + mc.writechar(chr(byte)) + displace = idxbasedisp.displace & BIT_MASK_12 + base = idxbasedisp.base & 0xf + byte = displace >> 8 & 0xf | base << 4 + mc.writechar(chr(byte)) + mc.writechar(chr(displace & 0xff)) + def build_i(mnemonic, (opcode,)): @builder.arguments('u8') def encode_i(self, imm): @@ -119,14 +135,7 @@ @builder.arguments('r/m,bid') def encode_rx(self, reg_or_mask, idxbasedisp): self.writechar(opcode) - index = idxbasedisp.index - byte = (reg_or_mask & 0x0f) << 4 | index & 0xf - self.writechar(chr(byte)) - displace = idxbasedisp.displace & BIT_MASK_12 - base = idxbasedisp.base & 0xf - byte = displace >> 8 & 0xf | base << 4 - self.writechar(chr(byte)) - self.writechar(chr(displace & 0xff)) + encode_index_base_displace(self, reg_or_mask, idxbasedisp) return encode_rx def build_rxy(mnemonic, (opcode1,opcode2)): @@ -293,20 +302,37 @@ self.writechar(opcode2) return encode_ri -def _build_rrf(args): - def build_rff(mnemonic, (opcode1,opcode2)): - @builder.arguments(args) - def encode_rrf(self, r1, rm3, r2, rm4): - self.writechar(opcode1) - 
self.writechar(opcode2) - byte = (rm3 & BIT_MASK_4) << 4 | (rm4 & BIT_MASK_4) - self.writechar(chr(byte)) - byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4) - self.writechar(chr(byte)) - return encode_rrf - return build_rff +def build_rrf(mnemonic, (opcode1,opcode2,argtypes)): + @builder.arguments(argtypes) + def encode_rrf(self, r1, rm3, r2, rm4): + self.writechar(opcode1) + self.writechar(opcode2) + byte = (rm3 & BIT_MASK_4) << 4 | (rm4 & BIT_MASK_4) + self.writechar(chr(byte)) + byte = (r1 & BIT_MASK_4) << 4 | (r2 & BIT_MASK_4) + self.writechar(chr(byte)) + return encode_rrf -build_rrf = _build_rrf('r,u4,r,-') +def build_rxe(mnemonic, (opcode1,opcode2,argtypes)): + @builder.arguments(argtypes) + def encode_rxe(self, reg, idxbasedisp, mask): + self.writechar(opcode1) + encode_index_base_displace(self, reg, idxbasedisp) + self.writechar(chr((mask & 0xf) << 4)) + self.writechar(opcode2) + return encode_rxe + +def build_rxf(mnemonic, (opcode1,opcode2)): + @builder.arguments('r,bidl,r/m') + def encode_rxe(self, reg1, idxbasedisp, reg3): + self.writechar(opcode1) + index = idxbasedisp.index + byte = (reg3 & 0x0f) << 4 | index & 0xf + self.writechar(chr(byte)) + encode_base_displace_long(self, reg, idxbasedisp) + self.writechar(chr((reg1 & 0xf) << 4)) + self.writechar(opcode2) + return encode_rxe def build_unpack_func(mnemonic, func): def function(self, *args): @@ -330,7 +356,12 @@ def build_instr_codes(clazz): for mnemonic, params in all_mnemonic_codes.items(): - (instrtype, args) = params + argtypes = None + if len(params) == 2: + (instrtype, args) = params + else: + (instrtype, args, argtypes) = params + args = tuple(list(args) + [argtypes]) builder = globals()['build_' + instrtype] func = builder(mnemonic, args) name = mnemonic + "_" + instrtype diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -79,6 +79,9 @@ 'LGR': 
('rre', ['\xB9','\x04']), 'LG': ('rxy', ['\xE3','\x04']), + 'STE': ('rx', ['\x70']), + 'STD': ('rx', ['\x60']), + # load binary float # E -> short (32bit), # D -> long (64bit), @@ -92,11 +95,17 @@ } floatingpoint_mnemonic_codes = { - 'FIEBR': ('rrf', ['\xB3','\x57']), - 'FIDBR': ('rrf', ['\xB3','\x5F']), + 'FIEBR': ('rrf', ['\xB3','\x57'], 'r,u4,r,-'), + 'FIDBR': ('rrf', ['\xB3','\x5F'], 'r,u4,r,-'), - 'CGEBR': ('rrf', ['\xB3','\xA8']), - 'CGDBR': ('rrf', ['\xB3','\xA9']), + 'CGEBR': ('rrf', ['\xB3','\xA8'], 'r,u4,r,-'), + 'CGDBR': ('rrf', ['\xB3','\xA9'], 'r,u4,r,-'), + + # arithmetic + 'AEBR': ('rre', ['\xB3','\x0A']), + 'ADBR': ('rre', ['\xB3','\x1A']), + 'AEB': ('rxe', ['\xED','\x0A'], 'r,bidl,-'), + 'ADB': ('rxe', ['\xED','\x1A'], 'r,bidl,-'), } all_mnemonic_codes = { diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -10,14 +10,16 @@ from rpython.jit.codewriter import longlong from rpython.rtyper.annlowlevel import llhelper -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, rffi, ll2ctypes from rpython.jit.metainterp.history import JitCellToken from rpython.jit.backend.model import CompiledLoopToken from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.objectmodel import specialize from rpython.rlib.debug import ll_assert -from rpython.rlib.longlong2float import float2longlong +from rpython.rlib.longlong2float import (float2longlong, + DOUBLE_ARRAY_PTR) +import ctypes CPU = getcpuclass() @@ -25,11 +27,14 @@ return func._byte_count def BFL(value): - #assert 0x0000000000000000 == float2longlong(0.0) - #assert 0x8000000000000000 == abs(float2longlong(-0.0)) - #assert hex(0xc02e000000000000) == hex(abs(float2longlong(-15.0))) return struct.pack('>q', 
float2longlong(value)) +def ADDR(value): + ptr = ll2ctypes.lltype2ctypes(value) + addr = ctypes.addressof(ptr.contents.items) + print hex(addr) + return struct.pack('>Q', addr) + class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) @@ -204,3 +209,21 @@ self.mc.CGDBR(reg.r2, msk.RND_CURMODE, reg.f0) self.a.jmpto(reg.r14) assert run_asm(self.a) == -15 + + def test_float_to_memory(self): + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 16) as mem: + with self.label('func', func=True): + with self.label('lit'): + self.mc.BRAS(reg.r13, loc.imm(0)) + self.mc.write(BFL(-15.0)) + self.mc.write(ADDR(mem)) + self.jump_here(self.mc.BRAS, 'lit') + self.mc.LD(reg.f0, loc.addr(0, reg.r13)) + self.mc.LDR(reg.f1, reg.f0) + self.mc.ADBR(reg.f0, reg.f1) + self.mc.LG(reg.r11, loc.addr(8, reg.r13)) + self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + self.a.jmpto(reg.r14) + run_asm(self.a) + assert mem[0] == -30.0 + From noreply at buildbot.pypy.org Thu Oct 22 14:15:44 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 14:15:44 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: testing several other values for floating point addition Message-ID: <20151022121544.283451C0036@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80388:8367c6c91fdf Date: 2015-10-22 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/8367c6c91fdf/ Log: testing several other values for floating point addition diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -1,3 +1,4 @@ +import py import struct from rpython.jit.backend.zarch import conditions as con from rpython.jit.backend.zarch import masks as msk @@ -35,6 +36,10 @@ print hex(addr) return struct.pack('>Q', addr) +def isclose(a,b, rel_tol=1e-9, abs_tol=0.0): + # from PEP 485, added in python 
3.5 + return abs(a-b) <= max( rel_tol * max(abs(a), abs(b)), abs_tol ) + class TestRunningAssembler(object): def setup_method(self, method): cpu = CPU(None, None) @@ -210,20 +215,28 @@ self.a.jmpto(reg.r14) assert run_asm(self.a) == -15 - def test_float_to_memory(self): + @py.test.mark.parametrize("v1,v2,res", [ + ( 0.0, 0.0, 0.0), + ( -15.0, -15.0, -30.0), + ( 1.5, -3.22, -1.72), + ( 0.5, 0.0, 0.5), + ( 0.0001, -0.0002, -0.0001), + ]) + def test_float_to_memory(self, v1, v2, res): with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 16) as mem: with self.label('func', func=True): with self.label('lit'): self.mc.BRAS(reg.r13, loc.imm(0)) - self.mc.write(BFL(-15.0)) + self.mc.write(BFL(v1)) + self.mc.write(BFL(v2)) self.mc.write(ADDR(mem)) self.jump_here(self.mc.BRAS, 'lit') self.mc.LD(reg.f0, loc.addr(0, reg.r13)) - self.mc.LDR(reg.f1, reg.f0) + self.mc.LD(reg.f1, loc.addr(8, reg.r13)) self.mc.ADBR(reg.f0, reg.f1) - self.mc.LG(reg.r11, loc.addr(8, reg.r13)) + self.mc.LG(reg.r11, loc.addr(16, reg.r13)) self.mc.STD(reg.f0, loc.addr(0, reg.r11)) self.a.jmpto(reg.r14) run_asm(self.a) - assert mem[0] == -30.0 + assert isclose(mem[0],res) From noreply at buildbot.pypy.org Thu Oct 22 16:19:27 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 16:19:27 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress Message-ID: <20151022141927.8CFF61C0290@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80393:8e2b3183d88b Date: 2015-10-22 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/8e2b3183d88b/ Log: in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -215,11 +215,7 @@ return True def get_llhelper(self, space): - llh = getattr(self, '_llhelper', None) - if llh is None: - llh = llhelper(self.functype, self.get_wrapper(space)) - self._llhelper = llh - return llh + return llhelper(self.functype, 
self.get_wrapper(space)) @specialize.memo() def get_wrapper(self, space): @@ -232,6 +228,7 @@ wrapper.c_name = cpyext_namespace.uniquename(self.c_name) return wrapper + def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, external=True, gil=None, result_borrowed=False): """ @@ -402,11 +399,11 @@ # These are C symbols which cpyext will export, but which are defined in .c # files somewhere in the implementation of cpyext (rather than being defined in # RPython). -SYMBOLS_C = [ +SYMBOLS_C = set([ 'Py_FatalError', 'PyOS_snprintf', 'PyOS_vsnprintf', 'PyArg_Parse', 'PyArg_ParseTuple', 'PyArg_UnpackTuple', 'PyArg_ParseTupleAndKeywords', 'PyArg_VaParse', 'PyArg_VaParseTupleAndKeywords', '_PyArg_NoKeywords', - 'PyString_FromFormat', 'PyString_FromFormatV', + 'PyString_FromFormat', 'PyString_FromFormatV', '_PyString_Resize', 'PyModule_AddObject', 'PyModule_AddIntConstant', 'PyModule_AddStringConstant', 'Py_BuildValue', 'Py_VaBuildValue', 'PyTuple_Pack', '_PyArg_Parse_SizeT', '_PyArg_ParseTuple_SizeT', @@ -454,7 +451,7 @@ 'Py_FrozenFlag', 'Py_TabcheckFlag', 'Py_UnicodeFlag', 'Py_IgnoreEnvironmentFlag', 'Py_DivisionWarningFlag', 'Py_DontWriteBytecodeFlag', 'Py_NoUserSiteDirectory', '_Py_QnewFlag', 'Py_Py3kWarningFlag', 'Py_HashRandomizationFlag', '_Py_PackageContext', -] +]) TYPES = {} GLOBALS = { # this needs to include all prebuilt pto, otherwise segfaults occur '_Py_NoneStruct#': ('PyObject*', 'space.w_None'), @@ -803,7 +800,7 @@ from pypy.module.cpyext.pyobject import setup_prebuilt_pyobj, _Py_Dealloc from rpython.rlib import rawrefcount - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) + export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() @@ -1100,7 +1097,7 @@ "NOT_RPYTHON" from pypy.module.cpyext.pyobject import make_ref - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) + export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + 
sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h --- a/pypy/module/cpyext/include/intobject.h +++ b/pypy/module/cpyext/include/intobject.h @@ -13,6 +13,7 @@ long ob_ival; } PyIntObject; + /* Macro, trading safety for speed */ #define PyInt_AS_LONG(op) (((PyIntObject *)(op))->ob_ival) diff --git a/pypy/module/cpyext/include/stringobject.h b/pypy/module/cpyext/include/stringobject.h --- a/pypy/module/cpyext/include/stringobject.h +++ b/pypy/module/cpyext/include/stringobject.h @@ -7,15 +7,19 @@ extern "C" { #endif -#define PyString_GET_SIZE(op) PyString_Size(op) -#define PyString_AS_STRING(op) PyString_AsString(op) +/* Macro, trading safety for speed */ +#define PyString_GET_SIZE(op) Py_SIZE(op) +#define PyString_AS_STRING(op) ( \ + ((PyStringObject *)(op))->ob_sval_pypy[((PyStringObject *)(op))->ob_size] \ + == 0 ? ((PyStringObject *)(op))->ob_sval_pypy : PyString_AsString(op)) + typedef struct { - PyObject_HEAD - char* buffer; - Py_ssize_t size; + PyObject_VAR_HEAD + char ob_sval_pypy[1]; } PyStringObject; +PyAPI_FUNC(int) _PyString_Resize(PyObject **pv, Py_ssize_t newsize); PyAPI_FUNC(PyObject *) PyString_FromFormatV(const char *format, va_list vargs); PyAPI_FUNC(PyObject *) PyString_FromFormat(const char *format, ...); diff --git a/pypy/module/cpyext/include/tupleobject.h b/pypy/module/cpyext/include/tupleobject.h --- a/pypy/module/cpyext/include/tupleobject.h +++ b/pypy/module/cpyext/include/tupleobject.h @@ -12,6 +12,7 @@ PyObject *ob_item[1]; } PyTupleObject; + /* defined in varargswrapper.c */ PyAPI_FUNC(PyObject *) PyTuple_Pack(Py_ssize_t, ...); diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py --- a/pypy/module/cpyext/intobject.py +++ b/pypy/module/cpyext/intobject.py @@ -45,9 +45,9 @@ # --and then we call this function to initialize the W_IntObject-- fill_pypy=int_fill_pypy, - # --in 
this case, and if PyInt_CheckExact() returns True, then - # the link can be light, i.e. the original PyIntObject might - # be freed with free() by the GC-- + # --if PyInt_CheckExact() returns True, then such a link can + # be light, i.e. the original PyIntObject is freed with free() + # by the GC if there is no more reference to the PyIntObject-- alloc_pypy_light_if=PyInt_CheckExact, ) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -116,6 +116,15 @@ assert 'cpyext_create_pypy' not in typedef.__dict__ typedef.cpyext_create_pypy = cpyext_create_pypy + if tp_basestruct._arrayfld is None: + typedef.cpyext_basicsize = rffi.sizeof(tp_basestruct) + typedef.cpyext_itemsize = 0 + else: + typedef.cpyext_basicsize = rffi.offsetof(tp_basestruct, + tp_basestruct._arrayfld) + ARRAY = getattr(tp_basestruct, tp_basestruct._arrayfld) + typedef.cpyext_itemsize = rffi.sizeof(ARRAY.OF) + if tp_dealloc: @cpython_api([PyObject], lltype.Void, external=False, error=CANNOT_FAIL) @@ -178,6 +187,8 @@ W_ObjectObject.typedef.cpyext_create_pypy) TypeDef.cpyext_get_dealloc = staticmethod( W_ObjectObject.typedef.cpyext_get_dealloc) + TypeDef.cpyext_basicsize = W_ObjectObject.typedef.cpyext_basicsize + TypeDef.cpyext_itemsize = W_ObjectObject.typedef.cpyext_itemsize def _default_dealloc(space, py_obj): lltype.free(py_obj, flavor='raw', track_allocation=False) diff --git a/pypy/module/cpyext/src/stringobject.c b/pypy/module/cpyext/src/stringobject.c --- a/pypy/module/cpyext/src/stringobject.c +++ b/pypy/module/cpyext/src/stringobject.c @@ -248,3 +248,21 @@ va_end(vargs); return ret; } + +int +_PyString_Resize(PyObject **pv, Py_ssize_t newsize) +{ + /* XXX always create a new string so far */ + PyObject *v = *pv; + Py_ssize_t size = PyString_GET_SIZE(v); + PyObject *newv = PyString_FromStringAndSize(NULL, newsize); + if (newv == NULL) { + Py_DECREF(v); + return -1; + } + 
memcpy(PyString_AS_STRING(newv), PyString_AS_STRING(v), + size < newsize ? size : newsize); + Py_DECREF(v); + *pv = newv; + return 0; +} diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -1,12 +1,14 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( - cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + cpython_api, cpython_struct, bootstrap_function, build_type_checkers3, + PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + PyObject, PyObjectP, Py_DecRef, get_pyobj_and_incref, from_pyobj, + setup_class_for_cpyext, RRC_PERMANENT_LIGHT, new_pyobj) +from pypy.objspace.std.bytesobject import W_BytesObject, W_AbstractBytesObject +from pypy.module.cpyext import support ## ## Implementation of PyStringObject @@ -16,7 +18,8 @@ ## ----------- ## ## PyString_AsString() must return a (non-movable) pointer to the underlying -## buffer, whereas pypy strings are movable. C code may temporarily store +## buffer, whereas pypy strings are movable (and also, they are not +## null-terminated at all). The C code may temporarily store ## this address and use it, as long as it owns a reference to the PyObject. ## There is no "release" function to specify that the pointer is not needed ## any more. 
@@ -52,162 +55,135 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +PyStringObjectFields = PyVarObjectFields + \ + (("ob_sval_pypy", rffi.CArray(lltype.Char)),) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) +PyString_Check, PyString_CheckExact, _PyString_Type = ( + build_type_checkers3("String", "w_str")) + + @bootstrap_function def init_stringobject(space): "Type description of PyStringObject" - make_typedescr(space.w_str.instancetypedef, - basestruct=PyStringObject.TO, - attach=string_attach, - dealloc=string_dealloc, - realize=string_realize) + setup_class_for_cpyext( + W_AbstractBytesObject, + basestruct=PyStringObjectStruct, -PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") + # --from a W_BytesObject, we call this function to allocate + # a PyStringObject, initially without any data-- + alloc_pyobj=string_alloc_pyobj, -def new_empty_str(space, length): + # --reverse direction: from a PyStringObject, we make a W_BytesObject + # by instantiating a custom subclass of W_BytesObject-- + realize_subclass_of=W_BytesObject, + + # --and then we call this function to initialize the W_BytesObject-- + fill_pypy=string_fill_pypy, + + # --in this case, and if PyString_CheckExact() returns True, then + # the link can be light, i.e. the original PyStringObject might + # be freed with free() by the GC-- + alloc_pypy_light_if=PyString_CheckExact, + ) + +def _string_fill_pyobj(s, ob): + rffi.str2chararray(s, ob.c_ob_sval_pypy, len(s)) + ob.c_ob_sval_pypy[len(s)] = '\x00' + +def string_alloc_pyobj(space, w_obj): """ - Allocatse a PyStringObject and its buffer, but without a corresponding - interpreter object. The buffer may be mutated, until string_realize() is - called. + Makes a PyTupleObject from a W_AbstractBytesObject. 
""" - typedescr = get_typedescr(space.w_str.instancetypedef) - py_obj = typedescr.allocate(space, space.w_str) - py_str = rffi.cast(PyStringObject, py_obj) + assert isinstance(w_obj, W_AbstractBytesObject) + size = w_obj.string_length() + ob = lltype.malloc(PyStringObjectStruct, size + 1, flavor='raw', + track_allocation=False) + ob.c_ob_size = size + if size > 8: + ob.c_ob_sval_pypy[size] = '*' # not filled yet + else: + _string_fill_pyobj(w_obj.str_w(space), ob) + return ob, RRC_PERMANENT_LIGHT - buflen = length + 1 - py_str.c_size = length - py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, - flavor='raw', zero=True) - return py_str - -def string_attach(space, py_obj, w_obj): - """ - Fills a newly allocated PyStringObject with the given string object. The - buffer must not be modified. - """ - py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) - py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) - -def string_realize(space, py_obj): +def string_fill_pypy(space, w_obj, py_obj): """ Creates the string in the interpreter. The PyStringObject buffer must not be modified after this call. """ py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) - w_obj = space.wrap(s) - track_reference(space, py_obj, w_obj) - return w_obj - - at cpython_api([PyObject], lltype.Void, external=False) -def string_dealloc(space, py_obj): - """Frees allocated PyStringObject resources. - """ - py_str = rffi.cast(PyStringObject, py_obj) - if py_str.c_buffer: - lltype.free(py_str.c_buffer, flavor="raw") - from pypy.module.cpyext.object import PyObject_dealloc - PyObject_dealloc(space, py_obj) + s = rffi.charpsize2str(rffi.cast(rffi.CCHARP, py_str.c_ob_sval_pypy), + py_str.c_ob_size) + W_BytesObject.__init__(w_obj, s) #_______________________________________________________________________ +def new_empty_str(space, length): + """ + Allocates an uninitialized PyStringObject. 
The string may be mutated + as long as it has a refcount of 1; notably, until string_fill_pypy() is + called. + """ + py_str = new_pyobj(PyStringObjectStruct, _PyString_Type(space), length + 1) + py_str.c_ob_size = length + py_str.c_ob_sval_pypy[length] = '\x00' + return py_str + @cpython_api([CONST_STRING, Py_ssize_t], PyObject) def PyString_FromStringAndSize(space, char_p, length): + # XXX move to C + py_str = new_empty_str(space, length) if char_p: - s = rffi.charpsize2str(char_p, length) - return make_ref(space, space.wrap(s)) - else: - return rffi.cast(PyObject, new_empty_str(space, length)) + support.memcpy_fn(py_str.c_ob_sval_pypy, char_p, length) + return rffi.cast(PyObject, py_str) @cpython_api([CONST_STRING], PyObject) def PyString_FromString(space, char_p): + # is it better to make an RPython object and lazily copy data to + # the C string, or make a purely C PyStringObject and then usually + # copy the string again to RPython? no clue... ideally, we should + # measure and adapt dynamically s = rffi.charp2str(char_p) return space.wrap(s) @cpython_api([PyObject], rffi.CCHARP, error=0) def PyString_AsString(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: - pass # typecheck returned "ok" without forcing 'ref' at all - elif not PyString_Check(space, ref): # otherwise, use the alternate way + if not PyString_Check(space, ref): raise OperationError(space.w_TypeError, space.wrap( "PyString_AsString only support strings")) ref_str = rffi.cast(PyStringObject, ref) - if not ref_str.c_buffer: + last_char = ref_str.c_ob_sval_pypy[ref_str.ob_size] + if last_char != '\x00': + assert last_char == '*' # copy string buffer - w_str = from_ref(space, ref) - s = space.str_w(w_str) - ref_str.c_buffer = rffi.str2charp(s) - return ref_str.c_buffer + w_str = from_pyobj(space, ref) + _string_fill_pyobj(w_str.str_w(space), ref_str) + return rffi.cast(rffi.CCHARP, ref_str.c_ob_sval_pypy) @cpython_api([PyObject, rffi.CCHARPP, 
rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): - if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + buffer[0] = Pystring_AsString(space, ref) ref_str = rffi.cast(PyStringObject, ref) - if not ref_str.c_buffer: - # copy string buffer - w_str = from_ref(space, ref) - s = space.str_w(w_str) - ref_str.c_buffer = rffi.str2charp(s) - buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 - while ref_str.c_buffer[i] != '\0': + while ref_str.c_ob_sval_pypy[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @cpython_api([PyObject], Py_ssize_t, error=-1) def PyString_Size(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + if PyString_Check(space, ref): ref = rffi.cast(PyStringObject, ref) - return ref.c_size + return ref.c_ob_size else: - w_obj = from_ref(space, ref) + w_obj = from_pyobj(space, ref) return space.len_w(w_obj) - at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) -def _PyString_Resize(space, ref, newsize): - """A way to resize a string object even though it is "immutable". Only use this to - build up a brand new string object; don't use this if the string may already be - known in other parts of the code. It is an error to call this function if the - refcount on the input string object is not one. Pass the address of an existing - string object as an lvalue (it may be written into), and the new size desired. - On success, *string holds the resized string object and 0 is returned; - the address in *string may differ from its input value. 
If the reallocation - fails, the original string object at *string is deallocated, *string is - set to NULL, a memory exception is set, and -1 is returned. - """ - # XXX always create a new string so far - py_str = rffi.cast(PyStringObject, ref[0]) - if not py_str.c_buffer: - raise OperationError(space.w_SystemError, space.wrap( - "_PyString_Resize called on already created string")) - try: - py_newstr = new_empty_str(space, newsize) - except MemoryError: - Py_DecRef(space, ref[0]) - ref[0] = lltype.nullptr(PyObject.TO) - raise - to_cp = newsize - oldsize = py_str.c_size - if oldsize < newsize: - to_cp = oldsize - for i in range(to_cp): - py_newstr.c_buffer[i] = py_str.c_buffer[i] - Py_DecRef(space, ref[0]) - ref[0] = rffi.cast(PyObject, py_newstr) - return 0 - @cpython_api([PyObject, PyObject], rffi.INT, error=CANNOT_FAIL) def _PyString_Eq(space, w_str1, w_str2): return space.eq_w(w_str1, w_str2) @@ -228,10 +204,10 @@ Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) return - w_str = from_ref(space, ref[0]) + w_str = from_pyobj(space, ref[0]) w_newstr = space.add(w_str, w_newpart) Py_DecRef(space, ref[0]) - ref[0] = make_ref(space, w_newstr) + ref[0] = get_pyobj_and_incref(space, w_newstr) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyString_ConcatAndDel(space, ref, newpart): diff --git a/pypy/module/cpyext/support.py b/pypy/module/cpyext/support.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/support.py @@ -0,0 +1,8 @@ +from rpython.rtyper.lltypesystem import lltype, rffi + +memcpy_fn = rffi.llexternal('memcpy', [rffi.CCHARP, rffi.CCHARP, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True) +memset_fn = rffi.llexternal('memset', [rffi.CCHARP, rffi.INT, + rffi.SIZE_T], lltype.Void, + sandboxsafe=True) diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -30,6 +30,8 @@ } 
if(s->ob_type->tp_basicsize != sizeof(void*)*4) result = 0; + if(s->ob_type->tp_itemsize != sizeof(char)) + result = 0; Py_DECREF(s); return PyBool_FromLong(result); """), diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -4,7 +4,6 @@ from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib.rstring import rsplit -from rpython.rtyper.annlowlevel import llhelper from rpython.rtyper.lltypesystem import rffi, lltype from pypy.interpreter.baseobjspace import W_Root, DescrMismatch @@ -132,8 +131,7 @@ if get_slot: slot_func_helper = get_slot() elif slot_func: - slot_func_helper = llhelper(slot_func.api_func.functype, - slot_func.api_func.get_wrapper(space)) + slot_func_helper = slot_func.api_func.get_llhelper(space) if slot_func_helper is None: if WARN_ABOUT_MISSING_SLOT_FUNCTIONS: @@ -226,8 +224,7 @@ def setup_new_method_def(space): ptr = get_new_method_def(space) ptr.c_ml_meth = rffi.cast(PyCFunction_typedef, - llhelper(tp_new_wrapper.api_func.functype, - tp_new_wrapper.api_func.get_wrapper(space))) + tp_new_wrapper.api_func.get_llhelper(space)) def add_tp_new_wrapper(space, dict_w, pto): if "__new__" in dict_w: @@ -294,8 +291,7 @@ def subtype_dealloc(space, obj): pto = obj.c_ob_type base = pto - this_func_ptr = llhelper(subtype_dealloc.api_func.functype, - subtype_dealloc.api_func.get_wrapper(space)) + this_func_ptr = subtype_dealloc.api_func.get_llhelper(space) while base.c_tp_dealloc == this_func_ptr: base = base.c_tp_base assert base @@ -354,22 +350,17 @@ def setup_string_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) lltype.render_immortal(c_buf) - c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, - str_segcount.api_func.get_wrapper(space)) - c_buf.c_bf_getreadbuffer = llhelper(str_getreadbuffer.api_func.functype, - 
str_getreadbuffer.api_func.get_wrapper(space)) - c_buf.c_bf_getcharbuffer = llhelper(str_getcharbuffer.api_func.functype, - str_getcharbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getsegcount = str_segcount.api_func.get_llhelper(space) + c_buf.c_bf_getreadbuffer = str_getreadbuffer.api_func.get_llhelper(space) + c_buf.c_bf_getcharbuffer = str_getcharbuffer.api_func.get_llhelper(space) pto.c_tp_as_buffer = c_buf pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER def setup_buffer_buffer_procs(space, pto): c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True) lltype.render_immortal(c_buf) - c_buf.c_bf_getsegcount = llhelper(str_segcount.api_func.functype, - str_segcount.api_func.get_wrapper(space)) - c_buf.c_bf_getreadbuffer = llhelper(buf_getreadbuffer.api_func.functype, - buf_getreadbuffer.api_func.get_wrapper(space)) + c_buf.c_bf_getsegcount = str_segcount.api_func.get_llhelper(space) + c_buf.c_bf_getreadbuffer = buf_getreadbuffer.api_func.get_llhelper(space) pto.c_tp_as_buffer = c_buf @cpython_api([PyObject], lltype.Void, external=False) @@ -436,10 +427,8 @@ if space.is_w(w_type, space.w_buffer): setup_buffer_buffer_procs(space, pto) - pto.c_tp_free = llhelper(PyObject_Del.api_func.functype, - PyObject_Del.api_func.get_wrapper(space)) - pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype, - PyType_GenericAlloc.api_func.get_wrapper(space)) + pto.c_tp_free = PyObject_Del.api_func.get_llhelper(space) + pto.c_tp_alloc = PyType_GenericAlloc.api_func.get_llhelper(space) if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) @@ -450,8 +439,10 @@ # leak the name, but only for types in PyPy that correspond to # non-heap types in CPython pto.c_tp_name = rffi.str2charp(w_type.name, track_allocation=False) - pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out - pto.c_tp_itemsize = 0 + + typedef = w_type.instancetypedef + pto.c_tp_basicsize = 
typedef.cpyext_basicsize + pto.c_tp_itemsize = typedef.cpyext_itemsize if space.is_w(w_type, space.w_object): pto.c_tp_new = rffi.cast(newfunc, 1) # XXX temp # uninitialized fields: @@ -466,8 +457,6 @@ finish_type_2(space, pto, w_type) - #pto.c_tp_basicsize = rffi.sizeof(typedescr.basestruct) ZZZ - pto.c_tp_basicsize = rffi.sizeof(PyObject.TO) # ZZZ if pto.c_tp_base: if pto.c_tp_base.c_tp_basicsize > pto.c_tp_basicsize: pto.c_tp_basicsize = pto.c_tp_base.c_tp_basicsize @@ -594,9 +583,7 @@ if not pto.c_tp_setattro: from pypy.module.cpyext.object import PyObject_GenericSetAttr - pto.c_tp_setattro = llhelper( - PyObject_GenericSetAttr.api_func.functype, - PyObject_GenericSetAttr.api_func.get_wrapper(space)) + pto.c_tp_setattro = PyObject_GenericSetAttr.api_func.get_llhelper(space) w_dict = w_type.getdict(space) old = pto.c_tp_dict diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -478,6 +478,9 @@ def _len(self): return len(self._value) + def string_length(self): # for cpyext + return self._len() + _val = str_w @staticmethod diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -43,6 +43,9 @@ def readbuf_w(self, space): return StringBuffer(self.force()) + def string_length(self): # for cpyext + return self.length + def descr_len(self, space): return space.wrap(self.length) From noreply at buildbot.pypy.org Thu Oct 22 16:23:59 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 16:23:59 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: instrs float division Message-ID: <20151022142359.485291C0290@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80394:70f5409c0a1c Date: 2015-10-22 15:19 +0200 http://bitbucket.org/pypy/pypy/changeset/70f5409c0a1c/ Log: instrs float division diff --git 
a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -118,7 +118,7 @@ 'AEB': ('rxe', ['\xED','\x0A'], 'r,bidl,-'), 'ADB': ('rxe', ['\xED','\x1A'], 'r,bidl,-'), - # SUBSTRACT + # SUBSTRACTION 'SEBR': ('rre', ['\xB3','\x0B']), 'SDBR': ('rre', ['\xB3','\x1B']), 'SEB': ('rxe', ['\xED','\x0B'], 'r,bidl,-'), @@ -128,7 +128,11 @@ 'MDBR': ('rre', ['\xB3','\x1C']), 'MDB': ('rxe', ['\xED','\x1C'], 'r,bidl,-'), - + # DIVISION + 'DEBR': ('rre', ['\xB3','\x0D']), + 'DDBR': ('rre', ['\xB3','\x1D']), + 'DEB': ('rxe', ['\xED','\x0D'], 'r,bidl,-'), + 'DDB': ('rxe', ['\xED','\x1D'], 'r,bidl,-'), } all_mnemonic_codes = { From noreply at buildbot.pypy.org Thu Oct 22 16:24:01 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 16:24:01 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: testing single float to float cast, testing int64 to float cast, added div (with remainer) instr Message-ID: <20151022142401.76A1B1C0290@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80395:20f482e7b1ef Date: 2015-10-22 16:01 +0200 http://bitbucket.org/pypy/pypy/changeset/20f482e7b1ef/ Log: testing single float to float cast, testing int64 to float cast, added div (with remainer) instr diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -5,11 +5,7 @@ def dummy_argument(arg): """ NOT_RPYTHON """ - if arg == 'r' or arg == 'r/m': - return 0 - if arg == 'f': - return 0 - if arg == '-': + if arg in ('r', 'r/m', 'm', 'f', '-'): return 0 if arg.startswith('i') or arg.startswith('u'): return 0 @@ -25,6 +21,7 @@ - - unused f - floating point register r - register + m - mask r/m - register or mask iX - immediate X bits (signed) uX - immediate X 
bits (unsigend) diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -102,15 +102,23 @@ 'LNEBR': ('rre', ['\xB3','\x01']), 'LNDBR': ('rre', ['\xB3','\x11']), + + # load lengthended + 'LDEBR': ('rre', ['\xB3','\x04']), } floatingpoint_mnemonic_codes = { 'FIEBR': ('rrf', ['\xB3','\x57'], 'r,u4,r,-'), 'FIDBR': ('rrf', ['\xB3','\x5F'], 'r,u4,r,-'), + # convert to fixed 'CGEBR': ('rrf', ['\xB3','\xA8'], 'r,u4,r,-'), 'CGDBR': ('rrf', ['\xB3','\xA9'], 'r,u4,r,-'), + # convert from fixed + 'CEGBR': ('rre', ['\xB3','\xA4']), + 'CDGBR': ('rre', ['\xB3','\xA5']), + # arithmetic # ADDITION 'AEBR': ('rre', ['\xB3','\x0A']), @@ -133,6 +141,11 @@ 'DDBR': ('rre', ['\xB3','\x1D']), 'DEB': ('rxe', ['\xED','\x0D'], 'r,bidl,-'), 'DDB': ('rxe', ['\xED','\x1D'], 'r,bidl,-'), + + # DIVIDE (+mod) + 'DIEBR': ('rrf', ['\xB3','\x53'], 'r,r,r,m'), + 'DIDBR': ('rrf', ['\xB3','\x5B'], 'r,r,r,m'), + } all_mnemonic_codes = { diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -20,7 +20,7 @@ from rpython.rlib.objectmodel import specialize from rpython.rlib.debug import ll_assert from rpython.rlib.longlong2float import (float2longlong, - DOUBLE_ARRAY_PTR) + DOUBLE_ARRAY_PTR, singlefloat2uint_emulator) import ctypes CPU = getcpuclass() @@ -28,13 +28,14 @@ def byte_count(func): return func._byte_count -def BFL(value): +def BFL(value, short=False): + if short: + return struct.pack('f', value) return struct.pack('>q', float2longlong(value)) def ADDR(value): ptr = ll2ctypes.lltype2ctypes(value) addr = ctypes.addressof(ptr.contents.items) - print hex(addr) return struct.pack('>Q', addr) def isclose(a,b, rel_tol=1e-9, abs_tol=0.0): @@ -65,6 +66,12 @@ def float(self, val): 
self.asm.mc.write(BFL(val)) + def single_float(self, val): + self.asm.mc.write(BFL(val, short=True)) + + def int64(self, val): + self.asm.mc.write(struct.pack('>q', val)) + class LabelCtx(object): def __init__(self, asm, name): self.asm = asm @@ -315,3 +322,32 @@ self.mc.STD(reg.f0, loc.addr(0, reg.r11)) run_asm(self.a) assert isclose(mem[0], 0.0) + + def test_cast_single_float_to_float(self): + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 16) as mem: + with ActivationRecordCtx(self): + with LiteralPoolCtx(self) as pool: + pool.single_float(6.66) + pool.addr(mem) + self.mc.LEY(reg.f1, loc.addr(0, reg.r13)) + ## cast short to long! + self.mc.LDEBR(reg.f0, reg.f1) + self.mc.LG(reg.r11, loc.addr(4, reg.r13)) + self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + run_asm(self.a) + assert isclose(mem[0], 6.66, abs_tol=0.05) + + def test_cast_int64_to_float(self): + with lltype.scoped_alloc(DOUBLE_ARRAY_PTR.TO, 16) as mem: + with ActivationRecordCtx(self): + with LiteralPoolCtx(self) as pool: + pool.int64(12345) + pool.addr(mem) + self.mc.LG(reg.r12, loc.addr(0, reg.r13)) + # cast int to float! 
+ self.mc.CDGBR(reg.f0, reg.r12) + self.mc.LG(reg.r11, loc.addr(8, reg.r13)) + self.mc.STD(reg.f0, loc.addr(0, reg.r11)) + run_asm(self.a) + assert isclose(mem[0], 12345.0) + diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -132,6 +132,7 @@ 'r': REGS, 'f': FP_REGS, 'r/m': REGS, + 'm': range_of_bits(4), 'i4': range_of_bits(4, signed=True), 'i8': range_of_bits(8, signed=True), 'i16': range_of_bits(16, signed=True), From noreply at buildbot.pypy.org Thu Oct 22 16:35:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 22 Oct 2015 16:35:41 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fixes Message-ID: <20151022143541.20AD31C04DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80396:3444872d2fb9 Date: 2015-10-22 16:10 +0200 http://bitbucket.org/pypy/pypy/changeset/3444872d2fb9/ Log: fixes diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -66,6 +66,7 @@ @bootstrap_function def init_stringobject(space): "Type description of PyStringObject" + global _basic_size setup_class_for_cpyext( W_AbstractBytesObject, basestruct=PyStringObjectStruct, @@ -86,6 +87,8 @@ # be freed with free() by the GC-- alloc_pypy_light_if=PyString_CheckExact, ) + W_BytesObject.typedef.cpyext_basicsize += 1 # includes the NULL + _basic_size = W_BytesObject.typedef.cpyext_basicsize def _string_fill_pyobj(s, ob): rffi.str2chararray(s, ob.c_ob_sval_pypy, len(s)) @@ -97,7 +100,7 @@ """ assert isinstance(w_obj, W_AbstractBytesObject) size = w_obj.string_length() - ob = lltype.malloc(PyStringObjectStruct, size + 1, flavor='raw', + ob = lltype.malloc(PyStringObjectStruct, _basic_size + size, flavor='raw', track_allocation=False) 
ob.c_ob_size = size if size > 8: @@ -152,7 +155,7 @@ raise OperationError(space.w_TypeError, space.wrap( "PyString_AsString only support strings")) ref_str = rffi.cast(PyStringObject, ref) - last_char = ref_str.c_ob_sval_pypy[ref_str.ob_size] + last_char = ref_str.c_ob_sval_pypy[ref_str.c_ob_size] if last_char != '\x00': assert last_char == '*' # copy string buffer diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_stringobject.py @@ -28,7 +28,7 @@ if(PyString_Size(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*4) + if(s->ob_type->tp_basicsize != sizeof(void*) * 4 + 1) result = 0; if(s->ob_type->tp_itemsize != sizeof(char)) result = 0; @@ -79,8 +79,7 @@ ]) s = module.getstring() assert len(s) == 4 - assert s == 'ab\x00c' - + assert s[:2] == 'ab' and s[3] == 'c' # s[2] undefined def test_AsString(self): From noreply at buildbot.pypy.org Thu Oct 22 16:42:57 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 22 Oct 2015 16:42:57 +0200 (CEST) Subject: [pypy-commit] pypy s390x-backend: floating point comparison operations Message-ID: <20151022144257.EA8791C0036@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80397:6f08bc334a9b Date: 2015-10-22 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/6f08bc334a9b/ Log: floating point comparison operations diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -146,6 +146,11 @@ 'DIEBR': ('rrf', ['\xB3','\x53'], 'r,r,r,m'), 'DIDBR': ('rrf', ['\xB3','\x5B'], 'r,r,r,m'), + # COMPARISON + 'CEBR': ('rre', ['\xB3','\x09']), + 'CDBR': ('rre', ['\xB3','\x19']), + 'CEB': ('rxe', ['\xED','\x09'], 'r,bidl,-'), + 'CDB': ('rxe', ['\xED','\x19'], 'r,bidl,-'), } all_mnemonic_codes = { 
diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -351,3 +351,16 @@ run_asm(self.a) assert isclose(mem[0], 12345.0) + def test_float_cmp(self): + with ActivationRecordCtx(self): + with LiteralPoolCtx(self) as pool: + pool.float(1.0) + pool.float(2.0) + self.mc.LD(reg.f0, loc.addr(0, reg.r13)) + self.mc.LD(reg.f1, loc.addr(8, reg.r13)) + self.mc.CDBR(reg.f0, reg.f1) + self.mc.LGHI(reg.r2, loc.imm(0)) + self.mc.BCR(con.EQ, reg.r14) # must not branch + self.mc.LGHI(reg.r2, loc.imm(1)) + self.a.jmpto(reg.r14) + assert run_asm(self.a) == 1 From noreply at buildbot.pypy.org Thu Oct 22 17:36:15 2015 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 22 Oct 2015 17:36:15 +0200 (CEST) Subject: [pypy-commit] pypy no-class-specialize: pypy cleanup: _settled_ is now ignored Message-ID: <20151022153615.AF7721C0290@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: no-class-specialize Changeset: r80398:e1d57129043d Date: 2015-10-22 15:59 +0100 http://bitbucket.org/pypy/pypy/changeset/e1d57129043d/ Log: pypy cleanup: _settled_ is now ignored diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -28,7 +28,6 @@ """This is the abstract root class of all wrapped objects that live in a 'normal' object space like StdObjSpace.""" __slots__ = () - _settled_ = True user_overridden_class = False def getdict(self, space): From noreply at buildbot.pypy.org Fri Oct 23 14:21:30 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Oct 2015 20:21:30 +0200 (CEST) Subject: [pypy-commit] pypy default: merge lazy-fast2locals Message-ID: <20151023182130.880D71C1DEA@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80416:e96988647017 Date: 2015-10-23 20:21 +0200 
http://bitbucket.org/pypy/pypy/changeset/e96988647017/ Log: merge lazy-fast2locals improve the performance of simple trace functions by lazily calling fast2locals and locals2fast only if f_locals is actually accessed. diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -327,10 +327,14 @@ w_arg = space.newtuple([operr.w_type, w_value, space.wrap(operr.get_traceback())]) - frame.fast2locals() + d = frame.getorcreatedebug() + if d.w_locals is not None: + # only update the w_locals dict if it exists + # if it does not exist yet and the tracer accesses it via + # frame.f_locals, it is filled by PyFrame.getdictscope + frame.fast2locals() self.is_tracing += 1 try: - d = frame.getorcreatedebug() try: w_result = space.call_function(w_callback, space.wrap(frame), space.wrap(event), w_arg) if space.is_w(w_result, space.w_None): @@ -343,7 +347,8 @@ raise finally: self.is_tracing -= 1 - frame.locals2fast() + if d.w_locals is not None: + frame.locals2fast() # Profile cases if self.profilefunc is not None: diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,10 +1,14 @@ from rpython.tool import udir from pypy.conftest import option +from pypy.interpreter.gateway import interp2app +def check_no_w_locals(space, w_frame): + return space.wrap(w_frame.getorcreatedebug().w_locals is None) class AppTestPyFrame: def setup_class(cls): + space = cls.space cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) if not option.runappdirect: @@ -17,6 +21,8 @@ w_call_further.code.hidden_applevel = True # hack cls.w_call_further = w_call_further + cls.w_check_no_w_locals = space.wrap(interp2app(check_no_w_locals)) + # test for the presence of the attributes, not functionality def 
test_f_locals(self): @@ -493,6 +499,25 @@ sys.settrace(None) assert res == 42 + def test_fast2locals_called_lazily(self): + import sys + class FrameHolder: + pass + fh = FrameHolder() + def trace(frame, what, arg): + # trivial trace function, does not access f_locals + fh.frame = frame + return trace + def f(x): + x += 1 + return x + sys.settrace(trace) + res = f(1) + sys.settrace(None) + assert res == 2 + if hasattr(self, "check_no_w_locals"): # not appdirect + assert self.check_no_w_locals(fh.frame) + def test_set_unset_f_trace(self): import sys seen = [] From noreply at buildbot.pypy.org Sat Oct 24 10:13:34 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 16:13:34 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fix test Message-ID: <20151024141334.F17931C1046@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80422:af4c22859892 Date: 2015-10-24 14:53 +0200 http://bitbucket.org/pypy/pypy/changeset/af4c22859892/ Log: fix test diff --git a/pypy/module/cpyext/test/test_listobject.py b/pypy/module/cpyext/test/test_listobject.py --- a/pypy/module/cpyext/test/test_listobject.py +++ b/pypy/module/cpyext/test/test_listobject.py @@ -38,10 +38,10 @@ assert api.PyList_Insert(w_l, 0, space.wrap(1)) == 0 assert api.PyList_Size(w_l) == 3 assert api.PyList_Insert(w_l, 99, space.wrap(2)) == 0 - assert space.unwrap(api.PyList_GetItem(w_l, 3)) == 2 + assert space.unwrap(api.from_pyobj(api.PyList_GetItem(w_l, 3))) == 2 # insert at index -1: next-to-last assert api.PyList_Insert(w_l, -1, space.wrap(3)) == 0 - assert space.unwrap(api.PyList_GetItem(w_l, 3)) == 3 + assert space.unwrap(api.from_pyobj(api.PyList_GetItem(w_l, 3))) == 3 def test_sort(self, space, api): l = space.newlist([space.wrap(1), space.wrap(0), space.wrap(7000)]) From noreply at buildbot.pypy.org Fri Oct 23 16:43:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 22:43:47 +0200 (CEST) Subject: [pypy-commit] pypy 
cpyext-gc-support: Some more translation fixes Message-ID: <20151023204347.4C74C1C1EED@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80417:ea45d10498f2 Date: 2015-10-23 21:47 +0100 http://bitbucket.org/pypy/pypy/changeset/ea45d10498f2/ Log: Some more translation fixes diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -36,7 +36,7 @@ import pypy.module.cpyext.object import pypy.module.cpyext.stringobject import pypy.module.cpyext.tupleobject -import pypy.module.cpyext.ndarrayobject +#import pypy.module.cpyext.ndarrayobject ZZZ import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject import pypy.module.cpyext.intobject diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -388,14 +388,15 @@ FUNCTIONS = {} @specialize.memo() -def constant_pyobj(space, name): - # returns the C symbol "Py" + name, constant-folded +def constant_pytypeobj(space, name): + # returns the C symbol given by "name", constant-folded, + # of type "PyTypeObject *" if space.config.translating: - return rffi.CConstant("((PyObject *)&PyPy%s)" % (name,), PyObject) + return rffi.CConstant("(&%s)" % (name,), PyTypeObjectPtr) else: from pypy.module.cpyext.pyobject import as_pyobj w_obj = INTERPLEVEL_API[name] - return as_pyobj(space, w_obj) + return rffi.cast(PyTypeObjectPtr, as_pyobj(space, w_obj)) # These are C symbols which cpyext will export, but which are defined in .c # files somewhere in the implementation of cpyext (rather than being defined in @@ -591,8 +592,7 @@ def get_w_type(space): return getattr(space, cls) def _PyXxx_Type(space): - return rffi.cast(PyTypeObjectPtr, - constant_pyobj(space, py_type_name)) + return constant_pytypeobj(space, py_type_name) else: @specialize.memo() def get_w_type(space): @@ -600,6 +600,7 @@ def _PyXxx_Type(space): from 
rpython.rlib.debug import fatalerror fatalerror(py_type_name + " not implemented ZZZ") + assert 0 _PyXxx_Type = func_with_new_name(_PyXxx_Type, '_' + py_type_name) def check(space, py_obj): @@ -1090,7 +1091,7 @@ source_dir / "capsule.c", source_dir / "pysignals.c", source_dir / "pythread.c", - source_dir / "ndarrayobject.c", + #source_dir / "ndarrayobject.c", ZZZ source_dir / "missing.c", ], separate_module_sources=separate_module_sources, diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -68,6 +68,9 @@ @cpython_api([PyThreadState, PyCodeObject, PyObject, PyObject], PyFrameObject) def PyFrame_New(space, tstate, w_code, w_globals, w_locals): + from rpython.rlib.debug import fatalerror + fatalerror("PyFrame_New not implemented ZZZ") + assert 0 typedescr = get_typedescr(PyFrame.typedef) py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) py_frame = rffi.cast(PyFrameObject, py_obj) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -61,7 +61,7 @@ ob = lltype.malloc(tp_basestruct, flavor='raw', track_allocation=False) return ob, RRC_PERMANENT_LIGHT - tp_alloc_pyobj._always_inline_ = True + tp_alloc_pyobj._always_inline_ = 'try' # if not tp_fill_pyobj: def tp_fill_pyobj(space, w_obj, py_obj): @@ -100,7 +100,7 @@ if alloc_pypy_light_if(space, pyobj): strength = RRC_TRANSIENT_LIGHT return w_obj, strength - tp_alloc_pypy._always_inline_ = True + tp_alloc_pypy._always_inline_ = 'try' # if not tp_fill_pypy: def tp_fill_pypy(space, w_obj, pyobj): @@ -160,7 +160,7 @@ rawrefcount.create_link_pyobj(w_obj, ob) # else: - assert False, "rawrefcount_init_link: strength=%r" % (strength,) + assert False, "rawrefcount_init_link: strength=%s" % (strength,) def setup_prebuilt_pyobj(w_obj, py_obj): @@ -317,7 +317,7 @@ """ assert not is_pyobj(w_obj) 
return w_obj.cpyext_as_pyobj(space) -as_pyobj._always_inline_ = True +as_pyobj._always_inline_ = 'try' INTERPLEVEL_API['as_pyobj'] = as_pyobj def as_xpyobj(space, w_obj): @@ -341,7 +341,7 @@ if w_obj is None: w_obj = _create_w_obj_from_pyobj(space, pyobj) return w_obj -from_pyobj._always_inline_ = True +from_pyobj._always_inline_ = 'try' INTERPLEVEL_API['from_pyobj'] = from_pyobj @specialize.ll() diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -48,6 +48,9 @@ interpreter object. The buffer may be mutated, until unicode_realize() is called. """ + from rpython.rlib.debug import fatalerror + fatalerror("new_empty_unicode not implemented ZZZ") + assert 0 typedescr = get_typedescr(space.w_unicode.instancetypedef) py_obj = typedescr.allocate(space, space.w_unicode) py_uni = rffi.cast(PyUnicodeObject, py_obj) From noreply at buildbot.pypy.org Sat Oct 24 10:13:37 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 16:13:37 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: hack hack hack hack hack buffer objects Message-ID: <20151024141337.35E561C13DD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80423:33558aa4c963 Date: 2015-10-24 16:13 +0200 http://bitbucket.org/pypy/pypy/changeset/33558aa4c963/ Log: hack hack hack hack hack buffer objects diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -870,7 +870,17 @@ space.fromcache(State).install_dll(eci) - rawrefcount.init(lambda ob: _Py_Dealloc(space, ob)) + def dealloc_trigger(): + print 'dealloc_trigger...' 
+ while True: + ob = rawrefcount.next_dead(PyObject) + if not ob: + break + print ob + _Py_Dealloc(space, ob) + print 'dealloc_trigger DONE' + return "RETRY" + rawrefcount.init(dealloc_trigger) # populate static data to_fill = [] diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,7 +4,9 @@ from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) -from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef +from pypy.module.cpyext.pyobject import ( + setup_class_for_cpyext, RRC_PERMANENT, get_pyobj_and_xincref, xdecref) +from pypy.module.cpyext import support from pypy.module.array.interp_array import ArrayBuffer from pypy.objspace.std.bufferobject import W_Buffer @@ -18,6 +20,7 @@ ("b_offset", Py_ssize_t), ("b_readonly", rffi.INT), ("b_hash", rffi.LONG), + ("_b_data_pypy", rffi.CArray(lltype.Char)), ) cpython_struct("PyBufferObject", PyBufferObjectFields, PyBufferObjectStruct) @@ -25,61 +28,67 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.w_buffer.instancetypedef, - basestruct=PyBufferObject.TO, - attach=buffer_attach, - dealloc=buffer_dealloc, - realize=buffer_realize) + setup_class_for_cpyext( + W_Buffer, + basestruct=PyBufferObjectStruct, -def buffer_attach(space, py_obj, w_obj): + # --from a W_Buffer, we call this function to create and fill a + # new PyBufferObject -- + alloc_pyobj=buffer_alloc_pyobj, + + # --deallocator-- + dealloc=buffer_dealloc, + ) + +def buffer_alloc_pyobj(space, w_obj): """ - Fills a newly allocated PyBufferObject with the given (str) buffer object. + Fills a newly allocated PyBufferObject with the given W_Buffer object. 
""" - py_buf = rffi.cast(PyBufferObject, py_obj) - py_buf.c_b_offset = 0 - rffi.setintfield(py_buf, 'c_b_readonly', 1) - rffi.setintfield(py_buf, 'c_b_hash', -1) assert isinstance(w_obj, W_Buffer) buf = w_obj.buf + # If buf already allocated a fixed buffer, use it, and keep a + # reference to buf. + # Otherwise, b_base stays NULL, and the b_ptr points inside the + # allocated object. + + try: + ptr = buf.get_raw_address() + except ValueError: + srcstring = buf.as_str() + size = len(srcstring) + w_base = None + ptr = lltype.nullptr(rffi.VOIDP.TO) + else: + srcstring = '' + if isinstance(buf, ArrayBuffer): + w_base = buf.array + else: + w_base = w_obj + size = buf.getlength() + ptr = rffi.cast(rffi.VOIDP, ptr) + + py_buf = lltype.malloc(PyBufferObjectStruct, len(srcstring), flavor='raw', + track_allocation=False) + py_buf.c_b_base = get_pyobj_and_xincref(space, w_base) + + if w_base is None: + ptr = py_buf._b_data_pypy + rffi.str2rawmem(srcstring, ptr, 0, size) + ptr = rffi.cast(rffi.VOIDP, ptr) + py_buf.c_b_ptr = ptr + py_buf.c_b_size = size if isinstance(buf, SubBuffer): py_buf.c_b_offset = buf.offset - buf = buf.buffer + else: + py_buf.c_b_offset = 0 + rffi.setintfield(py_buf, 'c_b_readonly', 1) + rffi.setintfield(py_buf, 'c_b_hash', -1) - # If buf already allocated a fixed buffer, use it, and keep a - # reference to buf. - # Otherwise, b_base stays NULL, and we own the b_ptr. 
+ return py_buf, RRC_PERMANENT - ZZZ - if isinstance(buf, StringBuffer): - py_buf.c_b_base = lltype.nullptr(PyObject.TO) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(buf.value)) - py_buf.c_b_size = buf.getlength() - elif isinstance(buf, ArrayBuffer): - w_base = buf.array - py_buf.c_b_base = make_ref(space, w_base) - py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, buf.array._charbuf_start()) - py_buf.c_b_size = buf.getlength() - else: - raise OperationError(space.w_NotImplementedError, space.wrap( - "buffer flavor not supported")) - -def buffer_realize(space, py_obj): - """ - Creates the buffer in the PyPy interpreter from a cpyext representation. - """ - raise OperationError(space.w_NotImplementedError, space.wrap( - "Don't know how to realize a buffer")) - - - at cpython_api([PyObject], lltype.Void, external=False) -def buffer_dealloc(space, py_obj): - py_buf = rffi.cast(PyBufferObject, py_obj) - if py_buf.c_b_base: - Py_DecRef(space, py_buf.c_b_base) - else: - rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr)) - from pypy.module.cpyext.object import PyObject_dealloc - PyObject_dealloc(space, py_obj) +def buffer_dealloc(space, py_buf): + xdecref(space, py_buf.c_b_base) + lltype.free(py_buf, flavor='raw', track_allocation=False) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -17,6 +17,7 @@ Py_ssize_t b_offset; int b_readonly; long b_hash; + char _b_data_pypy[1]; } PyBufferObject; diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -55,7 +55,7 @@ @cpython_api([rffi.VOIDP], lltype.Void) def PyObject_Del(space, obj): - lltype.free(obj, flavor='raw') + lltype.free(obj, flavor='raw', track_allocation=False) @cpython_api([PyObject], lltype.Void) def PyObject_dealloc(space, obj): diff --git 
a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py --- a/pypy/module/cpyext/test/test_bufferobject.py +++ b/pypy/module/cpyext/test/test_bufferobject.py @@ -61,4 +61,4 @@ a = array.array('c', 'text') b = buffer(a) assert module.roundtrip(b) == 'text' - + del a diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -374,6 +374,7 @@ def teardown_method(self, func): for name in self.imported_module_names: self.unimport_module(name) + self.check_and_print_leaks() return #ZZZ self.cleanup_references(self.space) # XXX: find out how to disable check_and_print_leaks() if the diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -146,7 +146,9 @@ attach(ob, wr, _o_list) if _d_list: - _dealloc_trigger_callback() + res = _dealloc_trigger_callback() + if res == "RETRY": + _collect(track_allocation=track_allocation) _keepalive_forever = set() def _dont_free_any_more(): diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -803,6 +803,12 @@ return length str2chararray._annenforceargs_ = [strtype, None, int] + # s[start:start+length] -> already-existing char[], + # all characters including zeros + def str2rawmem(s, array, start, length): + ll_s = llstrtype(s) + copy_string_to_raw(ll_s, array, start, length) + # char* -> str # doesn't free char* def charp2str(cp): @@ -953,19 +959,19 @@ return (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) (str2charp, free_charp, charp2str, get_nonmovingbuffer, free_nonmovingbuffer, 
alloc_buffer, str_from_buffer, keep_buffer_alive_until_here, - charp2strn, charpsize2str, str2chararray, + charp2strn, charpsize2str, str2chararray, str2rawmem, ) = make_string_mappings(str) (unicode2wcharp, free_wcharp, wcharp2unicode, get_nonmoving_unicodebuffer, free_nonmoving_unicodebuffer, alloc_unicodebuffer, unicode_from_buffer, keep_unicodebuffer_alive_until_here, - wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, + wcharp2unicoden, wcharpsize2unicode, unicode2wchararray, unicode2rawmem, ) = make_string_mappings(unicode) # char** From noreply at buildbot.pypy.org Fri Oct 23 12:09:43 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Oct 2015 18:09:43 +0200 (CEST) Subject: [pypy-commit] pypy lazy-fast2locals: call fast2locals and locals2fast lazily around the trace function Message-ID: <20151023160943.E99191C00E2@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: lazy-fast2locals Changeset: r80412:c1f0a192c100 Date: 2015-10-23 18:09 +0200 http://bitbucket.org/pypy/pypy/changeset/c1f0a192c100/ Log: call fast2locals and locals2fast lazily around the trace function diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -327,10 +327,14 @@ w_arg = space.newtuple([operr.w_type, w_value, space.wrap(operr.get_traceback())]) - frame.fast2locals() + d = frame.getorcreatedebug() + if d.w_locals is not None: + # only update the w_locals dict if it exists + # if it does not exist yet and the tracer accesses it via + # frame.f_locals, it is filled by PyFrame.getdictscope + frame.fast2locals() self.is_tracing += 1 try: - d = frame.getorcreatedebug() try: w_result = space.call_function(w_callback, space.wrap(frame), space.wrap(event), w_arg) if space.is_w(w_result, space.w_None): @@ -343,7 +347,8 @@ raise finally: self.is_tracing -= 1 - frame.locals2fast() + if d.w_locals is not None: + frame.locals2fast() # Profile 
cases if self.profilefunc is not None: From noreply at buildbot.pypy.org Sat Oct 24 05:03:08 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 11:03:08 +0200 (CEST) Subject: [pypy-commit] cffi default: Clarify documentation for ffi.from_buffer(): it also works on read-only Message-ID: <20151024090308.A0B431C1352@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2361:f1329dc74b88 Date: 2015-10-24 11:04 +0200 http://bitbucket.org/cffi/cffi/changeset/f1329dc74b88/ Log: Clarify documentation for ffi.from_buffer(): it also works on read- only buffer objects diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -692,13 +692,16 @@ **ffi.from_buffer(python_buffer)**: return a ```` that points to the data of the given Python object, which must support the buffer interface. This is the opposite of ``ffi.buffer()``. It gives -a (read-write) reference to the existing data, not a copy; for this +a reference to the existing data, not a copy; for this reason, and for PyPy compatibility, it does not work with the built-in types str or unicode or bytearray (or buffers/memoryviews on them). It is meant to be used on objects containing large quantities of raw data, like ``array.array`` or numpy arrays. It supports both the old buffer API (in Python 2.x) and the -new memoryview API. The original object is kept alive (and, in case +new memoryview API. Note that if you pass a read-only buffer object, +you still get a regular ````; it is your responsibility +not to write there if the original buffer doesn't expect you to. +The original object is kept alive (and, in case of memoryview, locked) as long as the cdata object returned by ``ffi.from_buffer()`` is alive. 
*New in version 0.9.* From noreply at buildbot.pypy.org Sat Oct 24 10:20:55 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 16:20:55 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fix test (notably, foo_bar() now prints the same thing as on CPython) Message-ID: <20151024142055.125CB1C1DCF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80424:d73986739e27 Date: 2015-10-24 16:21 +0200 http://bitbucket.org/pypy/pypy/changeset/d73986739e27/ Log: fix test (notably, foo_bar() now prints the same thing as on CPython) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -112,7 +112,7 @@ @staticmethod def cleanup_references(space): - ZZZ + return #ZZZ state = space.fromcache(RefcountState) import gc; gc.collect() @@ -374,8 +374,6 @@ def teardown_method(self, func): for name in self.imported_module_names: self.unimport_module(name) - self.check_and_print_leaks() - return #ZZZ self.cleanup_references(self.space) # XXX: find out how to disable check_and_print_leaks() if the # test failed... 
@@ -656,7 +654,7 @@ Py_DECREF(true_obj); Py_DECREF(true_obj); fprintf(stderr, "REFCNT %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt+2 && refcnt < 3); + return PyBool_FromLong(refcnt_after == refcnt + 2); } static PyObject* foo_bar(PyObject* self, PyObject *args) { @@ -672,7 +670,7 @@ refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt); + return PyBool_FromLong(refcnt_after == refcnt + 1); } static PyMethodDef methods[] = { From noreply at buildbot.pypy.org Sat Oct 24 06:17:56 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 12:17:56 +0200 (CEST) Subject: [pypy-commit] cffi default: typo Message-ID: <20151024101756.CE65F1C12C6@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r2362:775ead7b90d7 Date: 2015-10-24 12:18 +0200 http://bitbucket.org/cffi/cffi/changeset/775ead7b90d7/ Log: typo diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -253,7 +253,7 @@ Usually, the right thing to do is to call this method with True. Be aware (particularly on Python 2) that, afterwards, you need to pass unicode -strings as arguments instead of not byte strings. (Before cffi version 0.9, +strings as arguments instead of byte strings. (Before cffi version 0.9, ``TCHAR`` and friends where hard-coded as unicode, but ``UNICODE`` was, inconsistently, not defined by default.) 
From noreply at buildbot.pypy.org Sat Oct 24 10:25:16 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 16:25:16 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fix test Message-ID: <20151024142516.B8B591C2020@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80426:f16f221988c7 Date: 2015-10-24 16:25 +0200 http://bitbucket.org/pypy/pypy/changeset/f16f221988c7/ Log: fix test diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py --- a/pypy/module/cpyext/test/test_eval.py +++ b/pypy/module/cpyext/test/test_eval.py @@ -149,10 +149,10 @@ rffi.free_charp(filename) def test_getbuiltins(self, space, api): - assert api.PyEval_GetBuiltins() is space.builtin.w_dict + assert api.from_pyobj(api.PyEval_GetBuiltins()) is space.builtin.w_dict def cpybuiltins(space): - return api.PyEval_GetBuiltins() + return api.from_pyobj(api.PyEval_GetBuiltins()) w_cpybuiltins = space.wrap(interp2app(cpybuiltins)) w_result = space.appexec([w_cpybuiltins], """(cpybuiltins): @@ -167,12 +167,12 @@ assert space.len_w(w_result) == 1 def test_getglobals(self, space, api): - assert api.PyEval_GetLocals() is None - assert api.PyEval_GetGlobals() is None + assert api.from_xpyobj(api.PyEval_GetLocals()) is None + assert api.from_xpyobj(api.PyEval_GetGlobals()) is None def cpyvars(space): - return space.newtuple([api.PyEval_GetGlobals(), - api.PyEval_GetLocals()]) + return space.newtuple([api.from_xpyobj(api.PyEval_GetGlobals()), + api.from_xpyobj(api.PyEval_GetLocals())]) w_cpyvars = space.wrap(interp2app(cpyvars)) w_result = space.appexec([w_cpyvars], """(cpyvars): From noreply at buildbot.pypy.org Sat Oct 24 05:27:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 11:27:41 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Translates (interestingly, as this code unexpectedly *is* RPython) Message-ID: <20151024092741.0A4661C1FFE@cobra.cs.uni-duesseldorf.de> Author: Armin 
Rigo Branch: cpyext-gc-support Changeset: r80420:cfafdd5f1139 Date: 2015-10-24 10:31 +0100 http://bitbucket.org/pypy/pypy/changeset/cfafdd5f1139/ Log: Translates (interestingly, as this code unexpectedly *is* RPython) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -146,7 +146,6 @@ assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) if not struct: - assert not space.config.translating assert not pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE if slot_names[0] == 'c_tp_as_number': STRUCT_TYPE = PyNumberMethods From noreply at buildbot.pypy.org Fri Oct 23 13:12:14 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 19:12:14 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: translation in-progress Message-ID: <20151023171214.2E8461C11C2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80415:346702273784 Date: 2015-10-23 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/346702273784/ Log: translation in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -597,8 +597,9 @@ @specialize.memo() def get_w_type(space): return space.gettypeobject(cls.typedef) - def _PyXxx_Type(): - ZZZ + def _PyXxx_Type(space): + from rpython.rlib.debug import fatalerror + fatalerror(py_type_name + " not implemented ZZZ") _PyXxx_Type = func_with_new_name(_PyXxx_Type, '_' + py_type_name) def check(space, py_obj): @@ -701,6 +702,7 @@ retval = as_pyobj(space, result) else: retval = get_pyobj_and_incref(space, result) + retval = rffi.cast(callable.api_func.restype, retval) else: retval = lltype.nullptr(PyObject.TO) elif callable.api_func.restype is not lltype.Void: @@ -719,6 +721,7 @@ else: print str(e) pypy_debug_catch_fatal_exception() + assert False rffi.stackcounter.stacks_counter -= 1 if gil_release: before = 
rffi.aroundstate.before diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -21,8 +21,9 @@ def make_typedescr(arg0, *args, **kwds): print "ZZZ: make_typedescr(%r)" % (arg0,) -def get_typedescr(*args, **kwds): - ZZZ +def get_typedescr(*args): + from rpython.rlib.debug import fatalerror + fatalerror("get_typedescr not fully removed ZZZ") RRC_PERMANENT = 'P' # the link pyobj<->pypy is permanent RRC_PERMANENT_LIGHT = 'p' # same, but tp_dealloc can be replaced with free() @@ -472,11 +473,7 @@ @cpython_api([PyObject], lltype.Void) def _Py_NewReference(space, obj): - ZZZ obj.c_ob_refcnt = 1 - w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type)) - assert isinstance(w_type, W_TypeObject) - get_typedescr(w_type.instancetypedef).realize(space, obj) @cpython_api([PyObject], lltype.Void) def _Py_Dealloc(space, obj): diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -148,7 +148,7 @@ from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc while True: - py_obj = rawrefcount.next_dead(PyObject.TO) + py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break _Py_Dealloc(self.space, py_obj) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -87,7 +87,7 @@ PyErr_BadInternalCall(space) py_tuple = rffi.cast(PyTupleObject, py_t) if pos < 0 or pos >= py_tuple.c_ob_size: - raise oefmt(w_IndexError, "tuple assignment index out of range") + raise oefmt(space.w_IndexError, "tuple assignment index out of range") olditem = py_tuple.c_ob_item[pos] py_tuple.c_ob_item[pos] = py_obj @@ -102,7 +102,7 @@ PyErr_BadInternalCall(space) py_tuple = rffi.cast(PyTupleObject, py_t) if pos < 0 or pos >= py_tuple.c_ob_size: - raise oefmt(w_IndexError, "tuple assignment index out of range") 
+ raise oefmt(space.w_IndexError, "tuple assignment index out of range") return py_tuple.c_ob_item[pos] # borrowed diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -20,7 +20,7 @@ PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( - PyObject, create_ref, get_typedescr, from_pyobj, as_pyobj, + PyObject, create_ref, get_typedescr, from_pyobj, as_pyobj, as_xpyobj, setup_class_for_cpyext, get_pyobj_and_incref, get_pyobj_and_xincref, track_reference, Py_DecRef, RRC_PERMANENT) from pypy.module.cpyext.slotdefs import ( @@ -592,8 +592,8 @@ def PyType_IsSubtype(space, a, b): """Return true if a is a subtype of b. """ - w_type1 = from_ref(space, rffi.cast(PyObject, a)) - w_type2 = from_ref(space, rffi.cast(PyObject, b)) + w_type1 = from_pyobj(space, a) + w_type2 = from_pyobj(space, b) return int(abstract_issubclass_w(space, w_type1, w_type2)) #XXX correct? @cpython_api([PyTypeObjectPtr, Py_ssize_t], PyObject) @@ -610,16 +610,14 @@ def _PyType_Lookup(space, type, w_name): """Internal API to look for a name through the MRO. This returns a borrowed reference, and doesn't set an exception!""" - w_type = from_ref(space, rffi.cast(PyObject, type)) + w_type = from_pyobj(space, type) assert isinstance(w_type, W_TypeObject) - if not space.isinstance_w(w_name, space.w_str): - return None name = space.str_w(w_name) w_obj = w_type.lookup(name) # return a borrowed ref. 
assumes lookup() returns already-referenced # objs OR that the result will not be used for long - return as_pyobj(space, w_obj) + return as_xpyobj(space, w_obj) @cpython_api([PyTypeObjectPtr], lltype.Void) def PyType_Modified(space, w_obj): diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py --- a/pypy/module/cpyext/weakrefobject.py +++ b/pypy/module/cpyext/weakrefobject.py @@ -42,7 +42,7 @@ """Similar to PyWeakref_GetObject(), but implemented as a macro that does no error checking. """ - return as_xpyobj(space.call_function(w_ref)) # borrowed + return as_xpyobj(space, space.call_function(w_ref)) # borrowed @cpython_api([PyObject], PyObject) def PyWeakref_LockObject(space, w_ref): From noreply at buildbot.pypy.org Fri Oct 23 13:12:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 19:12:12 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Some translation fixes Message-ID: <20151023171212.136011C1106@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80414:dc4368563c7b Date: 2015-10-23 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/dc4368563c7b/ Log: Some translation fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -30,7 +30,6 @@ from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize -from rpython.rlib.exports import export_struct from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions # CPython 2.4 compatibility @@ -210,13 +209,14 @@ assert len(self.argnames) == len(self.argtypes) self.gil = gil self.result_borrowed = result_borrowed + # + def get_llhelper(space): + return llhelper(self.functype, self.get_wrapper(space)) + self.get_llhelper = get_llhelper def _freeze_(self): return True - def get_llhelper(self, space): - return 
llhelper(self.functype, self.get_wrapper(space)) - @specialize.memo() def get_wrapper(self, space): wrapper = getattr(self, '_wrapper', None) @@ -387,9 +387,11 @@ INTERPLEVEL_API = {} # only for untranslated tests FUNCTIONS = {} + at specialize.memo() def constant_pyobj(space, name): - if we_are_translated(): - ZZZ # should return the C symbol "Py" + name, constant-folded + # returns the C symbol "Py" + name, constant-folded + if space.config.translating: + return rffi.CConstant("((PyObject *)&PyPy%s)" % (name,), PyObject) else: from pypy.module.cpyext.pyobject import as_pyobj w_obj = INTERPLEVEL_API[name] @@ -800,6 +802,7 @@ def build_bridge(space): "NOT_RPYTHON" from pypy.module.cpyext.pyobject import setup_prebuilt_pyobj, _Py_Dealloc + from pypy.module.cpyext.pyobject import get_pyobj_and_incref from rpython.rlib import rawrefcount export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) @@ -881,7 +884,7 @@ if isptr: ptr = ctypes.c_void_p.in_dll(bridge, name) if typ == 'PyObject*': - value = make_ref(space, w_obj) + value = get_pyobj_and_incref(space, w_obj) elif typ == 'PyDateTime_CAPI*': value = w_obj else: @@ -1097,10 +1100,9 @@ def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import make_ref - export_symbols = sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase + from pypy.module.cpyext.pyobject import get_pyobj_and_incref db = LowLevelDatabase() generate_macros(export_symbols, prefix='PyPy') @@ -1116,21 +1118,21 @@ setup_va_functions(eci) # populate static data - for name, (typ, expr) in GLOBALS.iteritems(): - name = name.replace("#", "") - if name.startswith('PyExc_'): - name = '_' + name - from pypy.module import cpyext - w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*'): - struct_ptr = make_ref(space, w_obj) - elif typ == 'PyDateTime_CAPI*': - continue - else: - assert False, "Unknown static data: %s %s" % (typ, name) - struct = 
rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj - struct._compilation_info = eci - export_struct(name, struct) + ## for name, (typ, expr) in GLOBALS.iteritems(): + ## name = name.replace("#", "") + ## if name.startswith('PyExc_'): + ## name = '_' + name + ## from pypy.module import cpyext + ## w_obj = eval(expr) + ## if typ in ('PyObject*', 'PyTypeObject*'): + ## struct_ptr = get_pyobj_and_incref(space, w_obj) + ## elif typ == 'PyDateTime_CAPI*': + ## continue + ## else: + ## assert False, "Unknown static data: %s %s" % (typ, name) + ## struct = rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj + ## struct._compilation_info = eci + ## export_struct(name, struct) for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -3,6 +3,7 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.executioncontext import AsyncAction from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.annlowlevel import llhelper from rpython.rlib.rdynload import DLLHANDLE from rpython.rlib import rawrefcount import sys @@ -81,7 +82,8 @@ from pypy.module.cpyext.api import INIT_FUNCTIONS if we_are_translated(): - rawrefcount.init(self.dealloc_trigger) + rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER, + self.dealloc_trigger)) setup_new_method_def(space) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -520,7 +520,7 @@ key = space.str_w(w_key) dict_w[key] = space.getitem(w_org_dict, w_key) except OperationError, e: - if e.async(self): + if e.async(space): raise add_operators(space, dict_w, pto) diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -21,7 +21,9 @@ def 
init(dealloc_trigger_callback=None): - "NOT_RPYTHON: set up rawrefcount with the GC" + """NOT_RPYTHON: set up rawrefcount with the GC. This is only used + for tests; it should not be called at all during translation. + """ global _p_list, _o_list, _adr2pypy, _pypy2ob global _d_list, _dealloc_trigger_callback _p_list = [] From noreply at buildbot.pypy.org Fri Oct 23 08:04:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 14:04:43 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: dict fixes Message-ID: <20151023120443.748CC1C135C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80407:f4b47915f927 Date: 2015-10-23 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/f4b47915f927/ Log: dict fixes diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_xpyobj from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @@ -68,13 +68,13 @@ return 0 @cpython_api([PyObject], Py_ssize_t, error=-1) -def PyDict_Size(space, w_obj): +def PyDict_Size(space, w_dict): """ Return the number of items in the dictionary. 
This is equivalent to len(p) on a dictionary.""" if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) - return space.wrap(w_dict.length()) + return w_dict.length() @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_Contains(space, w_obj, w_value): From noreply at buildbot.pypy.org Fri Oct 23 08:04:47 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 14:04:47 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Fix PyDict_Next (still broken complexity) Message-ID: <20151023120447.E66B21C15A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80409:03b0d3536b4c Date: 2015-10-23 14:03 +0200 http://bitbucket.org/pypy/pypy/changeset/03b0d3536b4c/ Log: Fix PyDict_Next (still broken complexity) diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_xpyobj +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_xpyobj, as_pyobj from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @@ -164,28 +164,26 @@ } Py_DECREF(o); }""" - if w_dict is None: + if not isinstance(w_dict, W_DictMultiObject): return 0 - # Note: this is not efficient. Storing an iterator would probably + # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably # work, but we can't work out how to not leak it if iteration does - # not complete. - ZZZ + # not complete. Alternatively, we could add some RPython-only + # dict-iterator method to move forward by N steps. 
+ w_dict.ensure_object_strategy() + w_iter = space.call_method(space.w_dict, "iteritems", w_dict) try: - w_iter = space.call_method(space.w_dict, "iteritems", w_dict) - pos = ppos[0] - while pos: + for i in range(ppos[0]): space.call_method(w_iter, "next") - pos -= 1 w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) - state = space.fromcache(RefcountState) if pkey: - pkey[0] = state.make_borrowed(w_dict, w_key) + pkey[0] = as_pyobj(space, w_key) if pvalue: - pvalue[0] = state.make_borrowed(w_dict, w_value) + pvalue[0] = as_pyobj(space, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP -from pypy.module.cpyext.pyobject import from_pyobj +from pypy.module.cpyext.pyobject import from_pyobj, as_pyobj from pypy.interpreter.error import OperationError class TestDictObject(BaseApiTest): @@ -91,7 +91,6 @@ def test_iter(self, space, api): w_dict = space.sys.getdict(space) - py_dict = make_ref(space, w_dict) ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') ppos[0] = 0 @@ -109,14 +108,11 @@ lltype.free(pkey, flavor='raw') lltype.free(pvalue, flavor='raw') - api.Py_DecRef(py_dict) # release borrowed references - assert space.eq_w(space.len(w_copy), space.len(w_dict)) assert space.eq_w(w_copy, w_dict) def test_iterkeys(self, space, api): w_dict = space.sys.getdict(space) - py_dict = make_ref(space, w_dict) ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw') pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw') @@ -138,8 +134,6 @@ lltype.free(pkey, flavor='raw') lltype.free(pvalue, flavor='raw') - 
api.Py_DecRef(py_dict) # release borrowed references - assert space.eq_w(space.newlist(keys_w), space.call_method(w_dict, "keys")) assert space.eq_w(space.newlist(values_w), diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -336,6 +336,11 @@ F: D[k] = F[k]""" init_or_update(space, self, __args__, 'dict.update') + def ensure_object_strategy(self): # for cpyext + object_strategy = self.space.fromcache(ObjectDictStrategy) + if self.strategy is not object_strategy: + self.strategy.switch_to_object_strategy(self) + def _add_indirections(): dict_methods = "getitem getitem_str setitem setdefault \ From noreply at buildbot.pypy.org Fri Oct 23 08:04:45 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 14:04:45 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: fixes Message-ID: <20151023120445.D10771C1464@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80408:aa3df47ed02d Date: 2015-10-23 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/aa3df47ed02d/ Log: fixes diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -35,7 +35,7 @@ def PyDict_DelItem(space, w_dict, w_key): if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) - w_dict.delitem(w_key) + space.delitem(w_dict, w_key) return 0 @cpython_api([PyObject, CONST_STRING, PyObject], rffi.INT_real, error=-1) @@ -64,7 +64,7 @@ if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) key = rffi.charp2str(key_ptr) - w_dict.delitem(space.wrap(key)) + space.delitem(w_dict, space.wrap(key)) return 0 @cpython_api([PyObject], Py_ssize_t, error=-1) diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- 
a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -9,8 +9,9 @@ d = api.PyDict_New() assert space.eq_w(d, space.newdict()) - assert space.eq_w(api.PyDict_GetItem(space.wrap({"a": 72}), - space.wrap("a")), + assert space.eq_w(from_pyobj(space, + api.PyDict_GetItem(space.wrap({"a": 72}), + space.wrap("a"))), space.wrap(72)) assert api.PyDict_SetItem(d, space.wrap("c"), space.wrap(42)) >= 0 @@ -18,7 +19,8 @@ space.wrap(42)) space.setitem(d, space.wrap("name"), space.wrap(3)) - assert space.eq_w(api.PyDict_GetItem(d, space.wrap("name")), + assert space.eq_w(from_pyobj(space, + api.PyDict_GetItem(d, space.wrap("name"))), space.wrap(3)) space.delitem(d, space.wrap("name")) From noreply at buildbot.pypy.org Fri Oct 23 08:04:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 14:04:41 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: more of the same Message-ID: <20151023120441.442821C130A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80406:33900512eefb Date: 2015-10-23 12:24 +0200 http://bitbucket.org/pypy/pypy/changeset/33900512eefb/ Log: more of the same diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -3,7 +3,6 @@ cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) from pypy.module.cpyext.pyobject import PyObject, PyObjectP -from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -7,7 +7,7 @@ Py_GE, CONST_STRING, FILEP, fwrite) from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, 
create_ref, from_pyobj, Py_IncRef, Py_DecRef, - track_reference, get_typedescr, _Py_NewReference, RefcountState) + track_reference, get_typedescr, _Py_NewReference) from pypy.module.cpyext.typeobject import PyTypeObjectPtr from pypy.module.cpyext.pyerrors import PyErr_NoMemory, PyErr_BadInternalCall from pypy.objspace.std.typeobject import W_TypeObject diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -23,7 +23,6 @@ print "ZZZ: make_typedescr(%r)" % (arg0,) def get_typedescr(*args, **kwds): ZZZ -RefcountState = "ZZZ" RRC_PERMANENT = 'P' # the link pyobj<->pypy is permanent RRC_PERMANENT_LIGHT = 'p' # same, but tp_dealloc can be replaced with free() diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -75,11 +75,11 @@ "This function is called when the program really starts" from pypy.module.cpyext.typeobject import setup_new_method_def - from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.api import INIT_FUNCTIONS setup_new_method_def(space) if we_are_translated(): + ZZZ refcountstate = space.fromcache(RefcountState) refcountstate.init_r2w_from_w2r() rawrefcount.init(lambda ob: ZZZ) diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -14,7 +14,7 @@ from rpython.tool.udir import udir from pypy.module.cpyext import api from pypy.module.cpyext.state import State -from pypy.module.cpyext.pyobject import RefcountState, debug_collect +from pypy.module.cpyext.pyobject import debug_collect from pypy.module.cpyext.pyobject import Py_DecRef from rpython.tool.identity_dict import identity_dict from rpython.tool import leakfinder @@ -112,6 +112,7 @@ @staticmethod def cleanup_references(space): + ZZZ state = 
space.fromcache(RefcountState) import gc; gc.collect() diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py --- a/pypy/module/cpyext/test/test_dictobject.py +++ b/pypy/module/cpyext/test/test_dictobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP -from pypy.module.cpyext.pyobject import make_ref, from_ref +from pypy.module.cpyext.pyobject import from_pyobj from pypy.interpreter.error import OperationError class TestDictObject(BaseApiTest): @@ -99,8 +99,8 @@ try: w_copy = space.newdict() while api.PyDict_Next(w_dict, ppos, pkey, pvalue): - w_key = from_ref(space, pkey[0]) - w_value = from_ref(space, pvalue[0]) + w_key = from_pyobj(space, pkey[0]) + w_value = from_pyobj(space, pvalue[0]) space.setitem(w_copy, w_key, w_value) finally: lltype.free(ppos, flavor='raw') @@ -125,11 +125,11 @@ try: ppos[0] = 0 while api.PyDict_Next(w_dict, ppos, pkey, None): - w_key = from_ref(space, pkey[0]) + w_key = from_pyobj(space, pkey[0]) keys_w.append(w_key) ppos[0] = 0 while api.PyDict_Next(w_dict, ppos, None, pvalue): - w_value = from_ref(space, pvalue[0]) + w_value = from_pyobj(space, pvalue[0]) values_w.append(w_value) finally: lltype.free(ppos, flavor='raw') diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py --- a/pypy/module/cpyext/test/test_funcobject.py +++ b/pypy/module/cpyext/test/test_funcobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pyobject import PyObject, as_pyobj, from_pyobj from pypy.module.cpyext.funcobject import ( PyFunctionObject, PyCodeObject, CODE_FLAGS) from 
pypy.interpreter.function import Function, Method @@ -15,12 +15,11 @@ def f(): pass return f """) - ref = make_ref(space, w_function) - assert (from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is + ref = as_pyobj(space, w_function) + assert (from_pyobj(space, rffi.cast(PyObject, ref.c_ob_type)) is space.gettypeobject(Function.typedef)) assert "f" == space.unwrap( - from_ref(space, rffi.cast(PyFunctionObject, ref).c_func_name)) - api.Py_DecRef(ref) + from_pyobj(space, rffi.cast(PyFunctionObject, ref).c_func_name)) def test_method(self, space, api): w_method = space.appexec([], """(): @@ -48,13 +47,12 @@ w_code = api.PyFunction_GetCode(w_function) assert w_code.co_name == "func" - ref = make_ref(space, w_code) - assert (from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is + ref = as_pyobj(space, w_code) + assert (from_pyobj(space, rffi.cast(PyObject, ref.c_ob_type)) is space.gettypeobject(PyCode.typedef)) assert "func" == space.unwrap( - from_ref(space, rffi.cast(PyCodeObject, ref).c_co_name)) + from_pyobj(space, rffi.cast(PyCodeObject, ref).c_co_name)) assert 3 == rffi.cast(PyCodeObject, ref).c_co_argcount - api.Py_DecRef(ref) def test_co_flags(self, space, api): def get_flags(signature, body="pass"): @@ -62,9 +60,8 @@ def func(%s): %s return func.__code__ """ % (signature, body)) - ref = make_ref(space, w_code) + ref = as_pyobj(space, w_code) co_flags = rffi.cast(PyCodeObject, ref).c_co_flags - api.Py_DecRef(ref) return co_flags assert get_flags("x") == CO_NESTED | CO_OPTIMIZED | CO_NEWLOCALS assert get_flags("x", "exec x") == CO_NESTED | CO_NEWLOCALS diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.methodobject import PyMethodDef from pypy.module.cpyext.api import ApiFunction -from 
pypy.module.cpyext.pyobject import PyObject, make_ref, Py_DecRef +from pypy.module.cpyext.pyobject import PyObject, Py_DecRef from pypy.module.cpyext.methodobject import ( PyDescr_NewMethod, PyCFunction_typedef) from rpython.rtyper.lltypesystem import rffi, lltype diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -2,7 +2,6 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem.lltype import nullptr from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState -from pypy.module.cpyext.pyobject import from_ref from rpython.rtyper.lltypesystem import lltype from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py --- a/pypy/module/cpyext/test/test_setobject.py +++ b/pypy/module/cpyext/test/test_setobject.py @@ -1,6 +1,6 @@ import py -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref +from pypy.module.cpyext.pyobject import PyObject, PyObjectP from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem import rffi, lltype diff --git a/pypy/module/cpyext/test/test_traceback.py b/pypy/module/cpyext/test/test_traceback.py --- a/pypy/module/cpyext/test/test_traceback.py +++ b/pypy/module/cpyext/test/test_traceback.py @@ -1,6 +1,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pyobject import PyObject, as_pyobj, from_pyobj from pypy.module.cpyext.pytraceback import PyTracebackObject from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter.pyframe import 
PyFrame @@ -14,9 +14,9 @@ except: return sys.exc_info()[2] """) - py_obj = make_ref(space, w_traceback) + py_obj = as_pyobj(space, w_traceback) py_traceback = rffi.cast(PyTracebackObject, py_obj) - assert (from_ref(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is + assert (from_pyobj(space, rffi.cast(PyObject, py_traceback.c_ob_type)) is space.gettypeobject(PyTraceback.typedef)) traceback = space.interp_w(PyTraceback, w_traceback) @@ -25,16 +25,14 @@ assert space.eq_w(space.getattr(w_traceback, space.wrap("tb_lasti")), space.wrap(py_traceback.c_tb_lasti)) assert space.is_w(space.getattr(w_traceback, space.wrap("tb_frame")), - from_ref(space, rffi.cast(PyObject, + from_pyobj(space, rffi.cast(PyObject, py_traceback.c_tb_frame))) while not space.is_w(w_traceback, space.w_None): assert space.is_w( w_traceback, - from_ref(space, rffi.cast(PyObject, py_traceback))) + from_pyobj(space, rffi.cast(PyObject, py_traceback))) w_traceback = space.getattr(w_traceback, space.wrap("tb_next")) py_traceback = py_traceback.c_tb_next assert lltype.normalizeptr(py_traceback) is None - - api.Py_DecRef(py_obj) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pyobject import PyObject, as_pyobj, from_pyobj from pypy.module.cpyext.typeobject import PyTypeObjectPtr import py @@ -332,13 +332,11 @@ pass return A """) - ref = make_ref(space, w_class) + ref = as_pyobj(space, w_class) py_type = rffi.cast(PyTypeObjectPtr, ref) assert py_type.c_tp_alloc - assert from_ref(space, py_type.c_tp_mro).wrappeditems is w_class.mro_w - - api.Py_DecRef(ref) + 
assert from_pyobj(space, py_type.c_tp_mro).wrappeditems is w_class.mro_w def test_multiple_inheritance(self, space, api): w_class = space.appexec([], """(): @@ -350,8 +348,7 @@ pass return C """) - ref = make_ref(space, w_class) - api.Py_DecRef(ref) + ref = as_pyobj(space, w_class) def test_lookup(self, space, api): w_type = space.w_str @@ -366,8 +363,7 @@ w_obj = space.appexec([], """(): import _numpypy return _numpypy.multiarray.dtype('int64').type(2)""") - ref = make_ref(space, w_obj) - api.Py_DecRef(ref) + ref = as_pyobj(space, w_obj) class AppTestSlots(AppTestCpythonExtensionBase): def test_some_slots(self): diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.unicodeobject import ( Py_UNICODE, PyUnicodeObject, new_empty_unicode) from pypy.module.cpyext.api import PyObjectP, PyObject -from pypy.module.cpyext.pyobject import Py_DecRef, from_ref +from pypy.module.cpyext.pyobject import Py_DecRef, from_pyobj from rpython.rtyper.lltypesystem import rffi, lltype import sys, py @@ -147,7 +147,7 @@ assert space.unwrap(w_res) == u'sp�m' res = api.PyUnicode_FromStringAndSize(s, 4) - w_res = from_ref(space, res) + w_res = from_pyobj(space, res) api.Py_DecRef(res) assert space.unwrap(w_res) == u'sp�' rffi.free_charp(s) diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -22,7 +22,7 @@ from pypy.module.cpyext.pyobject import ( PyObject, create_ref, get_typedescr, from_pyobj, as_pyobj, setup_class_for_cpyext, get_pyobj_and_incref, get_pyobj_and_xincref, - track_reference, RefcountState, Py_DecRef, RRC_PERMANENT) + track_reference, Py_DecRef, RRC_PERMANENT) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from 
pypy.module.cpyext.state import State From noreply at buildbot.pypy.org Fri Oct 23 08:04:39 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 14:04:39 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Fix a number of make_ref/from_ref/borrow_from. Message-ID: <20151023120439.129BA1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80405:b19c40f0c0ac Date: 2015-10-23 12:18 +0200 http://bitbucket.org/pypy/pypy/changeset/b19c40f0c0ac/ Log: Fix a number of make_ref/from_ref/borrow_from. diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -61,7 +61,6 @@ import pypy.module.cpyext.funcobject import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject -import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -291,7 +291,6 @@ def unwrapper(space, *args): from pypy.module.cpyext.pyobject import Py_DecRef, from_xpyobj from pypy.module.cpyext.pyobject import as_xpyobj, is_pyobj - from pypy.module.cpyext.pyobject import Reference newargs = () keepalives = () assert len(args) == len(api_function.argtypes) diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import ( cpython_api, Py_ssize_t, cpython_struct, bootstrap_function, PyObjectFields, PyObject) -from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref +from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef from pypy.module.array.interp_array import ArrayBuffer from pypy.objspace.std.bufferobject import W_Buffer @@ -51,6 +51,7 @@ # 
reference to buf. # Otherwise, b_base stays NULL, and we own the b_ptr. + ZZZ if isinstance(buf, StringBuffer): py_buf.c_b_base = lltype.nullptr(PyObject.TO) py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(buf.value)) diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py --- a/pypy/module/cpyext/cdatetime.py +++ b/pypy/module/cpyext/cdatetime.py @@ -1,5 +1,5 @@ from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.cpyext.pyobject import PyObject, make_ref +from pypy.module.cpyext.pyobject import PyObject, get_pyobj_and_incref from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, cpython_struct, PyObjectFields) from pypy.module.cpyext.import_ import PyImport_Import @@ -26,19 +26,19 @@ w_type = space.getattr(w_datetime, space.wrap("date")) datetimeAPI.c_DateType = rffi.cast( - PyTypeObjectPtr, make_ref(space, w_type)) + PyTypeObjectPtr, get_pyobj_and_incref(space, w_type)) w_type = space.getattr(w_datetime, space.wrap("datetime")) datetimeAPI.c_DateTimeType = rffi.cast( - PyTypeObjectPtr, make_ref(space, w_type)) + PyTypeObjectPtr, get_pyobj_and_incref(space, w_type)) w_type = space.getattr(w_datetime, space.wrap("time")) datetimeAPI.c_TimeType = rffi.cast( - PyTypeObjectPtr, make_ref(space, w_type)) + PyTypeObjectPtr, get_pyobj_and_incref(space, w_type)) w_type = space.getattr(w_datetime, space.wrap("timedelta")) datetimeAPI.c_DeltaType = rffi.cast( - PyTypeObjectPtr, make_ref(space, w_type)) + PyTypeObjectPtr, get_pyobj_and_incref(space, w_type)) return datetimeAPI diff --git a/pypy/module/cpyext/classobject.py b/pypy/module/cpyext/classobject.py --- a/pypy/module/cpyext/classobject.py +++ b/pypy/module/cpyext/classobject.py @@ -2,7 +2,7 @@ from pypy.module.cpyext.api import ( PyObjectFields, CANNOT_FAIL, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref, Py_DecRef, make_typedescr +from pypy.module.cpyext.pyobject import 
PyObject, Py_DecRef, make_typedescr from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.module.__builtin__.interp_classobj import W_ClassObject, W_InstanceObject diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -2,10 +2,11 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, borrow_from +from pypy.module.cpyext.pyobject import PyObject, PyObjectP from pypy.module.cpyext.pyobject import RefcountState from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError +from pypy.objspace.std.dictmultiobject import W_DictMultiObject from rpython.rlib.objectmodel import specialize @cpython_api([], PyObject) @@ -16,68 +17,65 @@ @cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) def PyDict_GetItem(space, w_dict, w_key): - try: - w_res = space.getitem(w_dict, w_key) - except: - return None - return borrow_from(w_dict, w_res) + if not isinstance(w_dict, W_DictMultiObject): + w_res = None + else: + w_res = w_dict.getitem(w_key) # possibly None + # borrowed result. assumes that the dict *values* are always strongly + # referenced from inside the W_DictMultiObject. 
+ return as_xpyobj(space, w_res) @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_SetItem(space, w_dict, w_key, w_obj): - if PyDict_Check(space, w_dict): - space.setitem(w_dict, w_key, w_obj) - return 0 - else: + if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) + w_dict.setitem(w_key, w_obj) + return 0 @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_DelItem(space, w_dict, w_key): - if PyDict_Check(space, w_dict): - space.delitem(w_dict, w_key) - return 0 - else: + if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) + w_dict.delitem(w_key) + return 0 @cpython_api([PyObject, CONST_STRING, PyObject], rffi.INT_real, error=-1) def PyDict_SetItemString(space, w_dict, key_ptr, w_obj): - if PyDict_Check(space, w_dict): - key = rffi.charp2str(key_ptr) - space.setitem_str(w_dict, key, w_obj) - return 0 - else: + if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) + key = rffi.charp2str(key_ptr) + w_dict.setitem_str(key, w_obj) + return 0 @cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL) def PyDict_GetItemString(space, w_dict, key): """This is the same as PyDict_GetItem(), but key is specified as a char*, rather than a PyObject*.""" - try: + if not isinstance(w_dict, W_DictMultiObject): + w_res = None + else: w_res = space.finditem_str(w_dict, rffi.charp2str(key)) - except: - w_res = None - if w_res is None: - return None - return borrow_from(w_dict, w_res) + # borrowed result, possibly None + return as_xpyobj(space, w_res) @cpython_api([PyObject, rffi.CCHARP], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): """Remove the entry in dictionary p which has a key specified by the string key. 
Return 0 on success or -1 on failure.""" - if PyDict_Check(space, w_dict): - key = rffi.charp2str(key_ptr) - # our dicts dont have a standardized interface, so we need - # to go through the space - space.delitem(w_dict, space.wrap(key)) - return 0 - else: + if not isinstance(w_dict, W_DictMultiObject): PyErr_BadInternalCall(space) + key = rffi.charp2str(key_ptr) + w_dict.delitem(space.wrap(key)) + return 0 @cpython_api([PyObject], Py_ssize_t, error=-1) def PyDict_Size(space, w_obj): """ Return the number of items in the dictionary. This is equivalent to len(p) on a dictionary.""" - return space.len_w(w_obj) + if not isinstance(w_dict, W_DictMultiObject): + PyErr_BadInternalCall(space) + return space.wrap(w_dict.length()) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_Contains(space, w_obj, w_value): @@ -173,6 +171,7 @@ # Note: this is not efficient. Storing an iterator would probably # work, but we can't work out how to not leak it if iteration does # not complete. 
+ ZZZ try: w_iter = space.call_method(space.w_dict, "iteritems", w_dict) diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct, is_valid_fp) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, as_pyobj, as_xpyobj from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling @@ -36,7 +36,7 @@ w_builtins = w_builtins.getdict(space) else: w_builtins = space.builtin.getdict(space) - return borrow_from(None, w_builtins) + return as_pyobj(space, w_builtins) # borrowed @cpython_api([], PyObject, error=CANNOT_FAIL) def PyEval_GetLocals(space): @@ -44,8 +44,10 @@ frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: - return None - return borrow_from(None, caller.getdictscope()) + w_res = None + else: + w_res = caller.getdictscope() + return as_xpyobj(space, w_res) # borrowed @cpython_api([], PyObject, error=CANNOT_FAIL) def PyEval_GetGlobals(space): @@ -53,8 +55,10 @@ frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: - return None - return borrow_from(None, caller.w_globals) + w_res = None + else: + w_res = caller.w_globals + return as_xpyobj(space, w_res) # borrowed @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -3,7 +3,7 @@ cpython_api, bootstrap_function, PyObjectFields, cpython_struct, CANNOT_FAIL) from 
pypy.module.cpyext.pyobject import ( - PyObject, Py_DecRef, make_ref, from_ref, track_reference, + PyObject, Py_DecRef, track_reference, get_pyobj_and_xincref, make_typedescr, get_typedescr) from pypy.module.cpyext.state import State from pypy.module.cpyext.pystate import PyThreadState @@ -33,6 +33,7 @@ "Fills a newly allocated PyFrameObject with a frame object" frame = space.interp_w(PyFrame, w_obj) py_frame = rffi.cast(PyFrameObject, py_obj) + ZZZ py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) py_frame.c_f_globals = make_ref(space, frame.w_globals) rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @@ -53,6 +54,7 @@ """ py_frame = rffi.cast(PyFrameObject, py_obj) py_code = rffi.cast(PyObject, py_frame.c_f_code) + ZZZ w_code = from_ref(space, py_code) code = space.interp_w(PyCode, w_code) w_globals = from_ref(space, py_frame.c_f_globals) @@ -70,8 +72,9 @@ py_obj = typedescr.allocate(space, space.gettypeobject(PyFrame.typedef)) py_frame = rffi.cast(PyFrameObject, py_obj) space.interp_w(PyCode, w_code) # sanity check - py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, w_code)) - py_frame.c_f_globals = make_ref(space, w_globals) + py_frame.c_f_code = rffi.cast(PyCodeObject, + get_pyobj_and_xincref(space, w_code)) + py_frame.c_f_globals = get_pyobj_and_xincref(space, w_globals) return py_frame @cpython_api([PyFrameObject], rffi.INT_real, error=-1) diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -3,7 +3,7 @@ PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) + PyObject, Py_DecRef, make_typedescr, as_pyobj, as_xpyobj) from rpython.rlib.unroll import unrolling_iterable from 
pypy.interpreter.error import OperationError from pypy.interpreter.function import Function, Method @@ -51,6 +51,7 @@ PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode) def function_attach(space, py_obj, w_obj): + ZZZ py_func = rffi.cast(PyFunctionObject, py_obj) assert isinstance(w_obj, Function) py_func.c_func_name = make_ref(space, space.wrap(w_obj.name)) @@ -63,6 +64,7 @@ PyObject_dealloc(space, py_obj) def code_attach(space, py_obj, w_obj): + ZZZ py_code = rffi.cast(PyCodeObject, py_obj) assert isinstance(w_obj, PyCode) py_code.c_co_name = make_ref(space, space.wrap(w_obj.co_name)) @@ -84,8 +86,7 @@ def PyFunction_GetCode(space, w_func): """Return the code object associated with the function object op.""" func = space.interp_w(Function, w_func) - w_code = space.wrap(func.code) - return borrow_from(w_func, w_code) + return as_pyobj(space, func.code) # borrowed @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyMethod_New(space, w_func, w_self, w_cls): @@ -100,21 +101,21 @@ def PyMethod_Function(space, w_method): """Return the function object associated with the method meth.""" assert isinstance(w_method, Method) - return borrow_from(w_method, w_method.w_function) + return as_pyobj(space, w_method.w_function) # borrowed @cpython_api([PyObject], PyObject) def PyMethod_Self(space, w_method): """Return the instance associated with the method meth if it is bound, otherwise return NULL.""" assert isinstance(w_method, Method) - return borrow_from(w_method, w_method.w_instance) + return as_xpyobj(space, w_method.w_instance) # borrowed @cpython_api([PyObject], PyObject) def PyMethod_Class(space, w_method): """Return the class object from which the method meth was created; if this was created from an instance, it will be the class of the instance.""" assert isinstance(w_method, Method) - return borrow_from(w_method, w_method.w_class) + return as_pyobj(space, w_method.w_class) # borrowed def unwrap_list_of_strings(space, w_list): return 
[space.str_w(w_item) for w_item in space.fixedview(w_list)] diff --git a/pypy/module/cpyext/import_.py b/pypy/module/cpyext/import_.py --- a/pypy/module/cpyext/import_.py +++ b/pypy/module/cpyext/import_.py @@ -1,7 +1,7 @@ from pypy.interpreter import module from pypy.module.cpyext.api import ( generic_cpy_call, cpython_api, PyObject, CONST_STRING) -from pypy.module.cpyext.pyobject import borrow_from +from pypy.module.cpyext.pyobject import as_pyobj from rpython.rtyper.lltypesystem import lltype, rffi from pypy.interpreter.error import OperationError from pypy.interpreter.module import Module @@ -74,15 +74,16 @@ w_mod = check_sys_modules_w(space, modulename) if not w_mod or space.is_w(w_mod, space.w_None): w_mod = Module(space, space.wrap(modulename)) - XXX - "insert it into sys.modules!" - return borrow_from(None, w_mod) + space.setitem(space.sys.get('modules'), space.wrap(modulename), w_mod) + # return a borrowed ref --- assumes one copy in sys.modules + return as_pyobj(space, w_mod) @cpython_api([], PyObject) def PyImport_GetModuleDict(space): """Return the dictionary used for the module administration (a.k.a. sys.modules). 
Note that this is a per-interpreter variable.""" w_modulesDict = space.sys.get('modules') - return borrow_from(None, w_modulesDict) + return as_pyobj(space, w_modulesDict) # borrowed ref @cpython_api([rffi.CCHARP, PyObject], PyObject) def PyImport_ExecCodeModule(space, name, w_code): diff --git a/pypy/module/cpyext/listobject.py b/pypy/module/cpyext/listobject.py --- a/pypy/module/cpyext/listobject.py +++ b/pypy/module/cpyext/listobject.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_api, CANNOT_FAIL, Py_ssize_t, build_type_checkers) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall -from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, borrow_from +from pypy.module.cpyext.pyobject import Py_DecRef, PyObject, as_pyobj from pypy.objspace.std.listobject import W_ListObject from pypy.interpreter.error import OperationError @@ -49,8 +49,9 @@ if index < 0 or index >= w_list.length(): raise OperationError(space.w_IndexError, space.wrap( "list index out of range")) - w_item = w_list.getitem(index) - return borrow_from(w_list, w_item) + # force the object strategy: it ensures a borrowed result stays around + w_list.ensure_object_strategy() + return as_pyobj(space, w_list.getitem(index)) @cpython_api([PyObject, PyObject], rffi.INT_real, error=-1) @@ -77,14 +78,14 @@ @cpython_api([PyObject], Py_ssize_t, error=-1) -def PyList_Size(space, ref): +def PyList_Size(space, w_list): """Return the length of the list object in list; this is equivalent to len(list) on a list object. 
""" - if not PyList_Check(space, ref): + if not isinstance(w_list, W_ListObject): raise OperationError(space.w_TypeError, space.wrap("expected list object")) - return PyList_GET_SIZE(space, ref) + return w_list.length() @cpython_api([PyObject], PyObject) def PyList_AsTuple(space, w_list): diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -12,7 +12,7 @@ METH_STATIC, METH_VARARGS, PyObject, PyObjectFields, bootstrap_function, build_type_checkers, cpython_api, cpython_struct, generic_cpy_call) from pypy.module.cpyext.pyobject import ( - Py_DecRef, from_pyobj, make_ref, make_typedescr) + Py_DecRef, from_pyobj, make_typedescr) PyCFunction_typedef = rffi.COpaquePtr(typedef='PyCFunction') PyCFunction = lltype.Ptr(lltype.FuncType([PyObject, PyObject], PyObject)) diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -1,6 +1,6 @@ from pypy.interpreter.error import OperationError from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, Py_ssize_t -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, Py_DecRef +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, Py_DecRef from pypy.module.cpyext.pyobject import from_pyobj, get_pyobj_and_incref from rpython.rtyper.lltypesystem import rffi, lltype from rpython.tool.sourcetools import func_with_new_name diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py --- a/pypy/module/cpyext/pyerrors.py +++ b/pypy/module/cpyext/pyerrors.py @@ -6,7 +6,8 @@ from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, make_ref, from_ref, Py_DecRef, as_pyobj) + PyObject, PyObjectP, Py_DecRef, as_pyobj, get_pyobj_and_incref, + 
get_w_obj_and_decref, from_pyobj, get_pyobj_and_xincref) from pypy.module.cpyext.state import State from pypy.module.cpyext.import_ import PyImport_Import from rpython.rlib import rposix, jit @@ -56,9 +57,9 @@ state = space.fromcache(State) operror = state.clear_exception() if operror: - ptype[0] = make_ref(space, operror.w_type) - pvalue[0] = make_ref(space, operror.get_w_value(space)) - ptraceback[0] = make_ref(space, space.wrap(operror.get_traceback())) + ptype[0] = get_pyobj_and_incref(space, operror.w_type) + pvalue[0] = get_pyobj_and_incref(space, operror.get_w_value(space)) + ptraceback[0] = get_pyobj_and_xincref(space, operror.get_traceback()) else: ptype[0] = lltype.nullptr(PyObject.TO) pvalue[0] = lltype.nullptr(PyObject.TO) @@ -95,13 +96,11 @@ not an instance of the same class. This function can be used to instantiate the class in that case. If the values are already normalized, nothing happens. The delayed normalization is implemented to improve performance.""" - operr = OperationError(from_ref(space, exc_p[0]), - from_ref(space, val_p[0])) + operr = OperationError(get_w_obj_and_decref(space, exc_p[0]), + get_w_obj_and_decref(space, val_p[0])) operr.normalize_exception(space) - Py_DecRef(space, exc_p[0]) - Py_DecRef(space, val_p[0]) - exc_p[0] = make_ref(space, operr.w_type) - val_p[0] = make_ref(space, operr.get_w_value(space)) + exc_p[0] = get_pyobj_and_incref(space, operr.w_type) + val_p[0] = get_pyobj_and_incref(space, operr.get_w_value(space)) @cpython_api([], rffi.INT_real, error=0) def PyErr_BadArgument(space): @@ -303,10 +302,8 @@ PyErr_PrintEx(space, 1) @cpython_api([PyObject, PyObject, PyObject], lltype.Void) -def PyErr_Display(space, w_type, w_value, tb): - if tb: - w_tb = from_ref(space, tb) - else: +def PyErr_Display(space, w_type, w_value, w_tb): + if w_tb is None: w_tb = space.w_None try: space.call_function(space.sys.get("excepthook"), @@ -372,9 +369,9 @@ ec = space.getexecutioncontext() operror = ec.sys_exc_info() if operror: - ptype[0] 
= make_ref(space, operror.w_type) - pvalue[0] = make_ref(space, operror.get_w_value(space)) - ptraceback[0] = make_ref(space, space.wrap(operror.get_traceback())) + ptype[0] = get_pyobj_and_incref(space, operror.w_type) + pvalue[0] = get_pyobj_and_incref(space, operror.get_w_value(space)) + ptraceback[0] = get_pyobj_and_xincref(space, operror.get_traceback()) else: ptype[0] = lltype.nullptr(PyObject.TO) pvalue[0] = lltype.nullptr(PyObject.TO) diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, as_pyobj from pypy.module.cpyext.object import Py_PRINT_RAW from pypy.interpreter.error import OperationError from pypy.module._file.interp_file import W_File @@ -83,7 +83,8 @@ @cpython_api([PyObject], PyObject) def PyFile_Name(space, w_p): """Return the name of the file specified by p as a string object.""" - return borrow_from(w_p, space.getattr(w_p, space.wrap("name"))) + w_name = space.getattr(w_p, space.wrap("name")) + return as_pyobj(space, w_name) # borrowed result @cpython_api([PyObject, rffi.INT_real], rffi.INT_real, error=CANNOT_FAIL) def PyFile_SoftSpace(space, w_p, newflag): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -461,35 +461,6 @@ # ---------- -def make_ref(space, w_obj): - ZZZ - -def from_ref(space, ref): - """ - Finds the interpreter object corresponding to the given reference. If the - object is not yet realized (see stringobject.py), creates it. 
- """ - assert lltype.typeOf(ref) == PyObject - ZZZ - if not ref: - return None - state = space.fromcache(RefcountState) - ptr = rffi.cast(ADDR, ref) - - try: - return state.py_objects_r2w[ptr] - except KeyError: - pass - - # This reference is not yet a real interpreter object. - # Realize it. - ref_type = rffi.cast(PyObject, ref.c_ob_type) - if ref_type == ref: # recursion! - raise InvalidPointerException(str(ref)) - w_type = from_ref(space, ref_type) - assert isinstance(w_type, W_TypeObject) - return get_typedescr(w_type.instancetypedef).realize(space, ref) - @cpython_api([PyObject], lltype.Void) def Py_IncRef(space, obj): @@ -518,51 +489,6 @@ #___________________________________________________________ -# Support for borrowed references - -def make_borrowed_ref(space, w_container, w_borrowed): - """ - Create a borrowed reference, which will live as long as the container - has a living reference (as a PyObject!) - """ - ZZZ - if w_borrowed is None: - return lltype.nullptr(PyObject.TO) - - state = space.fromcache(RefcountState) - return state.make_borrowed(w_container, w_borrowed) - -class Reference: - def __init__(self, pyobj): - ZZZ - assert not isinstance(pyobj, W_Root) - self.pyobj = pyobj - - def get_ref(self, space): - return self.pyobj - - def get_wrapped(self, space): - return from_ref(space, self.pyobj) - -class BorrowPair(Reference): - """ - Delays the creation of a borrowed reference. 
- """ - def __init__(self, w_container, w_borrowed): - ZZZ - self.w_container = w_container - self.w_borrowed = w_borrowed - - def get_ref(self, space): - return make_borrowed_ref(space, self.w_container, self.w_borrowed) - - def get_wrapped(self, space): - return self.w_borrowed - -def borrow_from(container, borrowed): - return BorrowPair(container, borrowed) - -#___________________________________________________________ @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL) def _Py_HashPointer(space, ptr): diff --git a/pypy/module/cpyext/pypyintf.py b/pypy/module/cpyext/pypyintf.py deleted file mode 100644 --- a/pypy/module/cpyext/pypyintf.py +++ /dev/null @@ -1,9 +0,0 @@ -from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject, borrow_from - - - at cpython_api([PyObject, PyObject], PyObject) -def PyPy_Borrow(space, w_parentobj, w_obj): - """Returns a borrowed reference to 'obj', borrowing from the 'parentobj'. - """ - return borrow_from(w_parentobj, w_obj) diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -1,6 +1,7 @@ from pypy.module.cpyext.api import ( cpython_api, generic_cpy_call, CANNOT_FAIL, CConfig, cpython_struct) -from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref +from pypy.module.cpyext.pyobject import ( + PyObject, Py_DecRef, get_pyobj_and_incref) from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread @@ -112,7 +113,7 @@ capsule = ThreadStateCapsule(space) ts = capsule.memory ts.c_interp = self.interpreter_state - ts.c_dict = make_ref(space, space.newdict()) + ts.c_dict = get_pyobj_and_incref(space, space.newdict()) return capsule diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -3,7 +3,7 @@ PyObjectFields, generic_cpy_call, 
CONST_STRING, CANNOT_FAIL, Py_ssize_t, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) + PyObject, Py_DecRef, make_typedescr) from pypy.module.cpyext.frameobject import PyFrameObject from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError @@ -36,6 +36,7 @@ w_next_traceback = None else: w_next_traceback = space.wrap(traceback.next) + ZZZ py_traceback.c_tb_next = rffi.cast(PyTracebackObject, make_ref(space, w_next_traceback)) py_traceback.c_tb_frame = rffi.cast(PyFrameObject, make_ref(space, space.wrap(traceback.frame))) rffi.setintfield(py_traceback, 'c_tb_lasti', traceback.lasti) diff --git a/pypy/module/cpyext/sequence.py b/pypy/module/cpyext/sequence.py --- a/pypy/module/cpyext/sequence.py +++ b/pypy/module/cpyext/sequence.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, Py_ssize_t) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, as_pyobj from rpython.rtyper.lltypesystem import rffi, lltype from pypy.objspace.std import listobject, tupleobject @@ -56,11 +56,12 @@ PySequence_Fast(), o is not NULL, and that i is within bounds. 
""" if isinstance(w_obj, listobject.W_ListObject): + w_obj.ensure_object_strategy() w_res = w_obj.getitem(index) else: assert isinstance(w_obj, tupleobject.W_TupleObject) w_res = w_obj.wrappeditems[index] - return borrow_from(w_obj, w_res) + return as_pyobj(space, w_res) # borrowed @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def PySequence_Fast_GET_SIZE(space, w_obj): diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py --- a/pypy/module/cpyext/setobject.py +++ b/pypy/module/cpyext/setobject.py @@ -2,8 +2,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers) -from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef, - borrow_from, make_ref, from_ref) +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, Py_DecRef from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.objspace.std.setobject import W_SetObject, newset diff --git a/pypy/module/cpyext/sliceobject.py b/pypy/module/cpyext/sliceobject.py --- a/pypy/module/cpyext/sliceobject.py +++ b/pypy/module/cpyext/sliceobject.py @@ -3,7 +3,7 @@ cpython_api, cpython_struct, bootstrap_function, build_type_checkers, CANNOT_FAIL, Py_ssize_t, Py_ssize_tP, PyObjectFields) from pypy.module.cpyext.pyobject import ( - Py_DecRef, PyObject, make_ref, make_typedescr) + Py_DecRef, PyObject, make_typedescr) from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from pypy.objspace.std.sliceobject import W_SliceObject @@ -30,6 +30,7 @@ Fills a newly allocated PySliceObject with the given slice object. The fields must not be modified. 
""" + ZZZ py_slice = rffi.cast(PySliceObject, py_obj) assert isinstance(w_obj, W_SliceObject) py_slice.c_start = make_ref(space, w_obj.w_start) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -11,7 +11,7 @@ ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, readbufferproc) -from pypy.module.cpyext.pyobject import from_ref +from pypy.module.cpyext.pyobject import from_pyobj from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.state import State from pypy.interpreter.error import OperationError, oefmt @@ -311,8 +311,7 @@ @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, external=False) def slot_tp_new(space, type, w_args, w_kwds): from pypy.module.cpyext.tupleobject import PyTuple_Check - pyo = rffi.cast(PyObject, type) - w_type = from_ref(space, pyo) + w_type = from_pyobj(space, type) w_func = space.getattr(w_type, space.wrap("__new__")) assert PyTuple_Check(space, w_args) args_w = [w_type] + space.fixedview(w_args) diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c --- a/pypy/module/cpyext/src/getargs.c +++ b/pypy/module/cpyext/src/getargs.c @@ -442,7 +442,7 @@ strncpy(msgbuf, "is not retrievable", bufsize); return msgbuf; } - PyPy_Borrow(arg, item); + //PyPy_Borrow(arg, item); msg = convertitem(item, &format, p_va, flags, levels+1, msgbuf, bufsize, freelist); /* PySequence_GetItem calls tp->sq_item, which INCREFs */ diff --git a/pypy/module/cpyext/sysmodule.py b/pypy/module/cpyext/sysmodule.py --- a/pypy/module/cpyext/sysmodule.py +++ b/pypy/module/cpyext/sysmodule.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import CANNOT_FAIL, cpython_api, CONST_STRING -from pypy.module.cpyext.pyobject import 
PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, as_xpyobj @cpython_api([CONST_STRING], PyObject, error=CANNOT_FAIL) def PySys_GetObject(space, name): @@ -10,7 +10,7 @@ name = rffi.charp2str(name) w_dict = space.sys.getdict(space) w_obj = space.finditem_str(w_dict, name) - return borrow_from(None, w_obj) + return as_xpyobj(space, w_obj) # borrowed @cpython_api([CONST_STRING, PyObject], rffi.INT_real, error=-1) def PySys_SetObject(space, name, w_obj): diff --git a/pypy/module/cpyext/test/test_borrow.py b/pypy/module/cpyext/test/test_borrow.py --- a/pypy/module/cpyext/test/test_borrow.py +++ b/pypy/module/cpyext/test/test_borrow.py @@ -1,20 +1,8 @@ import py from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest -from pypy.module.cpyext.pyobject import make_ref, borrow_from, RefcountState -class TestBorrowing(BaseApiTest): - def test_borrowing(self, space, api): - w_int = space.wrap(1) - w_tuple = space.newtuple([w_int]) - api.Py_IncRef(w_tuple) - one_pyo = borrow_from(w_tuple, w_int).get_ref(space) - api.Py_DecRef(w_tuple) - state = space.fromcache(RefcountState) - state.print_refcounts() - py.test.raises(AssertionError, api.Py_DecRef, one_pyo) - class AppTestBorrow(AppTestCpythonExtensionBase): def test_tuple_borrowing(self): module = self.import_extension('foo', [ @@ -76,4 +64,5 @@ ]) wr = module.run() # check that the set() object was deallocated + self.debug_collect() assert wr() is None diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -353,6 +353,7 @@ interp2app(record_imported_module)) self.w_here = self.space.wrap( str(py.path.local(pypydir)) + '/module/cpyext/test/') + self.w_debug_collect = self.space.wrap(interp2app(debug_collect)) # create the file lock before we count allocations diff --git 
a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -20,9 +20,9 @@ PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, create_ref, get_typedescr, from_pyobj, as_pyobj, + PyObject, create_ref, get_typedescr, from_pyobj, as_pyobj, setup_class_for_cpyext, get_pyobj_and_incref, get_pyobj_and_xincref, - track_reference, RefcountState, borrow_from, Py_DecRef, RRC_PERMANENT) + track_reference, RefcountState, Py_DecRef, RRC_PERMANENT) from pypy.module.cpyext.slotdefs import ( slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function) from pypy.module.cpyext.state import State @@ -376,6 +376,7 @@ def type_alloc(space, w_metatype): + ZZZ metatype = rffi.cast(PyTypeObjectPtr, make_ref(space, w_metatype)) # Don't increase refcount for non-heaptypes if metatype: @@ -428,7 +429,7 @@ if pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) - heaptype.c_ht_name = make_ref(space, w_typename) + heaptype.c_ht_name = get_pyobj_and_incref(space, w_typename) from pypy.module.cpyext.stringobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: @@ -616,7 +617,9 @@ return None name = space.str_w(w_name) w_obj = w_type.lookup(name) - return borrow_from(w_type, w_obj) + # return a borrowed ref. 
assumes lookup() returns already-referenced + # objs OR that the result will not be used for long + return as_pyobj(space, w_obj) @cpython_api([PyTypeObjectPtr], lltype.Void) def PyType_Modified(space, w_obj): diff --git a/pypy/module/cpyext/typeobjectdefs.py b/pypy/module/cpyext/typeobjectdefs.py --- a/pypy/module/cpyext/typeobjectdefs.py +++ b/pypy/module/cpyext/typeobjectdefs.py @@ -3,7 +3,7 @@ from pypy.module.cpyext.api import (cpython_struct, Py_ssize_t, Py_ssize_tP, PyVarObjectFields, PyTypeObject, PyTypeObjectPtr, FILEP, Py_TPFLAGS_READYING, Py_TPFLAGS_READY, Py_TPFLAGS_HEAPTYPE) -from pypy.module.cpyext.pyobject import PyObject, make_ref, from_ref +from pypy.module.cpyext.pyobject import PyObject from pypy.module.cpyext.modsupport import PyMethodDef diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -7,8 +7,8 @@ CONST_WSTRING) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( - PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + PyObject, PyObjectP, Py_DecRef, track_reference, get_pyobj_and_incref, + make_typedescr, get_typedescr, from_pyobj) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState @@ -206,7 +206,7 @@ ref_unicode = rffi.cast(PyUnicodeObject, ref) if not ref_unicode.c_buffer: # Copy unicode buffer - w_unicode = from_ref(space, ref) + w_unicode = from_pyobj(space, ref) u = space.unicode_w(w_unicode) ref_unicode.c_buffer = rffi.unicode2wcharp(u) return ref_unicode.c_buffer @@ -216,19 +216,18 @@ """Return a read-only pointer to the Unicode object's internal Py_UNICODE buffer, NULL if unicode is not a Unicode object.""" # Don't use PyUnicode_Check, it will realize the object :-( - w_type = 
from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) - if not space.is_true(space.issubtype(w_type, space.w_unicode)): + if not PyUnicode_Check(space, ref): raise OperationError(space.w_TypeError, space.wrap("expected unicode object")) return PyUnicode_AS_UNICODE(space, ref) @cpython_api([PyObject], Py_ssize_t, error=-1) def PyUnicode_GetSize(space, ref): - if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode: + if PyUnicode_Check(space, ref): ref = rffi.cast(PyUnicodeObject, ref) return ref.c_size else: - w_obj = from_ref(space, ref) + w_obj = from_pyobj(space, ref) return space.len_w(w_obj) @cpython_api([PyUnicodeObject, rffi.CWCHARP, Py_ssize_t], Py_ssize_t, error=-1) @@ -330,7 +329,7 @@ is NULL.""" if wchar_p: s = rffi.wcharpsize2unicode(wchar_p, length) - return make_ref(space, space.wrap(s)) + return get_pyobj_and_incref(space, space.wrap(s)) else: return rffi.cast(PyObject, new_empty_unicode(space, length)) @@ -424,7 +423,7 @@ NULL, the return value might be a shared object. Therefore, modification of the resulting Unicode object is only allowed when u is NULL.""" if s: - return make_ref(space, PyUnicode_DecodeUTF8( + return get_pyobj_and_incref(space, PyUnicode_DecodeUTF8( space, s, size, lltype.nullptr(rffi.CCHARP.TO))) else: return rffi.cast(PyObject, new_empty_unicode(space, size)) diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py --- a/pypy/module/cpyext/weakrefobject.py +++ b/pypy/module/cpyext/weakrefobject.py @@ -1,5 +1,5 @@ from pypy.module.cpyext.api import cpython_api -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject, as_xpyobj from pypy.module._weakref.interp__weakref import W_Weakref, proxy @cpython_api([PyObject, PyObject], PyObject) @@ -42,12 +42,12 @@ """Similar to PyWeakref_GetObject(), but implemented as a macro that does no error checking. 
""" - return borrow_from(w_ref, space.call_function(w_ref)) + return as_xpyobj(space.call_function(w_ref)) # borrowed @cpython_api([PyObject], PyObject) def PyWeakref_LockObject(space, w_ref): """Return the referenced object from a weak reference. If the referent is no longer live, returns None. This function returns a new reference. + (This is a PyPy extension!) """ return space.call_function(w_ref) - diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -216,6 +216,11 @@ items = [space.unwrap(w_item) for w_item in w_list.getitems()] return list(items) + def ensure_object_strategy(self): # for cpyext + object_strategy = self.space.fromcache(ObjectListStrategy) + if self.strategy is not object_strategy: + self.switch_to_object_strategy() + def switch_to_object_strategy(self): list_w = self.getitems() object_strategy = self.space.fromcache(ObjectListStrategy) From noreply at buildbot.pypy.org Sat Oct 24 10:22:26 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 16:22:26 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: improve Message-ID: <20151024142226.4F17B1C147C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80425:f35fb68c3c92 Date: 2015-10-24 16:22 +0200 http://bitbucket.org/pypy/pypy/changeset/f35fb68c3c92/ Log: improve diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py --- a/pypy/module/cpyext/test/test_cpyext.py +++ b/pypy/module/cpyext/test/test_cpyext.py @@ -669,8 +669,10 @@ return NULL; refcnt_after = true_obj->ob_refcnt; Py_DECREF(tup); - fprintf(stderr, "REFCNT2 %i %i\\n", refcnt, refcnt_after); - return PyBool_FromLong(refcnt_after == refcnt + 1); + fprintf(stderr, "REFCNT2 %i %i %i\\n", refcnt, refcnt_after, + true_obj->ob_refcnt); + return PyBool_FromLong(refcnt_after == refcnt + 1 && + refcnt == true_obj->ob_refcnt); } static PyMethodDef 
methods[] = { From noreply at buildbot.pypy.org Sat Oct 24 05:11:41 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 11:11:41 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: in-progress Message-ID: <20151024091141.40EB61C0661@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80418:ee75effb7359 Date: 2015-10-24 09:31 +0200 http://bitbucket.org/pypy/pypy/changeset/ee75effb7359/ Log: in-progress diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -1116,27 +1116,44 @@ eci = build_eci(False, export_symbols, code) - space.fromcache(State).install_dll(eci) + state = space.fromcache(State) + state.install_dll(eci) run_bootstrap_functions(space) setup_va_functions(eci) - # populate static data - ## for name, (typ, expr) in GLOBALS.iteritems(): - ## name = name.replace("#", "") - ## if name.startswith('PyExc_'): - ## name = '_' + name - ## from pypy.module import cpyext - ## w_obj = eval(expr) - ## if typ in ('PyObject*', 'PyTypeObject*'): - ## struct_ptr = get_pyobj_and_incref(space, w_obj) - ## elif typ == 'PyDateTime_CAPI*': - ## continue - ## else: - ## assert False, "Unknown static data: %s %s" % (typ, name) - ## struct = rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj - ## struct._compilation_info = eci - ## export_struct(name, struct) + # emit uninitialized static data + static_objs_w = [] + static_pyobjs = [] + lines = [] + entries = [] + for name, (typ, expr) in sorted(GLOBALS.items()): + name = name.replace("#", "") + if name.startswith('PyExc_'): + name = '_' + name + if typ in ('PyObject*', 'PyTypeObject*'): + w_obj = eval(expr) + static_objs_w.append(w_obj) + lines.append('%s %s;\n' % (typ, name)) + if typ == 'PyObject*': + entries.append('\t%s,\n' % (name,)) + else: + entries.append('\t(PyObject *)%s,\n' % (name,)) + elif typ == 'PyDateTime_CAPI*': + continue + else: + assert False, "Unknown static 
data: %s %s" % (typ, name) + lines.append('\n') + lines.append('PyObject *pypy_static_pyobjs[] = {\n') + lines.extend(entries) + lines.append('};\n') + eci2 = ExternalCompilationInfo( + separate_module_sources = ''.join(lines), + post_include_bits = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'], + ) + state.static_objs_w = static_objs_w + state.static_pyobjs = rffi.CExternVariable( + PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **') for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name @@ -1147,6 +1164,17 @@ trunk_include = pypydir.dirpath() / 'include' copy_header_files(trunk_include) +def init_static_data_translated(space): + # populate static data + state = space.fromcache(State) + for i, w_obj in enumerate(state.static_objs_w): + py_obj = state.static_pyobjs[i] + setup_prebuilt_pyobj(w_obj, py_obj) + # step 2 + for w_obj in state.static_objs_w: + w_obj.cpyext_fill_prebuilt_pyobj(space) + + def _load_from_cffi(space, name, path, initptr): from pypy.module._cffi_backend import cffi1_module cffi1_module.load_cffi1_module(space, name, path, initptr) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -80,10 +80,12 @@ from pypy.module.cpyext.typeobject import setup_new_method_def from pypy.module.cpyext.api import INIT_FUNCTIONS + from pypy.module.cpyext.api import init_static_data_translated if we_are_translated(): rawrefcount.init(llhelper(rawrefcount.RAWREFCOUNT_DEALLOC_TRIGGER, self.dealloc_trigger)) + init_static_data_translated(space) setup_new_method_def(space) From noreply at buildbot.pypy.org Sat Oct 24 05:11:43 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 11:11:43 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Translates and crashes Message-ID: <20151024091143.52F381C103D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80419:a14224c473b0 Date: 
2015-10-24 10:15 +0100 http://bitbucket.org/pypy/pypy/changeset/a14224c473b0/ Log: Translates and crashes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -25,7 +25,7 @@ from pypy.objspace.std.sliceobject import W_SliceObject from pypy.module.__builtin__.descriptor import W_Property from pypy.module.__builtin__.interp_classobj import W_ClassObject -from pypy.module.micronumpy.base import W_NDimArray +#from pypy.module.micronumpy.base import W_NDimArray ZZZ from rpython.rlib.entrypoint import entrypoint_lowlevel from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable @@ -497,7 +497,7 @@ "PyComplex_Type": "space.w_complex", "PyByteArray_Type": "space.w_bytearray", "PyMemoryView_Type": "space.w_memoryview", - "PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", + #"PyArray_Type": "space.gettypeobject(W_NDimArray.typedef)", ZZZ "PyBaseObject_Type": "space.w_object", 'PyNone_Type': 'space.type(space.w_None)', 'PyNotImplemented_Type': 'space.type(space.w_NotImplemented)', @@ -871,7 +871,7 @@ # populate static data to_fill = [] for name, (typ, expr) in GLOBALS.iteritems(): - from pypy.module import cpyext + from pypy.module import cpyext # for the eval() below w_obj = eval(expr) if name.endswith('#'): name = name[:-1] @@ -1049,7 +1049,7 @@ if name.endswith('#'): structs.append('%s %s;' % (typ[:-1], name[:-1])) elif name.startswith('PyExc_'): - structs.append('extern PyTypeObject _%s;' % (name,)) + structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) elif typ == 'PyDateTime_CAPI*': structs.append('%s %s = NULL;' % (typ, name)) @@ -1125,35 +1125,35 @@ # emit uninitialized static data static_objs_w = [] static_pyobjs = [] - lines = [] - entries = [] + lines = ['PyObject *pypy_static_pyobjs[] = {\n'] + include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] for name, 
(typ, expr) in sorted(GLOBALS.items()): - name = name.replace("#", "") - if name.startswith('PyExc_'): + if name.endswith('#'): + assert typ in ('PyObject*', 'PyTypeObject*') + typ, name = typ[:-1], name[:-1] + elif name.startswith('PyExc_'): + typ = 'PyTypeObject' name = '_' + name - if typ in ('PyObject*', 'PyTypeObject*'): - w_obj = eval(expr) - static_objs_w.append(w_obj) - lines.append('%s %s;\n' % (typ, name)) - if typ == 'PyObject*': - entries.append('\t%s,\n' % (name,)) - else: - entries.append('\t(PyObject *)%s,\n' % (name,)) elif typ == 'PyDateTime_CAPI*': continue else: assert False, "Unknown static data: %s %s" % (typ, name) - lines.append('\n') - lines.append('PyObject *pypy_static_pyobjs[] = {\n') - lines.extend(entries) + + from pypy.module import cpyext # for the eval() below + w_obj = eval(expr) + static_objs_w.append(w_obj) + lines.append('\t(PyObject *)&%s,\n' % (name,)) + include_lines.append('RPY_EXPORTED %s %s;\n' % (typ, name)) + lines.append('};\n') - eci2 = ExternalCompilationInfo( - separate_module_sources = ''.join(lines), - post_include_bits = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'], - ) + eci2 = CConfig._compilation_info_.merge(ExternalCompilationInfo( + separate_module_sources = [''.join(lines)], + post_include_bits = [''.join(include_lines)], + )) state.static_objs_w = static_objs_w - state.static_pyobjs = rffi.CExternVariable( - PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **') + state.get_static_pyobjs = rffi.CExternVariable( + PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', + getter_only=True, declare_as_extern=False) for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name @@ -1165,10 +1165,12 @@ copy_header_files(trunk_include) def init_static_data_translated(space): + from pypy.module.cpyext.pyobject import setup_prebuilt_pyobj # populate static data state = space.fromcache(State) + static_pyobjs = state.get_static_pyobjs() for i, w_obj in 
enumerate(state.static_objs_w): - py_obj = state.static_pyobjs[i] + py_obj = static_pyobjs[i] setup_prebuilt_pyobj(w_obj, py_obj) # step 2 for w_obj in state.static_objs_w: diff --git a/rpython/rlib/exports.py b/rpython/rlib/exports.py --- a/rpython/rlib/exports.py +++ b/rpython/rlib/exports.py @@ -1,5 +1,7 @@ from rpython.rtyper.lltypesystem.lltype import typeOf, ContainerType +# XXX kill me + def export_struct(name, struct): assert name not in EXPORTS_names, "Duplicate export " + name assert isinstance(typeOf(struct), ContainerType) diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -643,7 +643,8 @@ def CExternVariable(TYPE, name, eci, _CConstantClass=CConstant, sandboxsafe=False, _nowrapper=False, - c_type=None, getter_only=False): + c_type=None, getter_only=False, + declare_as_extern=(sys.platform != 'win32')): """Return a pair of functions - a getter and a setter - to access the given global C variable. 
""" @@ -673,7 +674,7 @@ c_setter = "void %(setter_name)s (%(c_type)s v) { %(name)s = v; }" % locals() lines = ["#include <%s>" % i for i in eci.includes] - if sys.platform != 'win32': + if declare_as_extern: lines.append('extern %s %s;' % (c_type, name)) lines.append(c_getter) if not getter_only: From noreply at buildbot.pypy.org Sat Oct 24 07:35:50 2015 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 24 Oct 2015 13:35:50 +0200 (CEST) Subject: [pypy-commit] pypy cpyext-gc-support: Remove "except OperationError: raise" from the final RPython code Message-ID: <20151024113551.1098E1C1277@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80421:7bd9cece7986 Date: 2015-10-24 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/7bd9cece7986/ Log: Remove "except OperationError: raise" from the final RPython code diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -330,23 +330,27 @@ # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) - try: + if not catch_exception: try: res = func(space, *newargs) finally: keepalive_until_here(*keepalives) - except OperationError, e: - if not catch_exception: - raise - if not hasattr(api_function, "error_value"): - raise - state = space.fromcache(State) - state.set_exception(e) - if is_PyObject(restype): - return None - else: - return api_function.error_value - if not we_are_translated(): + else: + # non-rpython variant + assert not we_are_translated() + try: + res = func(space, *newargs) + except OperationError, e: + if not hasattr(api_function, "error_value"): + raise + state = space.fromcache(State) + state.set_exception(e) + if is_PyObject(restype): + return None + else: + return api_function.error_value + finally: + keepalive_until_here(*keepalives) got_integer = isinstance(res, (int, long, float)) assert got_integer == expect_integer,'got %r not integer' % res return res From noreply 
at buildbot.pypy.org Fri Oct 23 13:58:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 23 Oct 2015 19:58:07 +0200 (CEST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <20151023175807.210D91C1F64@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r643:b1e98c42b6dc Date: 2015-10-23 19:59 +0200 http://bitbucket.org/pypy/pypy.org/changeset/b1e98c42b6dc/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $60704 of $105000 (57.8%) + $60723 of $105000 (57.8%)
    @@ -23,7 +23,7 @@
  • diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $29527 of $80000 (36.9%) + $29577 of $80000 (37.0%)
    @@ -25,7 +25,7 @@
  • From noreply at buildbot.pypy.org Fri Oct 23 13:06:41 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 23 Oct 2015 19:06:41 +0200 (CEST) Subject: [pypy-commit] pypy lazy-fast2locals: a test that checks that w_locals is not there Message-ID: <20151023170641.532B31C1F14@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: lazy-fast2locals Changeset: r80413:561181018410 Date: 2015-10-23 19:05 +0200 http://bitbucket.org/pypy/pypy/changeset/561181018410/ Log: a test that checks that w_locals is not there diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -1,10 +1,14 @@ from rpython.tool import udir from pypy.conftest import option +from pypy.interpreter.gateway import interp2app +def check_no_w_locals(space, w_frame): + return space.wrap(w_frame.getorcreatedebug().w_locals is None) class AppTestPyFrame: def setup_class(cls): + space = cls.space cls.w_udir = cls.space.wrap(str(udir.udir)) cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1'))) if not option.runappdirect: @@ -17,6 +21,8 @@ w_call_further.code.hidden_applevel = True # hack cls.w_call_further = w_call_further + cls.w_check_no_w_locals = space.wrap(interp2app(check_no_w_locals)) + # test for the presence of the attributes, not functionality def test_f_locals(self): @@ -493,6 +499,25 @@ sys.settrace(None) assert res == 42 + def test_fast2locals_called_lazily(self): + import sys + class FrameHolder: + pass + fh = FrameHolder() + def trace(frame, what, arg): + # trivial trace function, does not access f_locals + fh.frame = frame + return trace + def f(x): + x += 1 + return x + sys.settrace(trace) + res = f(1) + sys.settrace(None) + assert res == 2 + if hasattr(self, "check_no_w_locals"): # not appdirect + assert self.check_no_w_locals(fh.frame) + def test_set_unset_f_trace(self): import sys seen = [] From noreply at buildbot.pypy.org Mon Oct 26 
03:47:04 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Oct 2015 08:47:04 +0100 (CET) Subject: [pypy-commit] pypy cpyext-gc-support: move file Message-ID: <20151026074704.688CB1C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80451:ce4cf80cee85 Date: 2015-10-26 08:26 +0100 http://bitbucket.org/pypy/pypy/changeset/ce4cf80cee85/ Log: move file diff --git a/pypy/module/cpyext/Doc_stubgen_enable.patch b/pypy/module/cpyext/patches/Doc_stubgen_enable.patch rename from pypy/module/cpyext/Doc_stubgen_enable.patch rename to pypy/module/cpyext/patches/Doc_stubgen_enable.patch From noreply at buildbot.pypy.org Mon Oct 26 03:47:06 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Oct 2015 08:47:06 +0100 (CET) Subject: [pypy-commit] pypy cpyext-gc-support: Took me a while to figure out that the behavior we get now is consistent Message-ID: <20151026074706.749621C06AD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80452:c120d2c5b7dd Date: 2015-10-26 08:47 +0100 http://bitbucket.org/pypy/pypy/changeset/c120d2c5b7dd/ Log: Took me a while to figure out that the behavior we get now is consistent with CPython's own (but not with "default") diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c --- a/pypy/module/cpyext/test/foo.c +++ b/pypy/module/cpyext/test/foo.c @@ -150,6 +150,7 @@ if (v == -1 && PyErr_Occurred()) return -1; self->foo = v; + return 0; } return PyObject_GenericSetAttr((PyObject *)self, name, value); } From noreply at buildbot.pypy.org Mon Oct 26 04:09:07 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Oct 2015 09:09:07 +0100 (CET) Subject: [pypy-commit] pypy cpyext-gc-support: Don't call create_all_slots() on cpyext type objects. 
Fix for Message-ID: <20151026080907.35CDE1C0403@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80453:8fd6a686565e Date: 2015-10-26 09:09 +0100 http://bitbucket.org/pypy/pypy/changeset/8fd6a686565e/ Log: Don't call create_all_slots() on cpyext type objects. Fix for test_typeobject.py:test_sre diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -527,7 +527,8 @@ convert_getset_defs(space, dict_w, pto.c_tp_getset, w_type) convert_member_defs(space, dict_w, pto.c_tp_members, w_type) - W_TypeObject.__init__(w_type, space, name, bases_w, dict_w) + W_TypeObject.__init__(w_type, space, name, bases_w, dict_w, + from_cpyext=True) if not space.is_true(space.issubtype(w_type, space.w_type)): # ZZZ? w_type.flag_cpytype = True diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -126,7 +126,7 @@ @dont_look_inside def __init__(w_self, space, name, bases_w, dict_w, - overridetypedef=None): + overridetypedef=None, from_cpyext=False): w_self.space = space w_self.name = name w_self.bases_w = bases_w @@ -146,7 +146,7 @@ if overridetypedef is not None: setup_builtin_type(w_self) else: - setup_user_defined_type(w_self) + setup_user_defined_type(w_self, from_cpyext=from_cpyext) w_self.w_same_layout_as = get_parent_layout(w_self) if space.config.objspace.std.withtypeversion: @@ -1096,7 +1096,7 @@ return False return True -def setup_user_defined_type(w_self): +def setup_user_defined_type(w_self, from_cpyext=False): if len(w_self.bases_w) == 0: w_self.bases_w = [w_self.space.w_object] w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w) @@ -1108,8 +1108,9 @@ w_self.flag_cpytype |= w_base.flag_cpytype w_self.flag_abstract |= w_base.flag_abstract - hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase) - 
create_all_slots(w_self, hasoldstylebase, w_bestbase) + if not from_cpyext: + hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase) + create_all_slots(w_self, hasoldstylebase, w_bestbase) ensure_common_attributes(w_self) From noreply at buildbot.pypy.org Mon Oct 26 04:25:10 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Oct 2015 09:25:10 +0100 (CET) Subject: [pypy-commit] pypy cpyext-gc-support: fix Message-ID: <20151026082510.B49951C0403@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80454:f8e26f5b1f8b Date: 2015-10-26 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/f8e26f5b1f8b/ Log: fix diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -118,6 +118,7 @@ class W_PyCMethodObject(W_PyCFunctionObject): w_self = None + w_module = None def __init__(self, space, ml, w_type): self.space = space self.ml = ml @@ -137,6 +138,7 @@ class W_PyCClassMethodObject(W_PyCFunctionObject): w_self = None + w_module = None def __init__(self, space, ml, w_type): self.space = space self.ml = ml From noreply at buildbot.pypy.org Mon Oct 26 04:25:12 2015 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 26 Oct 2015 09:25:12 +0100 (CET) Subject: [pypy-commit] pypy cpyext-gc-support: more fix Message-ID: <20151026082512.D9D6B1C0403@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: cpyext-gc-support Changeset: r80455:85a201e4f4a4 Date: 2015-10-26 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/85a201e4f4a4/ Log: more fix diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -282,8 +282,8 @@ alloc_pyobj=type_alloc_pyobj, fill_pyobj=type_fill_pyobj, alloc_pypy=type_alloc_pypy, - fill_pypy=type_fill_pypy) - #dealloc=type_dealloc) + fill_pypy=type_fill_pypy, + dealloc=type_dealloc) 
@cpython_api([PyObject], lltype.Void, external=False) @@ -358,21 +358,18 @@ c_buf.c_bf_getreadbuffer = buf_getreadbuffer.api_func.get_llhelper(space) pto.c_tp_as_buffer = c_buf - at cpython_api([PyObject], lltype.Void, external=False) -def type_dealloc(space, obj): +def type_dealloc(space, obj_pto): from pypy.module.cpyext.object import PyObject_dealloc - obj_pto = rffi.cast(PyTypeObjectPtr, obj) base_pyo = rffi.cast(PyObject, obj_pto.c_tp_base) Py_DecRef(space, obj_pto.c_tp_bases) Py_DecRef(space, obj_pto.c_tp_mro) Py_DecRef(space, obj_pto.c_tp_cache) # let's do it like cpython Py_DecRef(space, obj_pto.c_tp_dict) if obj_pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE: - heaptype = rffi.cast(PyHeapTypeObject, obj) + heaptype = rffi.cast(PyHeapTypeObject, obj_pto) Py_DecRef(space, heaptype.c_ht_name) Py_DecRef(space, base_pyo) - PyObject_dealloc(space, obj) - + PyObject_dealloc(space, rffi.cast(PyObject, obj_pto)) def type_alloc(space, w_metatype): ZZZ @@ -397,9 +394,15 @@ return rffi.cast(PyObject, heaptype) def type_alloc_pyobj(space, w_type): - pto = lltype.malloc(PyTypeObject, flavor='raw', zero=True, + heaptype = lltype.malloc(PyHeapTypeObjectStruct, flavor='raw', zero=True, track_allocation=False) - pto.c_tp_flags |= Py_TPFLAGS_READYING + pto = heaptype.c_ht_type + pto.c_tp_flags |= Py_TPFLAGS_HEAPTYPE | Py_TPFLAGS_READYING + pto.c_tp_as_number = heaptype.c_as_number + pto.c_tp_as_sequence = heaptype.c_as_sequence + pto.c_tp_as_mapping = heaptype.c_as_mapping + pto.c_tp_as_buffer = heaptype.c_as_buffer + return pto, RRC_PERMANENT def type_fill_pyobj(space, w_type, pto): From noreply at buildbot.pypy.org Mon Oct 26 04:49:43 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 26 Oct 2015 09:49:43 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: copy copy copy. 
adding the skeleton structure for the assembler, regalloc and various other modules needed for assembly Message-ID: <20151026084943.0C6D11C0726@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80456:2fc81cd2ea51 Date: 2015-10-26 09:49 +0100 http://bitbucket.org/pypy/pypy/changeset/2fc81cd2ea51/ Log: copy copy copy. adding the skeleton structure for the assembler, regalloc and various other modules needed for assembly diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -37,6 +37,7 @@ arm logging + s390x Writing your own interpreter in RPython diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -146,7 +146,7 @@ MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], MODEL_PPC_64: [], # we don't even have PPC directory, so no - MODEL_S390_64: [], + MODEL_S390_64: ['floats', 'longlong'], }[backend_name] if __name__ == '__main__': diff --git a/rpython/jit/backend/ppc/ppc_assembler.py b/rpython/jit/backend/ppc/ppc_assembler.py --- a/rpython/jit/backend/ppc/ppc_assembler.py +++ b/rpython/jit/backend/ppc/ppc_assembler.py @@ -820,7 +820,7 @@ frame_depth = regalloc.get_final_frame_depth() jump_target_descr = regalloc.jump_target_descr if jump_target_descr is not None: - tgt_depth = jump_target_descr._ppc_clt.frame_info.jfi_frame_depth + tgt_depth = jump_target_descr._zarch_clt.frame_info.jfi_frame_depth target_frame_depth = tgt_depth - JITFRAME_FIXED_SIZE frame_depth = max(frame_depth, target_frame_depth) return frame_depth diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -1,5 +1,4 @@ -# TODO -WORD = 8 +WORD = 4 JITFRAME_FIXED_SIZE = 48 diff --git a/rpython/jit/backend/zarch/assembler.py 
b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1,10 +1,17 @@ from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper -from rpython.jit.backend.zarch import registers as reg +from rpython.jit.backend.llsupport import jitframe, rewrite +from rpython.jit.backend.model import CompiledLoopToken +from rpython.jit.backend.zarch import conditions as c +from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.codebuilder import InstrBuilder +from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.metainterp.resoperation import rop from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id +from rpython.rlib import rgc +from rpython.rtyper.lltypesystem import lltype, rffi, llmemory class AssemblerZARCH(BaseAssembler): @@ -51,12 +58,12 @@ def gen_func_prolog(self): STACK_FRAME_SIZE = 40 - self.mc.STMG(reg.r11, reg.r15, loc.addr(-STACK_FRAME_SIZE, reg.sp)) - self.mc.AHI(reg.sp, loc.imm(-STACK_FRAME_SIZE)) + self.mc.STMG(r.r11, r.r15, loc.addr(-STACK_FRAME_SIZE, r.sp)) + self.mc.AHI(r.sp, loc.imm(-STACK_FRAME_SIZE)) def gen_func_epilog(self): - self.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.sp)) - self.jmpto(reg.r14) + self.mc.LMG(r.r11, r.r15, loc.addr(0, r.SPP)) + self.jmpto(r.r14) def jmpto(self, register): # TODO, manual says this is a performance killer, there @@ -92,6 +99,85 @@ def _build_stack_check_slowpath(self): pass # TODO + + def _call_header_with_stack_check(self): + pass # TODO + + @rgc.no_release_gil + def assemble_loop(self, jd_id, unique_id, logger, loopname, inputargs, + operations, looptoken, log): + clt = CompiledLoopToken(self.cpu, looptoken.number) + looptoken.compiled_loop_token = clt + 
clt._debug_nbargs = len(inputargs) + if not we_are_translated(): + # Arguments should be unique + assert len(set(inputargs)) == len(inputargs) + + self.setup(looptoken) + frame_info = self.datablockwrapper.malloc_aligned( + jitframe.JITFRAMEINFO_SIZE, alignment=WORD) + clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) + clt.allgcrefs = [] + clt.frame_info.clear() # for now + + if log: + operations = self._inject_debugging_code(looptoken, operations, + 'e', looptoken.number) + + regalloc = Regalloc(assembler=self) + # + self._call_header_with_stack_check() + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, clt.allgcrefs) + looppos = self.mc.get_relative_pos() + frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, + operations) + self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + # + size_excluding_failure_stuff = self.mc.get_relative_pos() + self.write_pending_failure_recoveries() + full_size = self.mc.get_relative_pos() + # + self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) + rawstart = self.materialize_loop(looptoken) + # + looptoken._ll_loop_code = looppos + rawstart + debug_start("jit-backend-addr") + debug_print("Loop %d (%s) has address 0x%x to 0x%x (bootstrap 0x%x)" % ( + looptoken.number, loopname, + r_uint(rawstart + looppos), + r_uint(rawstart + size_excluding_failure_stuff), + r_uint(rawstart))) + debug_stop("jit-backend-addr") + self.patch_pending_failure_recoveries(rawstart) + # + ops_offset = self.mc.ops_offset + if not we_are_translated(): + # used only by looptoken.dump() -- useful in tests + looptoken._ppc_rawstart = rawstart + looptoken._ppc_fullsize = full_size + looptoken._ppc_ops_offset = ops_offset + looptoken._ll_function_addr = rawstart + if logger: + logger.log_loop(inputargs, operations, 0, "rewritten", + name=loopname, ops_offset=ops_offset) + + def _assemble(self, regalloc, inputargs, operations): + self._regalloc = regalloc + 
self.guard_success_cc = c.cond_none + regalloc.compute_hint_frame_locations(operations) + regalloc.walk_operations(inputargs, operations) + assert self.guard_success_cc == c.cond_none + if 1: # we_are_translated() or self.cpu.dont_keepalive_stuff: + self._regalloc = None # else keep it around for debugging + frame_depth = regalloc.get_final_frame_depth() + jump_target_descr = regalloc.jump_target_descr + if jump_target_descr is not None: + tgt_depth = jump_target_descr._ppc_clt.frame_info.jfi_frame_depth + target_frame_depth = tgt_depth - JITFRAME_FIXED_SIZE + frame_depth = max(frame_depth, target_frame_depth) + return frame_depth + # ________________________________________ # ASSEMBLER EMISSION diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py --- a/rpython/jit/backend/zarch/conditions.py +++ b/rpython/jit/backend/zarch/conditions.py @@ -7,3 +7,5 @@ LE = loc.imm(EQ.value | LT.value) GE = loc.imm(EQ.value | GT.value) OVERFLOW = loc.imm(0x1) + +cond_none = loc.imm(0x0) diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -9,8 +9,13 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers -sp = r15 -raddr = r14 +MANAGED_REGS = [r0,r1,r2,r3,r4] +VOLATILES = [r0,r1,r2,r3,r4] +SPP = r15 +RETURN = r14 [f0,f1,f2,f3,f4,f5,f6,f7,f8, f9,f10,f11,f12,f13,f14,f15] = fpregisters + +MANAGED_FP_REGS = fpregisters +VOLATILES_FLOAT = [] diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -1,5 +1,7 @@ from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU +from rpython.jit.backend.zarch.assembler import AssemblerZARCH from rpython.rtyper.lltypesystem import lltype, llmemory +from rpython.rlib import rgc class AbstractZARCHCPU(AbstractLLCPU): def __init__(self, 
rtyper, stats, opts=None, translate_support_code=False, @@ -14,4 +16,13 @@ cast_ptr_to_int = staticmethod(cast_ptr_to_int) class CPU_S390_64(AbstractZARCHCPU): - pass + def setup(self): + self.assembler = AssemblerZARCH(self) + + @rgc.no_release_gil + def setup_once(self): + self.assembler.setup_once() + + @rgc.no_release_gil + def finish_once(self): + self.assembler.finish_once() From noreply at buildbot.pypy.org Mon Oct 26 04:57:54 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 26 Oct 2015 09:57:54 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: added docu for the backend. described the missing libffi-devel on redhat linux 6.5 and how to install it manually Message-ID: <20151026085754.1C3041C1046@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80457:9a86a04617d8 Date: 2015-10-26 09:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9a86a04617d8/ Log: added docu for the backend. described the missing libffi-devel on redhat linux 6.5 and how to install it manually diff --git a/rpython/doc/s390x.rst b/rpython/doc/s390x.rst new file mode 100644 --- /dev/null +++ b/rpython/doc/s390x.rst @@ -0,0 +1,16 @@ +.. _s390x: + +Translation on the IBM Mainframe +================================ + +Redhat Linux (rel65) +-------------------- + +Unfortunatley there is no ffi development package (yet?), thus +one needs to install this manually. +libffi is not installed on the rl65. +This can be resolved by installing it locally (./configure && make install) and +adjusting th PKG_CONFIG_PATH to point to the install location. +In addition the LD_LIBRARY_PATH must be set to the install location the libffi.so +can be found. 
+ From noreply at buildbot.pypy.org Mon Oct 26 04:57:56 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 26 Oct 2015 09:57:56 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: more skeleton structure Message-ID: <20151026085756.207D81C1046@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80458:b0d5eccd58d1 Date: 2015-10-26 09:56 +0100 http://bitbucket.org/pypy/pypy/changeset/b0d5eccd58d1/ Log: more skeleton structure diff --git a/rpython/jit/backend/zarch/helper/__init__.py b/rpython/jit/backend/zarch/helper/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py new file mode 100644 diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/regalloc.py @@ -0,0 +1,468 @@ +from rpython.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager, + TempVar, compute_vars_longevity, + BaseRegalloc) +from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.codewriter import longlong +from rpython.jit.backend.zarch.locations import imm, get_fp_offset +from rpython.jit.metainterp.history import (Const, ConstInt, ConstFloat, ConstPtr, + INT, REF, FLOAT, VOID) +from rpython.jit.metainterp.history import JitCellToken, TargetToken +from rpython.jit.metainterp.resoperation import rop +from rpython.jit.backend.zarch import locations +from rpython.rtyper.lltypesystem import rffi, lltype, rstr, llmemory +from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rtyper.annlowlevel import cast_instance_to_gcref +from rpython.jit.backend.llsupport import symbolic +from rpython.jit.backend.llsupport.descr import ArrayDescr +import rpython.jit.backend.zarch.registers as r +import 
rpython.jit.backend.zarch.conditions as c +from rpython.jit.backend.llsupport.descr import unpack_arraydescr +from rpython.jit.backend.llsupport.descr import unpack_fielddescr +from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr +from rpython.jit.backend.llsupport.gcmap import allocate_gcmap +from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.debug import debug_print +from rpython.jit.codewriter.effectinfo import EffectInfo +from rpython.rlib import rgc +from rpython.rlib.rarithmetic import r_uint + +LIMIT_LOOP_BREAK = 15000 # should be much smaller than 32 KB + + +class TempInt(TempVar): + type = INT + + def __repr__(self): + return "" % (id(self),) + +class TempPtr(TempVar): + type = REF + + def __repr__(self): + return "" % (id(self),) + +class TempFloat(TempVar): + type = FLOAT + + def __repr__(self): + return "" % (id(self),) + + +class FPRegisterManager(RegisterManager): + all_regs = r.MANAGED_FP_REGS + box_types = [FLOAT] + save_around_call_regs = r.VOLATILES_FLOAT + assert set(save_around_call_regs).issubset(all_regs) + + def convert_to_adr(self, c): + assert isinstance(c, ConstFloat) + adr = self.assembler.datablockwrapper.malloc_aligned(8, 8) + x = c.getfloatstorage() + rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x + return adr + + def convert_to_imm(self, c): + adr = self.convert_to_adr(c) + return locations.ConstFloatLoc(adr) + + def __init__(self, longevity, frame_manager=None, assembler=None): + RegisterManager.__init__(self, longevity, frame_manager, assembler) + + def call_result_location(self, v): + return r.f1 + + def ensure_reg(self, box): + if isinstance(box, Const): + loc = self.get_scratch_reg() + immadrvalue = self.convert_to_adr(box) + mc = self.assembler.mc + mc.load_imm(r.SCRATCH, immadrvalue) + mc.lfdx(loc.value, 0, r.SCRATCH.value) + else: + assert box in self.temp_boxes + loc = self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc + + def 
get_scratch_reg(self): + box = TempFloat() + reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) + self.temp_boxes.append(box) + return reg + + +class ZARCHRegisterManager(RegisterManager): + all_regs = r.MANAGED_REGS + box_types = None # or a list of acceptable types + no_lower_byte_regs = all_regs + save_around_call_regs = r.VOLATILES + frame_reg = r.SPP + assert set(save_around_call_regs).issubset(all_regs) + + def __init__(self, longevity, frame_manager=None, assembler=None): + RegisterManager.__init__(self, longevity, frame_manager, assembler) + + def call_result_location(self, v): + return r.r2 + + def convert_to_int(self, c): + if isinstance(c, ConstInt): + return rffi.cast(lltype.Signed, c.value) + else: + assert isinstance(c, ConstPtr) + return rffi.cast(lltype.Signed, c.value) + + def convert_to_imm(self, c): + val = self.convert_to_int(c) + return locations.ImmLocation(val) + + def ensure_reg(self, box): + if isinstance(box, Const): + loc = self.get_scratch_reg() + immvalue = self.convert_to_int(box) + self.assembler.mc.load_imm(loc, immvalue) + else: + assert box in self.temp_boxes + loc = self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc + + def get_scratch_reg(self): + box = TempVar() + reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) + self.temp_boxes.append(box) + return reg + + +class ZARCHFrameManager(FrameManager): + def __init__(self, base_ofs): + FrameManager.__init__(self) + self.used = [] + self.base_ofs = base_ofs + + def frame_pos(self, loc, box_type): + #return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type) + return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type) + + @staticmethod + def frame_size(type): + return 1 + + @staticmethod + def get_loc_index(loc): + assert isinstance(loc, locations.StackLocation) + return loc.position + + +class Regalloc(BaseRegalloc): + + def __init__(self, assembler=None): + self.cpu = 
assembler.cpu + self.assembler = assembler + self.jump_target_descr = None + self.final_jump_op = None + + def _prepare(self, inputargs, operations, allgcrefs): + cpu = self.assembler.cpu + self.fm = ZARCHFrameManager(cpu.get_baseofs_of_frame_field()) + operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations, + allgcrefs) + # compute longevity of variables + longevity, last_real_usage = compute_vars_longevity( + inputargs, operations) + self.longevity = longevity + self.last_real_usage = last_real_usage + self.rm = ZARCHRegisterManager(self.longevity, + frame_manager = self.fm, + assembler = self.assembler) + self.fprm = FPRegisterManager(self.longevity, frame_manager = self.fm, + assembler = self.assembler) + return operations + + def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): + operations = self._prepare(inputargs, operations, allgcrefs) + self._set_initial_bindings(inputargs, looptoken) + # note: we need to make a copy of inputargs because possibly_free_vars + # is also used on op args, which is a non-resizable list + self.possibly_free_vars(list(inputargs)) + self.min_bytes_before_label = 4 # for redirect_call_assembler() + return operations + + def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs, + frame_info): + operations = self._prepare(inputargs, operations, allgcrefs) + self._update_bindings(arglocs, inputargs) + self.min_bytes_before_label = 0 + return operations + + def ensure_next_label_is_at_least_at_position(self, at_least_position): + self.min_bytes_before_label = max(self.min_bytes_before_label, + at_least_position) + + def _update_bindings(self, locs, inputargs): + # XXX this should probably go to llsupport/regalloc.py + used = {} + i = 0 + for loc in locs: + if loc is None: # xxx bit kludgy + loc = r.SPP + arg = inputargs[i] + i += 1 + if loc.is_reg(): + if loc is r.SPP: + self.rm.bindings_to_frame_reg[arg] = None + else: + self.rm.reg_bindings[arg] = loc + used[loc] = None + elif loc.is_fp_reg(): + 
self.fprm.reg_bindings[arg] = loc + used[loc] = None + else: + assert loc.is_stack() + self.fm.bind(arg, loc) + self.rm.free_regs = [] + for reg in self.rm.all_regs: + if reg not in used: + self.rm.free_regs.append(reg) + self.fprm.free_regs = [] + for reg in self.fprm.all_regs: + if reg not in used: + self.fprm.free_regs.append(reg) + self.possibly_free_vars(list(inputargs)) + self.fm.finish_binding() + self.rm._check_invariants() + self.fprm._check_invariants() + + def get_final_frame_depth(self): + return self.fm.get_frame_depth() + + def possibly_free_var(self, var): + if var is not None: + if var.type == FLOAT: + self.fprm.possibly_free_var(var) + else: + self.rm.possibly_free_var(var) + + def possibly_free_vars(self, vars): + for var in vars: + self.possibly_free_var(var) + + def possibly_free_vars_for_op(self, op): + for i in range(op.numargs()): + var = op.getarg(i) + self.possibly_free_var(var) + + def force_allocate_reg(self, var): + if var.type == FLOAT: + forbidden_vars = self.fprm.temp_boxes + return self.fprm.force_allocate_reg(var, forbidden_vars) + else: + forbidden_vars = self.rm.temp_boxes + return self.rm.force_allocate_reg(var, forbidden_vars) + + def force_allocate_reg_or_cc(self, var): + assert var.type == INT + if self.next_op_can_accept_cc(self.operations, self.rm.position): + # hack: return the SPP location to mean "lives in CC". This + # SPP will not actually be used, and the location will be freed + # after the next op as usual. + self.rm.force_allocate_frame_reg(var) + return r.SPP + else: + # else, return a regular register (not SPP). 
+ return self.force_allocate_reg(var) + + def walk_operations(self, inputargs, operations): + from rpython.jit.backend.zarch.assembler import ( + asm_operations) + i = 0 + self.limit_loop_break = (self.assembler.mc.get_relative_pos() + + LIMIT_LOOP_BREAK) + self.operations = operations + while i < len(operations): + op = operations[i] + self.assembler.mc.mark_op(op) + self.rm.position = i + self.fprm.position = i + if op.has_no_side_effect() and op not in self.longevity: + i += 1 + self.possibly_free_vars_for_op(op) + continue + # + for j in range(op.numargs()): + box = op.getarg(j) + if box.type != FLOAT: + self.rm.temp_boxes.append(box) + else: + self.fprm.temp_boxes.append(box) + # + opnum = op.getopnum() + if not we_are_translated() and opnum == -127: + self._consider_force_spill(op) + else: + arglocs = prepare_oplist[opnum](self, op) + asm_operations[opnum](self.assembler, op, arglocs, self) + self.free_op_vars() + self.possibly_free_var(op) + self.rm._check_invariants() + self.fprm._check_invariants() + if self.assembler.mc.get_relative_pos() > self.limit_loop_break: + self.assembler.break_long_loop() + self.limit_loop_break = (self.assembler.mc.get_relative_pos() + + LIMIT_LOOP_BREAK) + i += 1 + assert not self.rm.reg_bindings + assert not self.fprm.reg_bindings + self.flush_loop() + self.assembler.mc.mark_op(None) # end of the loop + self.operations = None + for arg in inputargs: + self.possibly_free_var(arg) + + def flush_loop(self): + # Emit a nop in the rare case where we have a guard_not_invalidated + # immediately before a label + mc = self.assembler.mc + while self.min_bytes_before_label > mc.get_relative_pos(): + mc.nop() + + def get_gcmap(self, forbidden_regs=[], noregs=False): + frame_depth = self.fm.get_frame_depth() + gcmap = allocate_gcmap(self.assembler, frame_depth, + r.JITFRAME_FIXED_SIZE) + for box, loc in self.rm.reg_bindings.iteritems(): + if loc in forbidden_regs: + continue + if box.type == REF and self.rm.is_still_alive(box): + assert 
not noregs + assert loc.is_reg() + val = self.assembler.cpu.all_reg_indexes[loc.value] + gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) + for box, loc in self.fm.bindings.iteritems(): + if box.type == REF and self.rm.is_still_alive(box): + assert isinstance(loc, locations.StackLocation) + val = loc.get_position() + r.JITFRAME_FIXED_SIZE + gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8)) + return gcmap + + def loc(self, var): + if var.type == FLOAT: + return self.fprm.loc(var) + else: + return self.rm.loc(var) + + def next_instruction(self): + self.rm.next_instruction() + self.fprm.next_instruction() + + def force_spill_var(self, var): + if var.type == FLOAT: + self.fprm.force_spill_var(var) + else: + self.rm.force_spill_var(var) + + def _consider_force_spill(self, op): + # This operation is used only for testing + self.force_spill_var(op.getarg(0)) + + def before_call(self, force_store=[], save_all_regs=False): + self.rm.before_call(force_store, save_all_regs) + self.fprm.before_call(force_store, save_all_regs) + + def after_call(self, v): + if v.type == FLOAT: + return self.fprm.after_call(v) + else: + return self.rm.after_call(v) + + def call_result_location(self, v): + if v.type == FLOAT: + return self.fprm.call_result_location(v) + else: + return self.rm.call_result_location(v) + + def ensure_reg(self, box): + if box.type == FLOAT: + return self.fprm.ensure_reg(box) + else: + return self.rm.ensure_reg(box) + + def ensure_reg_or_16bit_imm(self, box): + if box.type == FLOAT: + return self.fprm.ensure_reg(box) + else: + if check_imm_box(box): + return imm(box.getint()) + return self.rm.ensure_reg(box) + + def ensure_reg_or_any_imm(self, box): + if box.type == FLOAT: + return self.fprm.ensure_reg(box) + else: + if isinstance(box, Const): + return imm(box.getint()) + return self.rm.ensure_reg(box) + + def get_scratch_reg(self, type): + if type == FLOAT: + return self.fprm.get_scratch_reg() + else: + return self.rm.get_scratch_reg() + + def 
free_op_vars(self): + # free the boxes in the 'temp_boxes' lists, which contain both + # temporary boxes and all the current operation's arguments + self.rm.free_temp_vars() + self.fprm.free_temp_vars() + + def compute_hint_frame_locations(self, operations): + # optimization only: fill in the 'hint_frame_locations' dictionary + # of rm and xrm based on the JUMP at the end of the loop, by looking + # at where we would like the boxes to be after the jump. + op = operations[-1] + if op.getopnum() != rop.JUMP: + return + self.final_jump_op = op + descr = op.getdescr() + assert isinstance(descr, TargetToken) + if descr._ll_loop_code != 0: + # if the target LABEL was already compiled, i.e. if it belongs + # to some already-compiled piece of code + self._compute_hint_frame_locations_from_descr(descr) + #else: + # The loop ends in a JUMP going back to a LABEL in the same loop. + # We cannot fill 'hint_frame_locations' immediately, but we can + # wait until the corresponding prepare_op_label() to know where the + # we would like the boxes to be after the jump. 
+ + def _compute_hint_frame_locations_from_descr(self, descr): + arglocs = self.assembler.target_arglocs(descr) + jump_op = self.final_jump_op + assert len(arglocs) == jump_op.numargs() + for i in range(jump_op.numargs()): + box = jump_op.getarg(i) + if not isinstance(box, Const): + loc = arglocs[i] + if loc is not None and loc.is_stack(): + self.fm.hint_frame_pos[box] = self.fm.get_loc_index(loc) + + # ****************************************************** + # * P R E P A R E O P E R A T I O N S * + # ****************************************************** + +def notimplemented(self, op): + msg = '[S390X/regalloc] %s not implemented\n' % op.getopname() + if we_are_translated(): + llop.debug_print(lltype.Void, msg) + raise NotImplementedError(msg) + +prepare_oplist = [notimplemented] * (rop._LAST + 1) + +for key, value in rop.__dict__.items(): + key = key.lower() + if key.startswith('_'): + continue + methname = 'prepare_%s' % key + if hasattr(Regalloc, methname): + func = getattr(Regalloc, methname).im_func + prepare_oplist[value] = func diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -0,0 +1,25 @@ +from rpython.jit.backend.test.runner_test import LLtypeBackendTest +from rpython.jit.backend.zarch.runner import CPU_S390_64 +from rpython.jit.tool.oparser import parse +from rpython.jit.metainterp.history import (AbstractFailDescr, + AbstractDescr, + BasicFailDescr, BasicFinalDescr, + JitCellToken, TargetToken, + ConstInt, ConstPtr, + Const, ConstFloat) +from rpython.jit.metainterp.resoperation import InputArgInt, InputArgFloat +from rpython.rtyper.lltypesystem import lltype +from rpython.jit.metainterp.resoperation import ResOperation, rop +import py + +class FakeStats(object): + pass + +class TestPPC(LLtypeBackendTest): + # for the individual tests see + # ====> ../../test/runner_test.py + + def get_cpu(self): + cpu = 
CPU_S390_64(rtyper=None, stats=FakeStats()) + cpu.setup_once() + return cpu From noreply at buildbot.pypy.org Mon Oct 26 06:52:38 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 26 Oct 2015 11:52:38 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: adding and adjusting structure while processing through assemble_loop method Message-ID: <20151026105238.2EDFA1C1046@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80459:c877ffac4111 Date: 2015-10-26 11:52 +0100 http://bitbucket.org/pypy/pypy/changeset/c877ffac4111/ Log: adding and adjusting structure while processing through assemble_loop method diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -7,13 +7,14 @@ from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.codebuilder import InstrBuilder from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.backend.zarch.opassembler import IntOpAssembler from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.metainterp.resoperation import rop from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -class AssemblerZARCH(BaseAssembler): +class AssemblerZARCH(BaseAssembler, IntOpAssembler): def __init__(self, cpu, translate_support_code=False): BaseAssembler.__init__(self, cpu, translate_support_code) @@ -178,21 +179,107 @@ frame_depth = max(frame_depth, target_frame_depth) return frame_depth + def regalloc_mov(self, prev_loc, loc): + if prev_loc.is_imm(): + value = prev_loc.getint() + # move immediate value to register + if loc.is_core_reg(): + self.mc.load_imm(loc, value) + return + # move immediate value to memory + elif loc.is_stack(): + with scratch_reg(self.mc): + offset = loc.value + 
self.mc.load_imm(r.SCRATCH, value) + self.mc.store(r.SCRATCH.value, r.SPP, offset) + return + assert 0, "not supported location" + elif prev_loc.is_stack(): + offset = prev_loc.value + # move from memory to register + if loc.is_core_reg(): + self.mc.load(loc, r.SPP, offset) + return + # move in memory + elif loc.is_stack(): + target_offset = loc.value + with scratch_reg(self.mc): + self.mc.load(r.SCRATCH.value, r.SPP, offset) + self.mc.store(r.SCRATCH.value, r.SPP, target_offset) + return + # move from memory to fp register + elif loc.is_fp_reg(): + assert prev_loc.type == FLOAT, 'source not float location' + self.mc.lfd(loc, r.SPP, offset) + return + assert 0, "not supported location" + elif prev_loc.is_core_reg(): + reg = prev_loc.value + # move to another register + if loc.is_core_reg(): + other_reg = loc.value + self.mc.mr(other_reg, reg) + return + # move to memory + elif loc.is_stack(): + offset = loc.value + self.mc.store(reg, r.SPP, offset) + return + assert 0, "not supported location" + elif prev_loc.is_imm_float(): + value = prev_loc.getint() + # move immediate value to fp register + if loc.is_fp_reg(): + with scratch_reg(self.mc): + self.mc.load_imm(r.SCRATCH, value) + self.mc.lfdx(loc.value, 0, r.SCRATCH.value) + return + # move immediate value to memory + elif loc.is_stack(): + with scratch_reg(self.mc): + offset = loc.value + self.mc.load_imm(r.SCRATCH, value) + self.mc.lfdx(r.FP_SCRATCH.value, 0, r.SCRATCH.value) + self.mc.stfd(r.FP_SCRATCH.value, r.SPP.value, offset) + return + assert 0, "not supported location" + elif prev_loc.is_fp_reg(): + reg = prev_loc.value + # move to another fp register + if loc.is_fp_reg(): + other_reg = loc.value + self.mc.fmr(other_reg, reg) + return + # move from fp register to memory + elif loc.is_stack(): + assert loc.type == FLOAT, "target not float location" + offset = loc.value + self.mc.stfd(reg, r.SPP.value, offset) + return + assert 0, "not supported location" + assert 0, "not supported location" + # 
________________________________________ # ASSEMBLER EMISSION - def emit_op_int_add(self, op): - pass + def emit_increment_debug_counter(self, op, arglocs, regalloc): + pass # TODO -def notimplemented_op(self, op, arglocs, regalloc, fcond): + def emit_finish(self, op, arglocs, regalloc): + pass # TODO + +def notimplemented_op(asm, op, arglocs, regalloc): print "[ZARCH/asm] %s not implemented" % op.getopname() raise NotImplementedError(op) asm_operations = [notimplemented_op] * (rop._LAST + 1) asm_extra_operations = {} -for name, value in AssemblerZARCH.__dict__.iteritems(): - if name.startswith('emit_op_'): - opname = name[len('emit_op_'):] - num = getattr(rop, opname.upper()) - asm_operations[num] = value +for key, value in rop.__dict__.items(): + key = key.lower() + if key.startswith('_'): + continue + methname = 'emit_%s' % key + if hasattr(AssemblerZARCH, methname): + func = getattr(AssemblerZARCH, methname).im_func + asm_operations[value] = func diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -84,6 +84,9 @@ self.clear_cache(addr) self._dump(addr, "jit-backend-dump", "s390x") + def load(self, treg, sreg, offset): + self.LG(treg, loc.addr(offset, sreg)) + def currpos(self): return self.get_relative_pos() diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -0,0 +1,22 @@ +from rpython.jit.metainterp.history import ConstInt, FLOAT +from rpython.jit.backend.zarch.locations import imm + +def check_imm(arg, lower_bound=-2**15, upper_bound=2**15-1): + if isinstance(arg, ConstInt): + i = arg.getint() + return lower_bound <= i <= upper_bound + return False + +def _prepare_binary_arith(self, op): + a0 = op.getarg(0) + a1 = op.getarg(1) + if check_imm(a0): + a0, a1 = a1, a0 
+ l0 = self.ensure_reg(a0) + if check_imm(a1): + l1 = imm(a1.getint()) + else: + l1 = self.ensure_reg(a1) + self.free_op_vars() + self.force_result_in_reg(op, a0) + return [l0, l1] diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -20,8 +20,7 @@ 'AG': ('rxy', ['\xE3','\x08']), 'AGF': ('rxy', ['\xE3','\x18']), 'AHI': ('ri', ['\xA7','\x0A']), - - # floating point + 'AGHI': ('ri', ['\xA7','\x0B']), } logic_mnemonic_codes = { diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/zarch/opassembler.py @@ -0,0 +1,12 @@ + +class IntOpAssembler(object): + _mixin_ = True + + def emit_int_add(self, op, arglocs, regalloc): + l0, l1 = arglocs + assert not l0.is_imm() + if l1.is_imm(): + self.mc.AGHI(l0, l1) + else: + self.mc.AGR(l0, l1) + diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -16,6 +16,7 @@ from rpython.jit.backend.llsupport.descr import ArrayDescr import rpython.jit.backend.zarch.registers as r import rpython.jit.backend.zarch.conditions as c +import rpython.jit.backend.zarch.helper.regalloc as regallochelp from rpython.jit.backend.llsupport.descr import unpack_arraydescr from rpython.jit.backend.llsupport.descr import unpack_fielddescr from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr @@ -252,6 +253,14 @@ var = op.getarg(i) self.possibly_free_var(var) + def force_result_in_reg(self, var, loc): + if var.type == FLOAT: + forbidden_vars = self.fprm.temp_boxes + return self.fprm.force_result_in_reg(var, loc, forbidden_vars) + else: + forbidden_vars = self.rm.temp_boxes + return self.rm.force_result_in_reg(var, loc, forbidden_vars) + def force_allocate_reg(self, var): 
if var.type == FLOAT: forbidden_vars = self.fprm.temp_boxes @@ -450,6 +459,14 @@ # * P R E P A R E O P E R A T I O N S * # ****************************************************** + def prepare_increment_debug_counter(self, op): + pass # XXX + + prepare_int_add = regallochelp._prepare_binary_arith + + def prepare_finish(self, op): + return [] + def notimplemented(self, op): msg = '[S390X/regalloc] %s not implemented\n' % op.getopname() if we_are_translated(): From noreply at buildbot.pypy.org Mon Oct 26 08:31:49 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Oct 2015 13:31:49 +0100 (CET) Subject: [pypy-commit] pypy default: fix tests Message-ID: <20151026123149.A19811C1046@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80460:53d7da76969d Date: 2015-10-26 23:32 +1100 http://bitbucket.org/pypy/pypy/changeset/53d7da76969d/ Log: fix tests diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -430,5 +430,5 @@ assert (out == arr * 2).all() times2prime = mod.create_ufunc_signature() - out = times2prime(arr, sig='(d)->(d)', extobj=[0, 0, None]) + out = times2prime(arr, sig='d->d', extobj=[0, 0, None]) assert (out == arr * 2).all() diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -437,7 +437,7 @@ assert (b == a).all() d = np.empty([6,2], dtype=float) - d.view(int).fill(0xdeadbeef) + d.view('int64').fill(0xdeadbeef) e = d[0::3,:] e[...] = [[1, 2], [3, 4]] assert e.strides == (48, 8) @@ -447,7 +447,7 @@ assert (g == [[1, 2], [3, 4]]).all() k = np.empty([2, 8], dtype=float) - k.view(int).fill(0xdeadbeef) + k.view('int64').fill(0xdeadbeef) m = k[:, ::-4] m[...] 
= [[1, 2], [3, 4]] assert m.strides == (64, -32) From noreply at buildbot.pypy.org Mon Oct 26 08:38:26 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Oct 2015 13:38:26 +0100 (CET) Subject: [pypy-commit] pypy default: Added tag release-4.0.0 for changeset 850edf14b2c7 Message-ID: <20151026123826.BBA091C1046@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80461:d42afc04504f Date: 2015-10-26 23:37 +1100 http://bitbucket.org/pypy/pypy/changeset/d42afc04504f/ Log: Added tag release-4.0.0 for changeset 850edf14b2c7 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -16,3 +16,4 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 From noreply at buildbot.pypy.org Mon Oct 26 08:38:28 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Oct 2015 13:38:28 +0100 (CET) Subject: [pypy-commit] pypy default: update documentation Message-ID: <20151026123828.D1CAD1C1046@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80462:1a6ab3d799b1 Date: 2015-10-26 23:38 +1100 http://bitbucket.org/pypy/pypy/changeset/1a6ab3d799b1/ Log: update documentation diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst --- a/pypy/doc/release-4.0.0.rst +++ b/pypy/doc/release-4.0.0.rst @@ -198,6 +198,9 @@ * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + * Improve simple trace function performance by lazily calling fast2locals + and locals2fast only if truly necessary + .. _`vmprof`: https://vmprof.readthedocs.org .. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst --- a/pypy/doc/whatsnew-4.0.0.rst +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -92,3 +92,6 @@ .. branch: osx-libffi +.. 
branch: lazy-fast2locals +improve the performance of simple trace functions by lazily calling +fast2locals and locals2fast only if f_locals is actually accessed. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,6 @@ ========================= .. this is a revision shortly after release-4.0.0 -.. startrev: 9397d7c6f5aa +.. startrev: 57c9a47c70f6 -.. branch: lazy-fast2locals -improve the performance of simple trace functions by lazily calling -fast2locals and locals2fast only if f_locals is actually accessed. + From noreply at buildbot.pypy.org Mon Oct 26 08:38:30 2015 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 26 Oct 2015 13:38:30 +0100 (CET) Subject: [pypy-commit] pypy release-4.0.x: merge default into release Message-ID: <20151026123830.E70851C1046@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: release-4.0.x Changeset: r80463:814b3fadaad7 Date: 2015-10-26 23:38 +1100 http://bitbucket.org/pypy/pypy/changeset/814b3fadaad7/ Log: merge default into release diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -16,3 +16,4 @@ e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0 295ee98b69288471b0fcf2e0ede82ce5209eb90b release-2.6.0 f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 +850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst --- a/pypy/doc/release-4.0.0.rst +++ b/pypy/doc/release-4.0.0.rst @@ -198,6 +198,9 @@ * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + * Improve simple trace function performance by lazily calling fast2locals + and locals2fast only if truly necessary + .. _`vmprof`: https://vmprof.readthedocs.org .. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html diff --git a/pypy/doc/whatsnew-4.0.0.rst b/pypy/doc/whatsnew-4.0.0.rst --- a/pypy/doc/whatsnew-4.0.0.rst +++ b/pypy/doc/whatsnew-4.0.0.rst @@ -92,3 +92,6 @@ .. 
branch: osx-libffi +.. branch: lazy-fast2locals +improve the performance of simple trace functions by lazily calling +fast2locals and locals2fast only if f_locals is actually accessed. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,6 @@ ========================= .. this is a revision shortly after release-4.0.0 -.. startrev: 9397d7c6f5aa +.. startrev: 57c9a47c70f6 -.. branch: lazy-fast2locals -improve the performance of simple trace functions by lazily calling -fast2locals and locals2fast only if f_locals is actually accessed. + diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -430,5 +430,5 @@ assert (out == arr * 2).all() times2prime = mod.create_ufunc_signature() - out = times2prime(arr, sig='(d)->(d)', extobj=[0, 0, None]) + out = times2prime(arr, sig='d->d', extobj=[0, 0, None]) assert (out == arr * 2).all() diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -437,7 +437,7 @@ assert (b == a).all() d = np.empty([6,2], dtype=float) - d.view(int).fill(0xdeadbeef) + d.view('int64').fill(0xdeadbeef) e = d[0::3,:] e[...] = [[1, 2], [3, 4]] assert e.strides == (48, 8) @@ -447,7 +447,7 @@ assert (g == [[1, 2], [3, 4]]).all() k = np.empty([2, 8], dtype=float) - k.view(int).fill(0xdeadbeef) + k.view('int64').fill(0xdeadbeef) m = k[:, ::-4] m[...] = [[1, 2], [3, 4]] assert m.strides == (64, -32) From noreply at buildbot.pypy.org Mon Oct 26 10:49:22 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Mon, 26 Oct 2015 15:49:22 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: many more methods in place. 
it is now possible to step through the whole assemble_loop method for a trace with like int_add(...), finish(...). sadly the code is not yet correct :) Message-ID: <20151026144922.A2BDE1C06AD@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80464:bc3119a598cf Date: 2015-10-26 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/bc3119a598cf/ Log: many more methods in place. it is now possible to step through the whole assemble_loop method for a trace with like int_add(...), finish(...). sadly the code is not yet correct :) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -6,10 +6,13 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.codebuilder import InstrBuilder -from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.backend.zarch.arch import (WORD, JITFRAME_FIXED_SIZE) from rpython.jit.backend.zarch.opassembler import IntOpAssembler from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.metainterp.resoperation import rop +from rpython.rlib.debug import (debug_print, debug_start, debug_stop, + have_debug_prints) +from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, rffi, llmemory @@ -35,7 +38,8 @@ self.debug = False self.current_clt = looptoken.compiled_loop_token self.mc = InstrBuilder() - self.pending_guards = [] + self.pending_guard_tokens = [] + self.pending_guard_tokens_recovered = 0 #assert self.datablockwrapper is None --- but obscure case # possible, e.g. 
getting MemoryError and continuing allblocks = self.get_asmmemmgr_blocks(looptoken) @@ -259,6 +263,67 @@ assert 0, "not supported location" assert 0, "not supported location" + def update_frame_depth(self, frame_depth): + if frame_depth > 0x7fff: + raise JitFrameTooDeep + baseofs = self.cpu.get_baseofs_of_frame_field() + self.current_clt.frame_info.update_frame_depth(baseofs, frame_depth) + + def write_pending_failure_recoveries(self): + # for each pending guard, generate the code of the recovery stub + # at the end of self.mc. + for i in range(self.pending_guard_tokens_recovered, + len(self.pending_guard_tokens)): + tok = self.pending_guard_tokens[i] + tok.pos_recovery_stub = self.generate_quick_failure(tok) + self.pending_guard_tokens_recovered = len(self.pending_guard_tokens) + + def patch_stack_checks(self, frame_depth): + if frame_depth > 0x7fff: + raise JitFrameTooDeep # XXX + for traps_pos, jmp_target in self.frame_depth_to_patch: + pmc = OverwritingBuilder(self.mc, traps_pos, 3) + # three traps, so exactly three instructions to patch here + #pmc.cmpdi(0, r.r2.value, frame_depth) # 1 + #pmc.bc(7, 0, jmp_target - (traps_pos + 4)) # 2 "bge+" + #pmc.li(r.r0.value, frame_depth) # 3 + #pmc.overwrite() + + def materialize_loop(self, looptoken): + self.datablockwrapper.done() + self.datablockwrapper = None + allblocks = self.get_asmmemmgr_blocks(looptoken) + start = self.mc.materialize(self.cpu, allblocks, + self.cpu.gc_ll_descr.gcrootmap) + return start + + def patch_pending_failure_recoveries(self, rawstart): + assert (self.pending_guard_tokens_recovered == + len(self.pending_guard_tokens)) + clt = self.current_clt + for tok in self.pending_guard_tokens: + addr = rawstart + tok.pos_jump_offset + # + # XXX see patch_jump_for_descr() + tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub + # + relative_target = tok.pos_recovery_stub - tok.pos_jump_offset + # + if not tok.guard_not_invalidated(): + mc = InstrBuilder() + mc.b_cond_offset(relative_target, 
tok.fcond) + mc.copy_to_raw_memory(addr) + else: + # GUARD_NOT_INVALIDATED, record an entry in + # clt.invalidate_positions of the form: + # (addr-in-the-code-of-the-not-yet-written-jump-target, + # relative-target-to-use) + relpos = tok.pos_jump_offset + clt.invalidate_positions.append((rawstart + relpos, + relative_target)) + + + # ________________________________________ # ASSEMBLER EMISSION From noreply at buildbot.pypy.org Mon Oct 26 18:27:09 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 26 Oct 2015 23:27:09 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: add 4.0.0 checksums Message-ID: <20151026222709.0797B1C0403@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r644:7d4fc67b8725 Date: 2015-10-26 23:22 +0100 http://bitbucket.org/pypy/pypy.org/changeset/7d4fc67b8725/ Log: add 4.0.0 checksums diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -355,6 +355,19 @@

    Checksums

    Here are the checksums for each of the downloads

    +

    pypy-4.0.0 md5:

    +
    +c616cffee0f344c37fd4e045a7a87054  pypy-4.0.0-freebsd64.tar.bz2
    +82b11e63ab81db1604575dadd5cea427  pypy-4.0.0-linux64.tar.bz2
    +f91946d5abd5dff8e05ab0b5acffb432  pypy-4.0.0-linux-armel.tar.bz2
    +1db3ae7237a8a01f61b3b4ade65684ab  pypy-4.0.0-linux-armhf-raring.tar.bz2
    +14ac97384cfe772e4dab1974bb0010b6  pypy-4.0.0-linux-armhf-raspbian.tar.bz2
    +e1050332d8e1e6054906fa0a30cdc282  pypy-4.0.0-linux.tar.bz2
    +fcd8e2dacc2340173be206ab9de1d3fc  pypy-4.0.0-osx64.tar.bz2
    +57722fd5fc01734839ecc523ce965fbb  pypy-4.0.0-src.tar.bz2
    +8e90eed8aea1686d98c2e7dce5bda1e0  pypy-4.0.0-src.zip
    +fe4733ef55c9a4692abbc0caeb2f2b95  pypy-4.0.0-win32.zip
    +

    pypy-2.6.1 md5:

     2346426786459fdc72ad03fe75a98b35  pypy-2.6.1-freebsd64.tar.bz2
    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -369,6 +369,20 @@
     
     Here are the checksums for each of the downloads
     
    +pypy-4.0.0 md5::
    +
    +    c616cffee0f344c37fd4e045a7a87054  pypy-4.0.0-freebsd64.tar.bz2
    +    82b11e63ab81db1604575dadd5cea427  pypy-4.0.0-linux64.tar.bz2
    +    f91946d5abd5dff8e05ab0b5acffb432  pypy-4.0.0-linux-armel.tar.bz2
    +    1db3ae7237a8a01f61b3b4ade65684ab  pypy-4.0.0-linux-armhf-raring.tar.bz2
    +    14ac97384cfe772e4dab1974bb0010b6  pypy-4.0.0-linux-armhf-raspbian.tar.bz2
    +    e1050332d8e1e6054906fa0a30cdc282  pypy-4.0.0-linux.tar.bz2
    +    fcd8e2dacc2340173be206ab9de1d3fc  pypy-4.0.0-osx64.tar.bz2
    +    57722fd5fc01734839ecc523ce965fbb  pypy-4.0.0-src.tar.bz2
    +    8e90eed8aea1686d98c2e7dce5bda1e0  pypy-4.0.0-src.zip
    +    fe4733ef55c9a4692abbc0caeb2f2b95  pypy-4.0.0-win32.zip
    +
    +
     pypy-2.6.1 md5::
     
         2346426786459fdc72ad03fe75a98b35  pypy-2.6.1-freebsd64.tar.bz2
    
    From noreply at buildbot.pypy.org  Mon Oct 26 18:27:11 2015
    From: noreply at buildbot.pypy.org (cfbolz)
    Date: Mon, 26 Oct 2015 23:27:11 +0100 (CET)
    Subject: [pypy-commit] pypy.org extradoc: add 4.0.0 checksums and fix a typo
    Message-ID: <20151026222711.046751C0590@cobra.cs.uni-duesseldorf.de>
    
    Author: Carl Friedrich Bolz 
    Branch: extradoc
    Changeset: r645:7f8ed30c6c40
    Date: 2015-10-26 23:27 +0100
    http://bitbucket.org/pypy/pypy.org/changeset/7f8ed30c6c40/
    
    Log:	add 4.0.0 checksums and fix a typo
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -115,33 +115,19 @@
     

    Python2.7 compatible PyPy 2.6.1

    -
    @@ -398,9 +384,22 @@ 2c9f0054f3b93a6473f10be35277825a pypy-1.8-sandbox-linux64.tar.bz2 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2 -
    -
    pypy-2.6.1 sha1::
    -
    119148e67e94419e86ba11b6cfab826c093496eb pypy-2.6.1-freebsd64.tar.bz2 +

    pypy-4.0.0 md5:

    +
    +8de2a247e26872790090b7a7bc9128d263456ada  pypy-4.0.0-freebsd64.tar.bz2
    +aed958fdc720b77fdd52cb826239ccbd6d01f465  pypy-4.0.0-linux64.tar.bz2
    +65b50e0299dc0695a8460c14b401c783216464b1  pypy-4.0.0-linux-armel.tar.bz2
    +663afb7b0d77ddf53c78d49dbc36c6e8349c7fbb  pypy-4.0.0-linux-armhf-raring.tar.bz2
    +9998bfc5d0691ac23c12b268a7c8937dda0f4ed4  pypy-4.0.0-linux-armhf-raspbian.tar.bz2
    +685fb3b3e345d5c6404b4b143e3bae623fc727d3  pypy-4.0.0-linux.tar.bz2
    +7656c9975b353c801a15b924930ee47f173280b9  pypy-4.0.0-osx64.tar.bz2
    +b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
    +24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
    +d519d5cfccbefb9d9773e655ee8865cf265f033e  pypy-4.0.0-win32.zip
    +
    +

    pypy-2.6.1 sha1:

    +
    +119148e67e94419e86ba11b6cfab826c093496eb  pypy-2.6.1-freebsd64.tar.bz2
     20197f670edb3783565bd092c14658fca61c695a  pypy-2.6.1-linux64.tar.bz2
     033f65def368025f5e051320be233ec60102f143  pypy-2.6.1-linux-armel.tar.bz2
     672bc22ad81c471b0d8622e826bf16c522bfbeb0  pypy-2.6.1-linux-armhf-raring.tar.bz2
    @@ -409,8 +408,8 @@
     a7b2dd8380ae96a9a8934e99d898853257c2e7e4  pypy-2.6.1-osx64.tar.bz2
     bf0f986bc64b71489983a12f2eb9b504d2ac6fd4  pypy-2.6.1-src.tar.bz2
     d4f7e6b7a2e85ea10365be5cadf46bc5d618dab3  pypy-2.6.1-src.zip
    -38f710c16f06cc4b99ff2b5bda902624711149bb  pypy-2.6.1-win32.zip
    -
    +38f710c16f06cc4b99ff2b5bda902624711149bb pypy-2.6.1-win32.zip +

    pypy3-2.4.0 sha1:

     7d715742f6929351b310a2ca3b924cab35913089  pypy3-2.4.0-linux64.tar.bz2
    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -82,7 +82,7 @@
     * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below)
     * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2,  Ubuntu Precise)`__ (see ``[1]`` below)
     * `Mac OS/X binary (64bit)`__
    -* `FreeBSD 9.2 x86 64 bit`__ (see ``[1]` below)
    +* `FreeBSD 9.2 x86 64 bit`__ (see ``[1]`` below)
     * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library
       installer `vcredist_x86.exe`_.)
     * `Source (tar.bz2)`__; `Source (zip)`__.  See below for more about the sources.
    @@ -414,7 +414,23 @@
        2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
        009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
    +
    +pypy-4.0.0 md5::
    +
    +    8de2a247e26872790090b7a7bc9128d263456ada  pypy-4.0.0-freebsd64.tar.bz2
    +    aed958fdc720b77fdd52cb826239ccbd6d01f465  pypy-4.0.0-linux64.tar.bz2
    +    65b50e0299dc0695a8460c14b401c783216464b1  pypy-4.0.0-linux-armel.tar.bz2
    +    663afb7b0d77ddf53c78d49dbc36c6e8349c7fbb  pypy-4.0.0-linux-armhf-raring.tar.bz2
    +    9998bfc5d0691ac23c12b268a7c8937dda0f4ed4  pypy-4.0.0-linux-armhf-raspbian.tar.bz2
    +    685fb3b3e345d5c6404b4b143e3bae623fc727d3  pypy-4.0.0-linux.tar.bz2
    +    7656c9975b353c801a15b924930ee47f173280b9  pypy-4.0.0-osx64.tar.bz2
    +    b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
    +    24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
    +    d519d5cfccbefb9d9773e655ee8865cf265f033e  pypy-4.0.0-win32.zip
    +
    +
     pypy-2.6.1 sha1::
    +
         119148e67e94419e86ba11b6cfab826c093496eb  pypy-2.6.1-freebsd64.tar.bz2
         20197f670edb3783565bd092c14658fca61c695a  pypy-2.6.1-linux64.tar.bz2
         033f65def368025f5e051320be233ec60102f143  pypy-2.6.1-linux-armel.tar.bz2
    @@ -442,3 +458,4 @@
     
        895aaf7bba5787dd30adda5cc0e0e7fc297c0ca7  pypy-1.8-sandbox-linux64.tar.bz2
        be94460bed8b2682880495435c309b6611ae2c31  pypy-1.8-sandbox-linux.tar.bz2
    +
    
    From noreply at buildbot.pypy.org  Mon Oct 26 20:22:46 2015
    From: noreply at buildbot.pypy.org (stefanor)
    Date: Tue, 27 Oct 2015 01:22:46 +0100 (CET)
    Subject: [pypy-commit] pypy release-15.11: close release-15.11,
 we released it as 4.0.0
    Message-ID: <20151027002246.CD82C1C0403@cobra.cs.uni-duesseldorf.de>
    
    Author: Stefano Rivera 
    Branch: release-15.11
    Changeset: r80465:bc6d1dab0c3b
    Date: 2015-10-26 17:21 -0700
    http://bitbucket.org/pypy/pypy/changeset/bc6d1dab0c3b/
    
Log:	close release-15.11, we released it as 4.0.0
    
    
    From noreply at buildbot.pypy.org  Tue Oct 27 02:15:07 2015
    From: noreply at buildbot.pypy.org (mattip)
    Date: Tue, 27 Oct 2015 07:15:07 +0100 (CET)
    Subject: [pypy-commit] pypy.org extradoc: update download page for release
     4.0.0
    Message-ID: <20151027061507.B2F801C0403@cobra.cs.uni-duesseldorf.de>
    
    Author: mattip 
    Branch: extradoc
    Changeset: r646:9ccd7cba87b2
    Date: 2015-10-27 17:16 +1100
    http://bitbucket.org/pypy/pypy.org/changeset/9ccd7cba87b2/
    
    Log:	update download page for release 4.0.0
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -74,7 +74,7 @@
     performance improvements.

    We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:

    @@ -113,19 +113,19 @@ degrees of being up-to-date.
  • -
    -

    Python2.7 compatible PyPy 2.6.1

    +
    +

    Python2.7 compatible PyPy 4.0.0

    @@ -191,7 +191,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-2.6.1/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy-4.0.0/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -233,7 +233,7 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    @@ -354,19 +354,6 @@ 8e90eed8aea1686d98c2e7dce5bda1e0 pypy-4.0.0-src.zip fe4733ef55c9a4692abbc0caeb2f2b95 pypy-4.0.0-win32.zip -

    pypy-2.6.1 md5:

    -
    -2346426786459fdc72ad03fe75a98b35  pypy-2.6.1-freebsd64.tar.bz2
    -eb265bad9f61029f7a6bc5032d0e5459  pypy-2.6.1-linux64.tar.bz2
    -45418996d8d81c7d72437d4a6e610fb3  pypy-2.6.1-linux-armel.tar.bz2
    -980cce0274b0a80d8b2da1242ab323e9  pypy-2.6.1-linux-armhf-raring.tar.bz2
    -56e80fdb9b3cdec59b4f38af2456c63c  pypy-2.6.1-linux-armhf-raspbian.tar.bz2
    -36c6d0ea043027e49cabb6a31fb3388a  pypy-2.6.1-linux.tar.bz2
    -d6f847a3c2fb795f5f4fbd670459908c  pypy-2.6.1-osx64.tar.bz2
    -7e53f72eeb6d9947fd5db6872213404d  pypy-2.6.1-src.tar.bz2
    -7df9dce6c6d353069463e4ecdf460fbf  pypy-2.6.1-src.zip
    -890465185948f4043c7104c05bf75fe2  pypy-2.6.1-win32.zip
    -

    pypy3-2.4.0 md5:

     eadbc9790823fc0ae40c943087cd7cb3  pypy3-2.4.0-linux64.tar.bz2
    @@ -384,7 +371,7 @@
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
    -

    pypy-4.0.0 md5:

    +

    pypy-4.0.0 sha1:

     8de2a247e26872790090b7a7bc9128d263456ada  pypy-4.0.0-freebsd64.tar.bz2
     aed958fdc720b77fdd52cb826239ccbd6d01f465  pypy-4.0.0-linux64.tar.bz2
    @@ -397,19 +384,6 @@
     24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
     d519d5cfccbefb9d9773e655ee8865cf265f033e  pypy-4.0.0-win32.zip
     
    -

    pypy-2.6.1 sha1:

    -
    -119148e67e94419e86ba11b6cfab826c093496eb  pypy-2.6.1-freebsd64.tar.bz2
    -20197f670edb3783565bd092c14658fca61c695a  pypy-2.6.1-linux64.tar.bz2
    -033f65def368025f5e051320be233ec60102f143  pypy-2.6.1-linux-armel.tar.bz2
    -672bc22ad81c471b0d8622e826bf16c522bfbeb0  pypy-2.6.1-linux-armhf-raring.tar.bz2
    -6c2b1113237da87867b0b06a044b26f506050abc  pypy-2.6.1-linux-armhf-raspbian.tar.bz2
    -1f27ed11398172a45f870cc37cfd0992bf49fba8  pypy-2.6.1-linux.tar.bz2
    -a7b2dd8380ae96a9a8934e99d898853257c2e7e4  pypy-2.6.1-osx64.tar.bz2
    -bf0f986bc64b71489983a12f2eb9b504d2ac6fd4  pypy-2.6.1-src.tar.bz2
    -d4f7e6b7a2e85ea10365be5cadf46bc5d618dab3  pypy-2.6.1-src.zip
    -38f710c16f06cc4b99ff2b5bda902624711149bb  pypy-2.6.1-win32.zip
    -

    pypy3-2.4.0 sha1:

     7d715742f6929351b310a2ca3b924cab35913089  pypy3-2.4.0-linux64.tar.bz2
    @@ -428,6 +402,12 @@
     be94460bed8b2682880495435c309b6611ae2c31  pypy-1.8-sandbox-linux.tar.bz2
     
  • +
    +

    Docutils System Messages

    +
    +

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    +Unknown target name: “what's new in pypy4.0.0?”.
    +
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -14,12 +14,12 @@ We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for: -* the Python2.7 compatible release — **PyPy 2.6.1** — (`what's new in PyPy 2.6.1?`_) +* the Python2.7 compatible release — **PyPy 4.0.0** — (`what's new in PyPy4.0.0?`_) * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_). * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only) -.. _what's new in PyPy 2.6.1?: http://doc.pypy.org/en/latest/release-2.6.1.html +.. _what's new in PyPy 4.0.0?: http://doc.pypy.org/en/latest/release-4.0.0.html .. _what's new in PyPy3 2.4.0?: http://doc.pypy.org/en/latest/release-pypy3-2.4.0.html @@ -73,7 +73,7 @@ .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux -Python2.7 compatible PyPy 2.6.1 +Python2.7 compatible PyPy 4.0.0 ----------------------------------- * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below) @@ -89,16 +89,16 @@ * `All our downloads,`__ including previous versions. We also have a mirror_, but please use only if you have troubles accessing the links above -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-linux.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-linux64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-linux-armhf-raspbian.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-linux-armhf-raring.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-linux-armel.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-freebsd64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-win32.zip -.. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-src.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-src.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux-armhf-raspbian.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux-armhf-raring.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux-armel.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-osx64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-freebsd64.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-win32.zip +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-src.tar.bz2 +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-src.zip .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 .. __: https://bitbucket.org/pypy/pypy/downloads .. _mirror: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/ @@ -192,7 +192,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in ``/opt``, and if you want, put a symlink from somewhere like -``/usr/local/bin/pypy`` to ``/path/to/pypy-2.6.1/bin/pypy``. Do +``/usr/local/bin/pypy`` to ``/path/to/pypy-4.0.0/bin/pypy``. Do not move or copy the executable ``pypy`` outside the tree --- put a symlink to it, otherwise it will not find its libraries. @@ -248,9 +248,9 @@ 1. Get the source code. The following packages contain the source at the same revision as the above binaries: - * `pypy-2.6.1-src.tar.bz2`__ (sources) + * `pypy-4.0.0-src.tar.bz2`__ (sources) - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-2.6.1-src.tar.bz2 + .. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-src.tar.bz2 Or you can checkout the current trunk using Mercurial_ (the trunk usually works and is of course more up-to-date):: @@ -382,20 +382,6 @@ 8e90eed8aea1686d98c2e7dce5bda1e0 pypy-4.0.0-src.zip fe4733ef55c9a4692abbc0caeb2f2b95 pypy-4.0.0-win32.zip - -pypy-2.6.1 md5:: - - 2346426786459fdc72ad03fe75a98b35 pypy-2.6.1-freebsd64.tar.bz2 - eb265bad9f61029f7a6bc5032d0e5459 pypy-2.6.1-linux64.tar.bz2 - 45418996d8d81c7d72437d4a6e610fb3 pypy-2.6.1-linux-armel.tar.bz2 - 980cce0274b0a80d8b2da1242ab323e9 pypy-2.6.1-linux-armhf-raring.tar.bz2 - 56e80fdb9b3cdec59b4f38af2456c63c pypy-2.6.1-linux-armhf-raspbian.tar.bz2 - 36c6d0ea043027e49cabb6a31fb3388a pypy-2.6.1-linux.tar.bz2 - d6f847a3c2fb795f5f4fbd670459908c pypy-2.6.1-osx64.tar.bz2 - 7e53f72eeb6d9947fd5db6872213404d pypy-2.6.1-src.tar.bz2 - 7df9dce6c6d353069463e4ecdf460fbf pypy-2.6.1-src.zip - 890465185948f4043c7104c05bf75fe2 pypy-2.6.1-win32.zip - pypy3-2.4.0 md5:: eadbc9790823fc0ae40c943087cd7cb3 pypy3-2.4.0-linux64.tar.bz2 @@ -415,7 +401,7 @@ 009c970b5fa75754ae4c32a5d108a8d4 pypy-1.8-sandbox-linux.tar.bz2 -pypy-4.0.0 md5:: +pypy-4.0.0 sha1:: 8de2a247e26872790090b7a7bc9128d263456ada pypy-4.0.0-freebsd64.tar.bz2 aed958fdc720b77fdd52cb826239ccbd6d01f465 pypy-4.0.0-linux64.tar.bz2 @@ -428,20 +414,6 @@ 24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd pypy-4.0.0-src.zip d519d5cfccbefb9d9773e655ee8865cf265f033e pypy-4.0.0-win32.zip - -pypy-2.6.1 sha1:: - - 119148e67e94419e86ba11b6cfab826c093496eb pypy-2.6.1-freebsd64.tar.bz2 - 20197f670edb3783565bd092c14658fca61c695a pypy-2.6.1-linux64.tar.bz2 - 033f65def368025f5e051320be233ec60102f143 pypy-2.6.1-linux-armel.tar.bz2 - 672bc22ad81c471b0d8622e826bf16c522bfbeb0 pypy-2.6.1-linux-armhf-raring.tar.bz2 - 6c2b1113237da87867b0b06a044b26f506050abc pypy-2.6.1-linux-armhf-raspbian.tar.bz2 - 1f27ed11398172a45f870cc37cfd0992bf49fba8 pypy-2.6.1-linux.tar.bz2 - a7b2dd8380ae96a9a8934e99d898853257c2e7e4 pypy-2.6.1-osx64.tar.bz2 - 
bf0f986bc64b71489983a12f2eb9b504d2ac6fd4 pypy-2.6.1-src.tar.bz2 - d4f7e6b7a2e85ea10365be5cadf46bc5d618dab3 pypy-2.6.1-src.zip - 38f710c16f06cc4b99ff2b5bda902624711149bb pypy-2.6.1-win32.zip - pypy3-2.4.0 sha1:: 7d715742f6929351b310a2ca3b924cab35913089 pypy3-2.4.0-linux64.tar.bz2 From noreply at buildbot.pypy.org Tue Oct 27 06:20:42 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 27 Oct 2015 11:20:42 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: adapted some arch details, added failure recovery, finish resop (partly) Message-ID: <20151027102042.F028E1C0149@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80466:85a292e8a0ad Date: 2015-10-27 11:20 +0100 http://bitbucket.org/pypy/pypy/changeset/85a292e8a0ad/ Log: adapted some arch details, added failure recovery, finish resop (partly) diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -1,4 +1,20 @@ +WORD = 8 -WORD = 4 +# +# OFFSET +# +------------------------------+ 0 +# | gpr save are (int+float) | +# +------------------------------+ 8 +# | local vars | +# +------------------------------+ 0 +# | | +# +------------------------------+ +# | | +# +------------------------------+ <- SP 0 (r15) +# -JITFRAME_FIXED_SIZE = 48 +GPR_STACK_SAVE_IN_BYTES = 120 +STD_FRAME_SIZE_IN_BYTES = 140 +THREADLOCAL_ADDR_OFFSET = 8 + +assert STD_FRAME_SIZE_IN_BYTES % 2 == 0 diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -6,12 +6,16 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as loc from rpython.jit.backend.zarch.codebuilder import InstrBuilder -from rpython.jit.backend.zarch.arch import (WORD, JITFRAME_FIXED_SIZE) +from rpython.jit.backend.zarch.registers import 
JITFRAME_FIXED_SIZE +from rpython.jit.backend.zarch.arch import (WORD, + STD_FRAME_SIZE_IN_BYTES, GPR_STACK_SAVE_IN_BYTES, + THREADLOCAL_ADDR_OFFSET) from rpython.jit.backend.zarch.opassembler import IntOpAssembler from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.metainterp.resoperation import rop from rpython.rlib.debug import (debug_print, debug_start, debug_stop, have_debug_prints) +from rpython.jit.metainterp.history import (INT, REF, FLOAT) from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id from rpython.rlib import rgc @@ -30,6 +34,7 @@ self.stack_check_slowpath = 0 self.loop_run_counters = [] self.gcrootmap_retaddr_forced = 0 + self.failure_recovery_code = [0, 0, 0, 0] def setup(self, looptoken): BaseAssembler.setup(self, looptoken) @@ -62,12 +67,14 @@ return clt.asmmemmgr_blocks def gen_func_prolog(self): + """ NOT_RPYTHON """ STACK_FRAME_SIZE = 40 - self.mc.STMG(r.r11, r.r15, loc.addr(-STACK_FRAME_SIZE, r.sp)) + self.mc.STMG(r.r11, r.r15, loc.addr(-STACK_FRAME_SIZE, r.SP)) self.mc.AHI(r.sp, loc.imm(-STACK_FRAME_SIZE)) def gen_func_epilog(self): - self.mc.LMG(r.r11, r.r15, loc.addr(0, r.SPP)) + """ NOT_RPYTHON """ + self.mc.LMG(r.r11, r.r15, loc.addr(0, r.SP)) self.jmpto(r.r14) def jmpto(self, register): @@ -76,7 +83,42 @@ self.mc.BCR_rr(0xf, register.value) def _build_failure_recovery(self, exc, withfloats=False): - pass # TODO + mc = InstrBuilder() + self.mc = mc + # fill in the jf_descr and jf_gcmap fields of the frame according + # to which failure we are resuming from. These are set before + # this function is called (see generate_quick_failure()). 
+ ofs = self.cpu.get_ofs_of_frame_field('jf_descr') + ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') + mc.STG(r.r2, loc.addr(ofs, r.SPP)) + mc.STG(r.r3, loc.addr(ofs2, r.SPP)) + + self._push_core_regs_to_jitframe(mc) + if withfloats: + self._push_fp_regs_to_jitframe(mc) + + if exc: + pass # TODO + #xxx + ## We might have an exception pending. + #mc.load_imm(r.r2, self.cpu.pos_exc_value()) + ## Copy it into 'jf_guard_exc' + #offset = self.cpu.get_ofs_of_frame_field('jf_guard_exc') + #mc.load(r.r0.value, r.r2.value, 0) + #mc.store(r.r0.value, r.SPP.value, offset) + ## Zero out the exception fields + #diff = self.cpu.pos_exception() - self.cpu.pos_exc_value() + #assert _check_imm_arg(diff) + #mc.li(r.r0.value, 0) + #mc.store(r.r0.value, r.r2.value, 0) + #mc.store(r.r0.value, r.r2.value, diff) + + # now we return from the complete frame, which starts from + # _call_header_with_stack_check(). The _call_footer below does it. + self._call_footer() + rawstart = mc.materialize(self.cpu, []) + self.failure_recovery_code[exc + 2 * withfloats] = rawstart + self.mc = None def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False): pass # TODO @@ -106,7 +148,23 @@ pass # TODO def _call_header_with_stack_check(self): - pass # TODO + self._call_header() + if self.stack_check_slowpath == 0: + pass # not translated + else: + endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + diff = lengthaddr - endaddr + assert _check_imm_arg(diff) + + mc = self.mc + mc.load_imm(r.SCRATCH, self.stack_check_slowpath) + mc.load_imm(r.SCRATCH2, endaddr) # li r2, endaddr + mc.mtctr(r.SCRATCH.value) + mc.load(r.SCRATCH.value, r.SCRATCH2.value, 0) # ld r0, [end] + mc.load(r.SCRATCH2.value, r.SCRATCH2.value, diff)# ld r2, [length] + mc.subf(r.SCRATCH.value, r.SP.value, r.SCRATCH.value) # sub r0, SP + mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value, signed=False) + mc.bgtctrl() @rgc.no_release_gil def assemble_loop(self, jd_id, unique_id, logger, loopname, inputargs, @@ -322,7 
+380,48 @@ clt.invalidate_positions.append((rawstart + relpos, relative_target)) + def _call_header(self): + # Reserve space for a function descriptor, 3 words + #self.mc.write64(0) + #self.mc.write64(0) + #self.mc.write64(0) + # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES + self.mc.STMG(r.r6, r.r15, loc.addr(-GPR_STACK_SAVE_IN_BYTES, r.SP)) + self.mc.AGHI(r.SP, loc.imm(-STD_FRAME_SIZE_IN_BYTES)) + + # save r4, the second argument, to THREADLOCAL_ADDR_OFFSET + self.mc.STG(r.r3, loc.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + + # move the first argument to SPP: the jitframe object + self.mc.LGR(r.SPP, r.r2) + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_header_shadowstack(gcrootmap) + + def _call_footer(self): + # the return value is the jitframe + self.mc.LGR(r.r2, r.SPP) + + gcrootmap = self.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._call_footer_shadowstack(gcrootmap) + + # restore registers r6-r15 + upoffset = STD_FRAME_SIZE_IN_BYTES-GPR_STACK_SAVE_IN_BYTES + self.mc.LMG(r.r6, r.r15, loc.addr(upoffset, r.SP)) + self.jmpto(r.r14) + + def _push_core_regs_to_jitframe(self, mc, includes=r.MANAGED_REGS): + base_ofs = self.cpu.get_baseofs_of_frame_field() + assert len(includes) == 16 + mc.STMG(r.r0, r.r15, loc.addr(base_ofs, r.SPP)) + + def _push_fp_regs_to_jitframe(self, mc, includes=r.MANAGED_FP_REGS): + base_ofs = self.cpu.get_baseofs_of_frame_field() + assert len(includes) == 16 + mc.LMG(r.r0, r.r15, loc.addr(base_ofs, r.SPP)) # ________________________________________ # ASSEMBLER EMISSION @@ -331,7 +430,52 @@ pass # TODO def emit_finish(self, op, arglocs, regalloc): - pass # TODO + base_ofs = self.cpu.get_baseofs_of_frame_field() + if len(arglocs) > 1: + [return_val, fail_descr_loc] = arglocs + if op.getarg(0).type == FLOAT: + raise NotImplementedError + #self.mc.stfd(return_val, loc.addr(base_ofs, r.SPP)) + else: + self.mc.STG(return_val, loc.addr(base_ofs, 
r.SPP)) + else: + [fail_descr_loc] = arglocs + + ofs = self.cpu.get_ofs_of_frame_field('jf_descr') + ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') + + # gcmap logic here: + arglist = op.getarglist() + if arglist and arglist[0].type == REF: + if self._finish_gcmap: + # we're returning with a guard_not_forced_2, and + # additionally we need to say that the result contains + # a reference too: + self._finish_gcmap[0] |= r_uint(1) + gcmap = self._finish_gcmap + else: + gcmap = self.gcmap_for_finish + elif self._finish_gcmap: + # we're returning with a guard_not_forced_2 + gcmap = self._finish_gcmap + else: + gcmap = lltype.nullptr(jitframe.GCMAP) + # TODO self.load_gcmap(self.mc, r.r2, gcmap) + + assert fail_descr_loc.getint() <= 2**12-1 + self.mc.LGHI(r.r5, fail_descr_loc) + self.mc.STG(r.r5, loc.addr(ofs, r.SPP)) + self.mc.XGR(r.r2, r.r2) + self.mc.STG(r.r2, loc.addr(ofs2, r.SPP)) + + # exit function + self._call_footer() + + def load_gcmap(self, mc, reg, gcmap): + # load the current gcmap into register 'reg' + ptr = rffi.cast(lltype.Signed, gcmap) + #mc.LGHI(mc.pool + #mc.load_imm(reg, ptr) def notimplemented_op(asm, op, arglocs, regalloc): print "[ZARCH/asm] %s not implemented" % op.getopname() diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -73,15 +73,23 @@ # load memory 'LMD': ('sse', ['\xEF']), 'LMG': ('rsy', ['\xEB','\x04']), + 'LHI': ('ri', ['\xA7','\x08']), 'LGHI': ('ri', ['\xA7','\x09']), 'LR': ('rr', ['\x18']), 'LGR': ('rre', ['\xB9','\x04']), 'LG': ('rxy', ['\xE3','\x04']), + # store memory + 'STMG': ('rsy', ['\xEB','\x24']), + 'ST': ('rx', ['\x50']), + 'STG': ('rxy', ['\xE3','\x24']), + 'STY': ('rxy', ['\xE3','\x50']), + # store float 'STE': ('rx', ['\x70']), 'STD': ('rx', ['\x60']), + # load binary float # E -> short (32bit), # D -> long (64bit), @@ -166,7 +174,6 @@ 'MVCK': ('ssd', ['\xD9']), 'PKA': 
('ssf', ['\xE9']), - 'STMG': ('rsy', ['\xEB','\x24']), 'SVC': ('i', ['\x0A']), } diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -1,5 +1,5 @@ from rpython.jit.metainterp.history import INT, FLOAT -from rpython.jit.backend.zarch.arch import WORD, JITFRAME_FIXED_SIZE +from rpython.jit.backend.zarch.arch import WORD class AssemblerLocation(object): _immutable_ = True @@ -190,6 +190,7 @@ return ImmLocation(i) def get_fp_offset(base_ofs, position): + from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE return base_ofs + WORD * (position + JITFRAME_FIXED_SIZE) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -465,7 +465,17 @@ prepare_int_add = regallochelp._prepare_binary_arith def prepare_finish(self, op): - return [] + descr = op.getdescr() + fail_descr = cast_instance_to_gcref(descr) + # we know it does not move, but well + rgc._make_sure_does_not_move(fail_descr) + fail_descr = rffi.cast(lltype.Signed, fail_descr) + if op.numargs() > 0: + loc = self.ensure_reg(op.getarg(0)) + locs = [loc, imm(fail_descr)] + else: + locs = [imm(fail_descr)] + return locs def notimplemented(self, op): msg = '[S390X/regalloc] %s not implemented\n' % op.getopname() diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -1,5 +1,3 @@ - - from rpython.jit.backend.zarch.locations import FloatRegisterLocation from rpython.jit.backend.zarch.locations import RegisterLocation @@ -9,13 +7,28 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers -MANAGED_REGS = [r0,r1,r2,r3,r4] -VOLATILES = [r0,r1,r2,r3,r4] -SPP = r15 +MANAGED_REGS = registers +VOLATILES = 
[r6,r7,r8,r9,r10,r11,r12,r13,r14,r15] +SP = r15 RETURN = r14 +POOL = r13 +SPP = r11 [f0,f1,f2,f3,f4,f5,f6,f7,f8, f9,f10,f11,f12,f13,f14,f15] = fpregisters MANAGED_FP_REGS = fpregisters VOLATILES_FLOAT = [] + +# The JITFRAME_FIXED_SIZE is measured in words, and should be the +# number of registers that need to be saved into the jitframe when +# failing a guard, for example. +ALL_REG_INDEXES = {} +for _r in MANAGED_REGS: + ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) +for _r in MANAGED_FP_REGS: + ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) + 1 + # we leave a never-used hole for f0 ^^^ in the jitframe + # to simplify store_info_on_descr(), which assumes that the + # register number N is at offset N after the non-fp regs +JITFRAME_FIXED_SIZE = len(ALL_REG_INDEXES) + 1 diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -15,7 +15,7 @@ class FakeStats(object): pass -class TestPPC(LLtypeBackendTest): +class TestZARCH(LLtypeBackendTest): # for the individual tests see # ====> ../../test/runner_test.py From noreply at buildbot.pypy.org Tue Oct 27 12:00:55 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 27 Oct 2015 17:00:55 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: added floating point operations (add, sub, mul, div) as resoperation, added first version of the literal pool prepended to the assembler piece Message-ID: <20151027160056.03C681C05B5@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80467:768bbe0e9944 Date: 2015-10-27 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/768bbe0e9944/ Log: added floating point operations (add,sub,mul,div) as resoperation, added first version of the literal pool prepended to the assembler piece diff --git a/rpython/jit/backend/zarch/arch.py b/rpython/jit/backend/zarch/arch.py --- 
a/rpython/jit/backend/zarch/arch.py +++ b/rpython/jit/backend/zarch/arch.py @@ -1,4 +1,5 @@ -WORD = 8 +WORD = 8 # well, we only support 64 bit +DOUBLE_WORD = 8 # # OFFSET diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -4,13 +4,14 @@ from rpython.jit.backend.model import CompiledLoopToken from rpython.jit.backend.zarch import conditions as c from rpython.jit.backend.zarch import registers as r -from rpython.jit.backend.zarch import locations as loc +from rpython.jit.backend.zarch import locations as l from rpython.jit.backend.zarch.codebuilder import InstrBuilder from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE from rpython.jit.backend.zarch.arch import (WORD, STD_FRAME_SIZE_IN_BYTES, GPR_STACK_SAVE_IN_BYTES, THREADLOCAL_ADDR_OFFSET) -from rpython.jit.backend.zarch.opassembler import IntOpAssembler +from rpython.jit.backend.zarch.opassembler import (IntOpAssembler, + FloatOpAssembler) from rpython.jit.backend.zarch.regalloc import Regalloc from rpython.jit.metainterp.resoperation import rop from rpython.rlib.debug import (debug_print, debug_start, debug_stop, @@ -19,13 +20,97 @@ from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id from rpython.rlib import rgc +from rpython.rlib.longlong2float import float2longlong from rpython.rtyper.lltypesystem import lltype, rffi, llmemory -class AssemblerZARCH(BaseAssembler, IntOpAssembler): +class LiteralPool(object): + def __init__(self): + self.size = 0 + # the offset to index the pool + self.rel_offset = 0 + self.offset = 0 + self.places = [] + + def place(self, var): + assert var.is_constant() + self.places.append(var) + off = self.rel_offset + self.rel_offset += 8 + return off + + def ensure_can_hold_constants(self, op): + for arg in op.getarglist(): + if arg.is_constant(): + 
self.reserve_literal(8) + + def reserve_literal(self, size): + self.size += size + + def reset(self): + self.size = 0 + self.offset = 0 + self.rel_offset = 0 + + def walk_operations(self, operations): + # O(len(operations)). I do not think there is a way + # around this. + # + # Problem: + # constants such as floating point operations, plain pointers, + # or integers might serve as parameter to an operation. thus + # it must be loaded into a register. You cannot do this with + # assembler immediates, because the biggest immediate value + # is 32 bit for branch instructions. + # + # Solution: + # the current solution (gcc does the same), use a literal pool + # located at register r13. This one can easily offset with 20 + # bit signed values (should be enough) + for op in operations: + self.ensure_can_hold_constants(op) + + def pre_assemble(self, mc): + if self.size == 0: + # no pool needed! + return + if self.size % 2 == 1: + self.size += 1 + assert self.size < 2**16-1 + self.offset = mc.get_relative_pos() + mc.BRAS(r.POOL, l.imm(self.size)) + mc.write('\x00' * self.size) + print "pool with %d bytes %d // 8" % (self.size, self.size // 8) + + def overwrite_64(self, mc, index, value): + mc.overwrite(index, chr(value >> 56 & 0xff)) + mc.overwrite(index+1, chr(value >> 48 & 0xff)) + mc.overwrite(index+2, chr(value >> 40 & 0xff)) + mc.overwrite(index+3, chr(value >> 32 & 0xff)) + mc.overwrite(index+4, chr(value >> 24 & 0xff)) + mc.overwrite(index+5, chr(value >> 16 & 0xff)) + mc.overwrite(index+6, chr(value >> 8 & 0xff)) + mc.overwrite(index+7, chr(value & 0xff)) + + def post_assemble(self, mc): + assert self.offset != 0 + for var in self.places: + if var.type == FLOAT: + self.overwrite_64(mc, self.offset, float2longlong(var.value)) + self.offset += 8 + elif var.type == INT: + self.overwrite(mc, self.offset, var.value) + self.offset += 8 + else: + raise NotImplementedError + self.places = [] + +class AssemblerZARCH(BaseAssembler, + IntOpAssembler, FloatOpAssembler): def 
__init__(self, cpu, translate_support_code=False): BaseAssembler.__init__(self, cpu, translate_support_code) self.mc = None + self.pool = LiteralPool() self.pending_guards = None self.current_clt = None self._regalloc = None @@ -69,17 +154,16 @@ def gen_func_prolog(self): """ NOT_RPYTHON """ STACK_FRAME_SIZE = 40 - self.mc.STMG(r.r11, r.r15, loc.addr(-STACK_FRAME_SIZE, r.SP)) - self.mc.AHI(r.sp, loc.imm(-STACK_FRAME_SIZE)) + self.mc.STMG(r.r11, r.r15, l.addr(-STACK_FRAME_SIZE, r.SP)) + self.mc.AHI(r.SP, l.imm(-STACK_FRAME_SIZE)) def gen_func_epilog(self): """ NOT_RPYTHON """ - self.mc.LMG(r.r11, r.r15, loc.addr(0, r.SP)) + self.mc.LMG(r.r11, r.r15, l.addr(0, r.SP)) self.jmpto(r.r14) def jmpto(self, register): - # TODO, manual says this is a performance killer, there - # might be another operation for unconditional JMP? + # unconditional jump self.mc.BCR_rr(0xf, register.value) def _build_failure_recovery(self, exc, withfloats=False): @@ -90,8 +174,8 @@ # this function is called (see generate_quick_failure()). 
ofs = self.cpu.get_ofs_of_frame_field('jf_descr') ofs2 = self.cpu.get_ofs_of_frame_field('jf_gcmap') - mc.STG(r.r2, loc.addr(ofs, r.SPP)) - mc.STG(r.r3, loc.addr(ofs2, r.SPP)) + mc.STG(r.r2, l.addr(ofs, r.SPP)) + mc.STG(r.r3, l.addr(ofs2, r.SPP)) self._push_core_regs_to_jitframe(mc) if withfloats: @@ -193,8 +277,12 @@ operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() + self.pool.reset() + self.pool.walk_operations(operations) + self.pool.pre_assemble(self.mc) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) + self.pool.post_assemble(self.mc) self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() @@ -272,7 +360,7 @@ # move from memory to fp register elif loc.is_fp_reg(): assert prev_loc.type == FLOAT, 'source not float location' - self.mc.lfd(loc, r.SPP, offset) + self.mc.LDY(loc, l.addr(offset, r.SPP)) return assert 0, "not supported location" elif prev_loc.is_core_reg(): @@ -387,11 +475,11 @@ #self.mc.write64(0) # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES - self.mc.STMG(r.r6, r.r15, loc.addr(-GPR_STACK_SAVE_IN_BYTES, r.SP)) - self.mc.AGHI(r.SP, loc.imm(-STD_FRAME_SIZE_IN_BYTES)) + self.mc.STMG(r.r6, r.r15, l.addr(-GPR_STACK_SAVE_IN_BYTES, r.SP)) + self.mc.AGHI(r.SP, l.imm(-STD_FRAME_SIZE_IN_BYTES)) # save r4, the second argument, to THREADLOCAL_ADDR_OFFSET - self.mc.STG(r.r3, loc.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) + self.mc.STG(r.r3, l.addr(THREADLOCAL_ADDR_OFFSET, r.SP)) # move the first argument to SPP: the jitframe object self.mc.LGR(r.SPP, r.r2) @@ -410,18 +498,18 @@ # restore registers r6-r15 upoffset = STD_FRAME_SIZE_IN_BYTES-GPR_STACK_SAVE_IN_BYTES - self.mc.LMG(r.r6, r.r15, loc.addr(upoffset, r.SP)) + self.mc.LMG(r.r6, r.r15, l.addr(upoffset, r.SP)) self.jmpto(r.r14) def _push_core_regs_to_jitframe(self, mc, includes=r.MANAGED_REGS): base_ofs = 
self.cpu.get_baseofs_of_frame_field() assert len(includes) == 16 - mc.STMG(r.r0, r.r15, loc.addr(base_ofs, r.SPP)) + mc.STMG(r.r0, r.r15, l.addr(base_ofs, r.SPP)) def _push_fp_regs_to_jitframe(self, mc, includes=r.MANAGED_FP_REGS): base_ofs = self.cpu.get_baseofs_of_frame_field() assert len(includes) == 16 - mc.LMG(r.r0, r.r15, loc.addr(base_ofs, r.SPP)) + mc.LMG(r.r0, r.r15, l.addr(base_ofs, r.SPP)) # ________________________________________ # ASSEMBLER EMISSION @@ -434,10 +522,9 @@ if len(arglocs) > 1: [return_val, fail_descr_loc] = arglocs if op.getarg(0).type == FLOAT: - raise NotImplementedError - #self.mc.stfd(return_val, loc.addr(base_ofs, r.SPP)) + self.mc.STD(return_val, l.addr(base_ofs, r.SPP)) else: - self.mc.STG(return_val, loc.addr(base_ofs, r.SPP)) + self.mc.STG(return_val, l.addr(base_ofs, r.SPP)) else: [fail_descr_loc] = arglocs @@ -464,9 +551,9 @@ assert fail_descr_loc.getint() <= 2**12-1 self.mc.LGHI(r.r5, fail_descr_loc) - self.mc.STG(r.r5, loc.addr(ofs, r.SPP)) + self.mc.STG(r.r5, l.addr(ofs, r.SPP)) self.mc.XGR(r.r2, r.r2) - self.mc.STG(r.r2, loc.addr(ofs2, r.SPP)) + self.mc.STG(r.r2, l.addr(ofs2, r.SPP)) # exit function self._call_footer() diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -7,7 +7,7 @@ return lower_bound <= i <= upper_bound return False -def _prepare_binary_arith(self, op): +def _prepare_int_binary_arith(self, op): a0 = op.getarg(0) a1 = op.getarg(1) if check_imm(a0): @@ -20,3 +20,12 @@ self.free_op_vars() self.force_result_in_reg(op, a0) return [l0, l1] + +def _prepare_float_binary_arith(self, op): + a0 = op.getarg(0) + a1 = op.getarg(1) + l0 = self.ensure_reg(a0) + l1 = self.ensure_reg(a1) + self.free_op_vars() + self.force_result_in_reg(op, a0) + return [l0, l1] diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- 
a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -1,5 +1,5 @@ from rpython.jit.metainterp.history import INT, FLOAT -from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.backend.zarch.arch import WORD, DOUBLE_WORD class AssemblerLocation(object): _immutable_ = True @@ -17,7 +17,7 @@ def is_core_reg(self): return False - def is_vfp_reg(self): + def is_fp_reg(self): return False def is_imm_float(self): @@ -26,6 +26,9 @@ def is_float(self): return False + def is_in_pool(self): + return False + def as_key(self): raise NotImplementedError @@ -52,7 +55,7 @@ class FloatRegisterLocation(RegisterLocation): _immutable_ = True type = FLOAT - width = WORD + width = DOUBLE_WORD def __repr__(self): return 'f%d' % self.value @@ -60,7 +63,7 @@ def is_core_reg(self): return False - def is_vfp_reg(self): + def is_fp_reg(self): return True def as_key(self): # 20 <= as_key <= 35 @@ -85,7 +88,6 @@ def is_imm(self): return True - class ConstFloatLoc(AssemblerLocation): """This class represents an imm float value which is stored in memory at the address stored in the field value""" @@ -183,12 +185,40 @@ if length: self.length = length.value +class PoolLoc(AddressLocation): + _immutable_ = True + width = WORD + + def __init__(self, offset, isfloat=False): + AddressLocation.__init__(self, None, None, offset, None) + self.base = 13 + self.isfloat = isfloat + + def is_in_pool(self): + return True + + def is_imm(self): + return True + + def is_imm_float(self): + return self.isfloat + + def is_float(self): + return self.isfloat + + def __repr__(self): + return "pool(i,%d)" % self.value + + def addr(displace, basereg=None, indexreg=None, length=None): return AddressLocation(basereg, indexreg, displace, length) def imm(i): return ImmLocation(i) +def pool(off, float=False): + return PoolLoc(off, float) + def get_fp_offset(base_ofs, position): from rpython.jit.backend.zarch.registers import JITFRAME_FIXED_SIZE return base_ofs + WORD * (position 
+ JITFRAME_FIXED_SIZE) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -10,3 +10,33 @@ else: self.mc.AGR(l0, l1) +class FloatOpAssembler(object): + _mixin_ = True + + def emit_float_add(self, op, arglocs, regalloc): + l0, l1 = arglocs + if l1.is_in_pool(): + self.mc.ADB(l0, l1) + else: + self.mc.ADBR(l0, l1) + + def emit_float_sub(self, op, arglocs, regalloc): + l0, l1 = arglocs + if l1.is_in_pool(): + self.mc.SDB(l0, l1) + else: + self.mc.SDBR(l0, l1) + + def emit_float_mul(self, op, arglocs, regalloc): + l0, l1 = arglocs + if l1.is_in_pool(): + self.mc.MDB(l0, l1) + else: + self.mc.MDBR(l0, l1) + + def emit_float_div(self, op, arglocs, regalloc): + l0, l1 = arglocs + if l1.is_in_pool(): + self.mc.DDB(l0, l1) + else: + self.mc.DDBR(l0, l1) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -72,13 +72,14 @@ def call_result_location(self, v): return r.f1 + def place_in_pool(self, var): + offset = self.assembler.pool.place(var) + return locations.pool(offset, r.POOL) + def ensure_reg(self, box): if isinstance(box, Const): - loc = self.get_scratch_reg() - immadrvalue = self.convert_to_adr(box) - mc = self.assembler.mc - mc.load_imm(r.SCRATCH, immadrvalue) - mc.lfdx(loc.value, 0, r.SCRATCH.value) + # TODO, allocate in a register or just load it straight from pool? 
+ return self.place_in_pool(box) else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -462,7 +463,11 @@ def prepare_increment_debug_counter(self, op): pass # XXX - prepare_int_add = regallochelp._prepare_binary_arith + prepare_int_add = regallochelp._prepare_int_binary_arith + prepare_float_add = regallochelp._prepare_float_binary_arith + prepare_float_sub = regallochelp._prepare_float_binary_arith + prepare_float_mul = regallochelp._prepare_float_binary_arith + prepare_float_div = regallochelp._prepare_float_binary_arith def prepare_finish(self, op): descr = op.getdescr() diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py --- a/rpython/jit/backend/zarch/runner.py +++ b/rpython/jit/backend/zarch/runner.py @@ -16,6 +16,8 @@ cast_ptr_to_int = staticmethod(cast_ptr_to_int) class CPU_S390_64(AbstractZARCHCPU): + supports_floats = True + def setup(self): self.assembler = AssemblerZARCH(self) diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -111,7 +111,7 @@ i = rop.INT_ADD from rpython.jit.backend.zarch import assembler assert assembler.asm_operations[i] \ - is AssemblerZARCH.emit_op_int_add.im_func + is AssemblerZARCH.emit_int_add.im_func def test_byte_count_instr(self): byte_count(self.mc.BRC) == 4 @@ -130,11 +130,11 @@ def test_simple_func(self): # enter - self.a.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.sp)) - self.a.mc.AHI(reg.sp, loc.imm(-96)) + self.a.mc.STMG(reg.r11, reg.r15, loc.addr(-96, reg.SP)) + self.a.mc.AHI(reg.SP, loc.imm(-96)) # from the start of BRASL to end of jmpto there are 8+6 bytes self.a.mc.BRASL(reg.r14, loc.imm(8+6)) - self.a.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.sp)) + self.a.mc.LMG(reg.r11, reg.r15, loc.addr(0, reg.SP)) self.a.jmpto(reg.r14) addr = self.a.mc.get_relative_pos() From noreply at 
buildbot.pypy.org Tue Oct 27 12:12:25 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Tue, 27 Oct 2015 17:12:25 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: literal/constant pool correctly assembled, float test (linear float loop) passing Message-ID: <20151027161225.5CC1E1C0149@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80468:ab25d19c932c Date: 2015-10-27 17:12 +0100 http://bitbucket.org/pypy/pypy/changeset/ab25d19c932c/ Log: literal/constant pool correctly assembled, float test (linear float loop) passing diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -76,8 +76,8 @@ if self.size % 2 == 1: self.size += 1 assert self.size < 2**16-1 + mc.BRAS(r.POOL, l.imm(self.size+mc.BRAS._byte_count)) self.offset = mc.get_relative_pos() - mc.BRAS(r.POOL, l.imm(self.size)) mc.write('\x00' * self.size) print "pool with %d bytes %d // 8" % (self.size, self.size // 8) From noreply at buildbot.pypy.org Tue Oct 27 13:28:19 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Tue, 27 Oct 2015 18:28:19 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: fix the windows checksum Message-ID: <20151027172819.DF4C81C0149@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: extradoc Changeset: r647:f628fd601f84 Date: 2015-10-27 18:28 +0100 http://bitbucket.org/pypy/pypy.org/changeset/f628fd601f84/ Log: fix the windows checksum diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -352,7 +352,7 @@ fcd8e2dacc2340173be206ab9de1d3fc pypy-4.0.0-osx64.tar.bz2 57722fd5fc01734839ecc523ce965fbb pypy-4.0.0-src.tar.bz2 8e90eed8aea1686d98c2e7dce5bda1e0 pypy-4.0.0-src.zip -fe4733ef55c9a4692abbc0caeb2f2b95 pypy-4.0.0-win32.zip +fb013ec74edbf8dfd4f059e934fd4e5c pypy-4.0.0-win32.zip

    pypy3-2.4.0 md5:

    @@ -382,7 +382,7 @@
     7656c9975b353c801a15b924930ee47f173280b9  pypy-4.0.0-osx64.tar.bz2
     b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
     24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
    -d519d5cfccbefb9d9773e655ee8865cf265f033e  pypy-4.0.0-win32.zip
    +7369248180bdc9c6055272b1dedb3b3a70181497  pypy-4.0.0-win32.zip
     

    pypy3-2.4.0 sha1:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -380,7 +380,7 @@
         fcd8e2dacc2340173be206ab9de1d3fc  pypy-4.0.0-osx64.tar.bz2
         57722fd5fc01734839ecc523ce965fbb  pypy-4.0.0-src.tar.bz2
         8e90eed8aea1686d98c2e7dce5bda1e0  pypy-4.0.0-src.zip
    -    fe4733ef55c9a4692abbc0caeb2f2b95  pypy-4.0.0-win32.zip
    +    fb013ec74edbf8dfd4f059e934fd4e5c  pypy-4.0.0-win32.zip
     
     pypy3-2.4.0 md5::
     
    @@ -412,7 +412,7 @@
         7656c9975b353c801a15b924930ee47f173280b9  pypy-4.0.0-osx64.tar.bz2
         b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
         24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
    -    d519d5cfccbefb9d9773e655ee8865cf265f033e  pypy-4.0.0-win32.zip
    +    7369248180bdc9c6055272b1dedb3b3a70181497  pypy-4.0.0-win32.zip
     
     pypy3-2.4.0 sha1::
     
    
    From noreply at buildbot.pypy.org  Tue Oct 27 17:45:12 2015
    From: noreply at buildbot.pypy.org (mattip)
    Date: Tue, 27 Oct 2015 22:45:12 +0100 (CET)
    Subject: [pypy-commit] pypy default: mention memory savings in release notice
    Message-ID: <20151027214512.4BA451C0403@cobra.cs.uni-duesseldorf.de>
    
    Author: mattip 
    Branch: 
    Changeset: r80469:92df32c22fa4
    Date: 2015-10-28 08:45 +1100
    http://bitbucket.org/pypy/pypy/changeset/92df32c22fa4/
    
    Log:	mention memory savings in release notice
    
    diff --git a/pypy/doc/release-4.0.0.rst b/pypy/doc/release-4.0.0.rst
    --- a/pypy/doc/release-4.0.0.rst
    +++ b/pypy/doc/release-4.0.0.rst
    @@ -50,12 +50,14 @@
     
     Benchmarks and a summary of this work appear `here`_
     
    -Internal Refactoring and Warmup Time Improvement
    -================================================
    +Internal Refactoring: Warmup Time Improvement and Reduced Memory Usage
    +======================================================================
     
     Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow
    -PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling,
    -leading to a warmup time improvement of 20% or so.
    +PyPy to more efficiently use `guards`_ in jitted code. They also rewrote 
    +unrolling, leading to a warmup time improvement of 20% or so. The reduction in
    +guards also means a reduction in the use of memory, also a savings of around
    +20%.
     
     Numpy
     =====
    
    From noreply at buildbot.pypy.org  Tue Oct 27 18:53:50 2015
    From: noreply at buildbot.pypy.org (arigo)
    Date: Tue, 27 Oct 2015 23:53:50 +0100 (CET)
    Subject: [pypy-commit] pypy default: Issue #2169: fix (not tested,
     unsure if it's worth the effort, see issue)
    Message-ID: <20151027225350.135301C0149@cobra.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r80470:a49a3f9c9805
    Date: 2015-10-27 23:53 +0100
    http://bitbucket.org/pypy/pypy/changeset/a49a3f9c9805/
    
    Log:	Issue #2169: fix (not tested, unsure if it's worth the effort, see
    	issue)
    
    diff --git a/lib_pypy/dbm.py b/lib_pypy/dbm.py
    --- a/lib_pypy/dbm.py
    +++ b/lib_pypy/dbm.py
    @@ -149,6 +149,7 @@
     _init_func('store', (c_void_p, datum, datum, c_int), restype=c_int)
     _init_func('error', (c_void_p,), restype=c_int)
     _init_func('delete', (c_void_p, datum), restype=c_int)
    +_init_func('clearerr', (c_void_p,), restype=c_int)
     
     lib.DBM_INSERT = 0
     lib.DBM_REPLACE = 1
    
    From noreply at buildbot.pypy.org  Wed Oct 28 07:02:25 2015
    From: noreply at buildbot.pypy.org (plan_rich)
    Date: Wed, 28 Oct 2015 12:02:25 +0100 (CET)
    Subject: [pypy-commit] pypy s390x-backend: adding resoperations to
     regalloc/assembler (label, int_(lt, eq, ...), guards)
    Message-ID: <20151028110225.D437F1C14D6@cobra.cs.uni-duesseldorf.de>
    
    Author: Richard Plangger 
    Branch: s390x-backend
    Changeset: r80471:3a2cb683d03e
    Date: 2015-10-28 10:11 +0100
    http://bitbucket.org/pypy/pypy/changeset/3a2cb683d03e/
    
    Log:	adding resoperations to regalloc/assembler (label,int_(lt,eq,...),
    	guards)
    
    diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
    --- a/rpython/jit/backend/zarch/assembler.py
    +++ b/rpython/jit/backend/zarch/assembler.py
    @@ -11,7 +11,7 @@
             STD_FRAME_SIZE_IN_BYTES, GPR_STACK_SAVE_IN_BYTES,
             THREADLOCAL_ADDR_OFFSET)
     from rpython.jit.backend.zarch.opassembler import (IntOpAssembler,
    -    FloatOpAssembler)
    +    FloatOpAssembler, GuardOpAssembler)
     from rpython.jit.backend.zarch.regalloc import Regalloc
     from rpython.jit.metainterp.resoperation import rop
     from rpython.rlib.debug import (debug_print, debug_start, debug_stop,
    @@ -105,7 +105,8 @@
             self.places = []
     
     class AssemblerZARCH(BaseAssembler,
    -        IntOpAssembler, FloatOpAssembler):
    +        IntOpAssembler, FloatOpAssembler,
    +        GuardOpAssembler):
     
         def __init__(self, cpu, translate_support_code=False):
             BaseAssembler.__init__(self, cpu, translate_support_code)
    @@ -145,6 +146,9 @@
             self.mc = None
             self.pending_guards = None
     
    +    def target_arglocs(self, looptoken):
    +        return looptoken._zarch_arglocs
    +
         def get_asmmemmgr_blocks(self, looptoken):
             clt = looptoken.compiled_loop_token
             if clt.asmmemmgr_blocks is None:
    @@ -333,7 +337,7 @@
             if prev_loc.is_imm():
                 value = prev_loc.getint()
                 # move immediate value to register
    -            if loc.is_core_reg():
    +            if loc.is_reg():
                     self.mc.load_imm(loc, value)
                     return
                 # move immediate value to memory
    @@ -347,7 +351,7 @@
             elif prev_loc.is_stack():
                 offset = prev_loc.value
                 # move from memory to register
    -            if loc.is_core_reg():
    +            if loc.is_reg():
                     self.mc.load(loc, r.SPP, offset)
                     return
                 # move in memory
    @@ -363,17 +367,15 @@
                     self.mc.LDY(loc, l.addr(offset, r.SPP))
                     return
                 assert 0, "not supported location"
    -        elif prev_loc.is_core_reg():
    -            reg = prev_loc.value
    +        elif prev_loc.is_reg():
                 # move to another register
    -            if loc.is_core_reg():
    -                other_reg = loc.value
    -                self.mc.mr(other_reg, reg)
    +            if loc.is_reg():
    +                self.mc.LGR(loc, prev_loc)
                     return
                 # move to memory
                 elif loc.is_stack():
                     offset = loc.value
    -                self.mc.store(reg, r.SPP, offset)
    +                self.mc.store(prev_loc, r.SPP, offset)
                     return
                 assert 0, "not supported location"
             elif prev_loc.is_imm_float():
    @@ -517,6 +519,9 @@
         def emit_increment_debug_counter(self, op, arglocs, regalloc):
             pass # TODO
     
    +    def emit_label(self, op, arglocs, regalloc):
    +        pass
    +
         def emit_finish(self, op, arglocs, regalloc):
             base_ofs = self.cpu.get_baseofs_of_frame_field()
             if len(arglocs) > 1:
    diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py
    --- a/rpython/jit/backend/zarch/codebuilder.py
    +++ b/rpython/jit/backend/zarch/codebuilder.py
    @@ -1,8 +1,9 @@
    -from rpython.jit.backend.zarch import conditions as cond
    -from rpython.jit.backend.zarch import registers as reg
    -from rpython.jit.backend.zarch import locations as loc
    +from rpython.jit.backend.zarch import conditions as c
    +from rpython.jit.backend.zarch import registers as r
    +from rpython.jit.backend.zarch import locations as l
     from rpython.jit.backend.zarch.instruction_builder import build_instr_codes
     from rpython.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
    +from rpython.jit.backend.llsupport.assembler import GuardToken
     from rpython.rlib.objectmodel import we_are_translated
     from rpython.rlib.unroll import unrolling_iterable
     from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
    @@ -19,15 +20,19 @@
     def binary_helper_call(name):
         function = getattr(support, 'arm_%s' % name)
     
    -    def f(self, c=cond.AL):
    +    def f(self, c=c.AL):
             """Generates a call to a helper function, takes its
             arguments in r0 and r1, result is placed in r0"""
             addr = rffi.cast(lltype.Signed, function)
             self.BL(addr, c)
         return f
     
    -class Operand(object):
    -    pass
    +class ZARCHGuardToken(GuardToken):
    +    def __init__(self, cpu, gcmap, descr, failargs, faillocs,
    +                 guard_opnum, frame_depth, fcond=c.cond_none):
    +        GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs,
    +                            guard_opnum, frame_depth)
    +        self.fcond = fcond
     
     class AbstractZARCHBuilder(object):
         def write_i32(self, word):
    @@ -85,11 +90,32 @@
             self._dump(addr, "jit-backend-dump", "s390x")
     
         def load(self, treg, sreg, offset):
    -        self.LG(treg, loc.addr(offset, sreg))
    +        self.LG(treg, l.addr(offset, sreg))
     
         def currpos(self):
             return self.get_relative_pos()
     
    +    def cmp_op(self, a, b, pool=False, signed=True, fp=False):
    +        if fp == True:
    +            xxx
    +            self.fcmpu(a, b)
    +        else:
    +            if signed:
    +                if pool:
    +                    # 64 bit immediate signed
    +                    self.CLG(a, b)
    +                else:
    +                    # 64 bit signed
    +                    self.CLGR(a, b)
    +            else:
    +                if pool:
    +                    # 64 bit immediate unsigned
    +                    self.CG(a, b)
    +                else:
    +                    # 64 bit unsigned
    +                    self.CGR(a, b)
    +
    +
     _classes = (AbstractZARCHBuilder,)
     
     # Used to build the MachineCodeBlockWrapper
    diff --git a/rpython/jit/backend/zarch/conditions.py b/rpython/jit/backend/zarch/conditions.py
    --- a/rpython/jit/backend/zarch/conditions.py
    +++ b/rpython/jit/backend/zarch/conditions.py
    @@ -6,6 +6,10 @@
     GT = loc.imm(0x2)
     LE = loc.imm(EQ.value | LT.value)
     GE = loc.imm(EQ.value | GT.value)
    +NE = loc.imm(LT.value | GT.value)
     OVERFLOW = loc.imm(0x1)
     
     cond_none = loc.imm(0x0)
    +
    +def negate(cond):
    +    return cond
    diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py
    --- a/rpython/jit/backend/zarch/helper/assembler.py
    +++ b/rpython/jit/backend/zarch/helper/assembler.py
    @@ -0,0 +1,69 @@
    +import rpython.jit.backend.zarch.conditions as c
    +import rpython.jit.backend.zarch.registers as r
    +from rpython.rlib.rarithmetic import intmask
    +from rpython.jit.backend.zarch.arch import WORD
    +from rpython.jit.metainterp.history import FLOAT
    +from rpython.jit.metainterp.resoperation import rop
    +from rpython.rtyper.lltypesystem import rffi, lltype
    +
    +def flush_cc(asm, condition, result_loc):
    +    # After emitting an instruction that leaves a boolean result in
    +    # a condition code (cc), call this.  In the common case, result_loc
    +    # will be set to SPP by the regalloc, which in this case means
    +    # "propagate it between this operation and the next guard by keeping
    +    # it in the cc".  In the uncommon case, result_loc is another
    +    # register, and we emit a load from the cc into this register.
    +    assert asm.guard_success_cc == c.cond_none
    +    if result_loc is r.SPP:
    +        asm.guard_success_cc = condition
    +    else:
    +        # Possibly invert the bit in the CR
    +        bit, invert = c.encoding[condition]
    +        assert 0 <= bit <= 3
    +        if invert == 12:
    +            pass
    +        elif invert == 4:
    +            asm.mc.crnor(bit, bit, bit)
    +        else:
    +            assert 0
    +
    +        resval = result_loc.value
    +        # move the content of the CR to resval
    +        asm.mc.mfcr(resval)
    +        # zero out everything except of the result
    +        asm.mc.rlwinm(resval, resval, 1 + bit, 31, 31)
    +
    +
    +def do_emit_cmp_op(self, arglocs, condition, signed, fp):
    +    l0 = arglocs[0]
    +    l1 = arglocs[1]
    +    assert not l0.is_imm()
    +    # do the comparison
    +    self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), signed=signed, fp=fp)
    +
    +    # CR bits:
    +    #     0: LT
    +    #     1: GT
    +    #     2: EQ
    +    #     3: UNordered
    +
    +    if fp:
    +        # Support for NaNs: with LE or GE, if one of the operands is a
    +        # NaN, we get CR=1,0,0,0 (unordered bit only).  We're about to
    +        # check "not GT" or "not LT", but in case of NaN we want to
    +        # get the answer False.
    +        #if condition == c.LE:
    +        #    self.mc.crnor(1, 1, 3)
    +        #    condition = c.GT
    +        #elif condition == c.GE:
    +        #    self.mc.crnor(0, 0, 3)
    +        #    condition = c.LT
    +        pass
    +
    +    flush_cc(self, condition, r.SPP)
    +
    +
    +def gen_emit_cmp_op(condition, signed=True, fp=False):
    +    def f(self, op, arglocs, regalloc):
    +        do_emit_cmp_op(self, arglocs, condition, signed, fp)
    +    return f
    diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py
    --- a/rpython/jit/backend/zarch/helper/regalloc.py
    +++ b/rpython/jit/backend/zarch/helper/regalloc.py
    @@ -7,7 +7,7 @@
             return lower_bound <= i <= upper_bound
         return False
     
    -def _prepare_int_binary_arith(self, op):
    +def prepare_int_add_or_mul(self, op):
         a0 = op.getarg(0)
         a1 = op.getarg(1)
         if check_imm(a0):
    @@ -21,7 +21,32 @@
         self.force_result_in_reg(op, a0)
         return [l0, l1]
     
    -def _prepare_float_binary_arith(self, op):
    +def prepare_int_sub(self, op):
    +    a0 = op.getarg(0)
    +    a1 = op.getarg(1)
    +    if isinstance(a0, ConstInt):
    +        a0, a1 = a1, a0
    +    l0 = self.ensure_reg(a0)
    +    l1 = self.ensure_reg(a1)
    +    self.free_op_vars()
    +    self.force_result_in_reg(op, a0)
    +    return [l0, l1]
    +
    +def prepare_cmp_op(self, op):
    +    a0 = op.getarg(0)
    +    a1 = op.getarg(1)
    +    if check_imm(a0):
    +        a0, a1 = a1, a0
    +    l0 = self.ensure_reg(a0)
    +    if check_imm(a1):
    +        l1 = imm(a1.getint())
    +    else:
    +        l1 = self.ensure_reg(a1)
    +    self.free_op_vars()
    +    self.force_result_in_reg(op, a0)
    +    return [l0, l1]
    +
    +def prepare_binary_op(self, op):
         a0 = op.getarg(0)
         a1 = op.getarg(1)
         l0 = self.ensure_reg(a0)
    diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py
    --- a/rpython/jit/backend/zarch/instructions.py
    +++ b/rpython/jit/backend/zarch/instructions.py
    @@ -21,6 +21,13 @@
         'AGF':     ('rxy',   ['\xE3','\x18']),
         'AHI':     ('ri',    ['\xA7','\x0A']),
         'AGHI':    ('ri',    ['\xA7','\x0B']),
    +
    +
    +    # comparision
    +    'CGR':     ('rre',    ['\xB9','\x20']),
    +    'CG':      ('rxy',    ['\xE3','\x20']),
    +    'CLGR':    ('rre',    ['\xB9','\x21']),
    +    'CLG':     ('rxy',    ['\xE3','\x20']),
     }
     
     logic_mnemonic_codes = {
    diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py
    --- a/rpython/jit/backend/zarch/locations.py
    +++ b/rpython/jit/backend/zarch/locations.py
    @@ -14,7 +14,7 @@
         def is_raw_sp(self):
             return False
     
    -    def is_core_reg(self):
    +    def is_reg(self):
             return False
     
         def is_fp_reg(self):
    @@ -45,7 +45,7 @@
         def __repr__(self):
             return 'r%d' % self.value
     
    -    def is_core_reg(self):
    +    def is_reg(self):
             return True
     
         def as_key(self):       # 0 <= as_key <= 15
    @@ -60,7 +60,7 @@
         def __repr__(self):
             return 'f%d' % self.value
     
    -    def is_core_reg(self):
    +    def is_reg(self):
             return False
     
         def is_fp_reg(self):
    diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
    --- a/rpython/jit/backend/zarch/opassembler.py
    +++ b/rpython/jit/backend/zarch/opassembler.py
    @@ -1,15 +1,28 @@
    +from rpython.jit.backend.zarch.helper.assembler import gen_emit_cmp_op
    +from rpython.jit.backend.zarch.codebuilder import ZARCHGuardToken
    +import rpython.jit.backend.zarch.conditions as c
    +import rpython.jit.backend.zarch.registers as r
    +from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
     
     class IntOpAssembler(object):
         _mixin_ = True
     
         def emit_int_add(self, op, arglocs, regalloc):
             l0, l1 = arglocs
    -        assert not l0.is_imm()
             if l1.is_imm():
                 self.mc.AGHI(l0, l1)
    +        elif l1.is_in_pool():
    +            self.mc.AG(l0, l1)
             else:
                 self.mc.AGR(l0, l1)
     
    +    emit_int_le = gen_emit_cmp_op(c.LE)
    +    emit_int_lt = gen_emit_cmp_op(c.LT)
    +    emit_int_gt = gen_emit_cmp_op(c.GT)
    +    emit_int_ge = gen_emit_cmp_op(c.GE)
    +    emit_int_eq = gen_emit_cmp_op(c.EQ)
    +    emit_int_ne = gen_emit_cmp_op(c.NE)
    +
     class FloatOpAssembler(object):
         _mixin_ = True
     
    @@ -40,3 +53,192 @@
                 self.mc.DDB(l0, l1)
             else:
                 self.mc.DDBR(l0, l1)
    +
    +class GuardOpAssembler(object):
    +    _mixin_ = True
    +
    +    def _emit_guard(self, op, arglocs, is_guard_not_invalidated=False):
    +        if is_guard_not_invalidated:
    +            fcond = c.cond_none
    +        else:
    +            fcond = self.guard_success_cc
    +            self.guard_success_cc = c.cond_none
    +            assert fcond != c.cond_none
    +            fcond = c.negate(fcond)
    +        token = self.build_guard_token(op, arglocs[0].value, arglocs[1:], fcond)
    +        token.pos_jump_offset = self.mc.currpos()
    +        assert token.guard_not_invalidated() == is_guard_not_invalidated
    +        if not is_guard_not_invalidated:
    +            self.mc.trap()     # has to be patched later on
    +        self.pending_guard_tokens.append(token)
    +
    +    def build_guard_token(self, op, frame_depth, arglocs, fcond):
    +        descr = op.getdescr()
    +        gcmap = allocate_gcmap(self, frame_depth, r.JITFRAME_FIXED_SIZE)
    +        token = ZARCHGuardToken(self.cpu, gcmap, descr, op.getfailargs(),
    +                              arglocs, op.getopnum(), frame_depth,
    +                              fcond)
    +        return token
    +
    +    def emit_guard_true(self, op, arglocs, regalloc):
    +        self._emit_guard(op, arglocs)
    +
    +    def emit_guard_false(self, op, arglocs, regalloc):
    +        self.guard_success_cc = c.negate(self.guard_success_cc)
    +        self._emit_guard(op, arglocs)
    +
    +    def emit_guard_overflow(self, op, arglocs, regalloc):
    +        self.guard_success_cc = c.SO
    +        self._emit_guard(op, arglocs)
    +
    +    def emit_guard_no_overflow(self, op, arglocs, regalloc):
    +        self.guard_success_cc = c.NS
    +        self._emit_guard(op, arglocs)
    +
    +    def emit_guard_value(self, op, arglocs, regalloc):
    +        l0 = arglocs[0]
    +        l1 = arglocs[1]
    +        failargs = arglocs[2:]
    +
    +        if l0.is_reg():
    +            if l1.is_imm():
    +                self.mc.cmp_op(0, l0.value, l1.getint(), imm=True)
    +            else:
    +                self.mc.cmp_op(0, l0.value, l1.value)
    +        elif l0.is_fp_reg():
    +            assert l1.is_fp_reg()
    +            self.mc.cmp_op(0, l0.value, l1.value, fp=True)
    +        self.guard_success_cc = c.EQ
    +        self._emit_guard(op, failargs)
    +
    +    emit_guard_nonnull = emit_guard_true
    +    emit_guard_isnull = emit_guard_false
    +
    +    def emit_guard_class(self, op, arglocs, regalloc):
    +        self._cmp_guard_class(op, arglocs, regalloc)
    +        self.guard_success_cc = c.EQ
    +        self._emit_guard(op, arglocs[2:])
    +
    +    def emit_guard_nonnull_class(self, op, arglocs, regalloc):
    +        self.mc.cmp_op(0, arglocs[0].value, 1, imm=True, signed=False)
    +        patch_pos = self.mc.currpos()
    +        self.mc.trap()
    +        self._cmp_guard_class(op, arglocs, regalloc)
    +        pmc = OverwritingBuilder(self.mc, patch_pos, 1)
    +        pmc.blt(self.mc.currpos() - patch_pos)
    +        pmc.overwrite()
    +        self.guard_success_cc = c.EQ
    +        self._emit_guard(op, arglocs[2:])
    +
    +    def _cmp_guard_class(self, op, locs, regalloc):
    +        offset = self.cpu.vtable_offset
    +        if offset is not None:
    +            # could be one instruction shorter, but don't care because
    +            # it's not this case that is commonly translated
    +            self.mc.load(r.SCRATCH.value, locs[0].value, offset)
    +            self.mc.load_imm(r.SCRATCH2, locs[1].value)
    +            self.mc.cmp_op(0, r.SCRATCH.value, r.SCRATCH2.value)
    +        else:
    +            expected_typeid = (self.cpu.gc_ll_descr
    +                    .get_typeid_from_classptr_if_gcremovetypeptr(locs[1].value))
    +            self._cmp_guard_gc_type(locs[0], expected_typeid)
    +
    +    def _read_typeid(self, targetreg, loc_ptr):
    +        # Note that the typeid half-word is at offset 0 on a little-endian
    +        # machine; it is at offset 2 or 4 on a big-endian machine.
    +        assert self.cpu.supports_guard_gc_type
    +        if IS_PPC_32:
    +            self.mc.lhz(targetreg.value, loc_ptr.value, 2 * IS_BIG_ENDIAN)
    +        else:
    +            self.mc.lwz(targetreg.value, loc_ptr.value, 4 * IS_BIG_ENDIAN)
    +
    +    def _cmp_guard_gc_type(self, loc_ptr, expected_typeid):
    +        self._read_typeid(r.SCRATCH2, loc_ptr)
    +        assert 0 <= expected_typeid <= 0x7fffffff   # 4 bytes are always enough
    +        if expected_typeid > 0xffff:     # if 2 bytes are not enough
    +            self.mc.subis(r.SCRATCH2.value, r.SCRATCH2.value,
    +                          expected_typeid >> 16)
    +            expected_typeid = expected_typeid & 0xffff
    +        self.mc.cmp_op(0, r.SCRATCH2.value, expected_typeid,
    +                       imm=True, signed=False)
    +
    +    def emit_guard_gc_type(self, op, arglocs, regalloc):
    +        self._cmp_guard_gc_type(arglocs[0], arglocs[1].value)
    +        self.guard_success_cc = c.EQ
    +        self._emit_guard(op, arglocs[2:])
    +
    +    def emit_guard_is_object(self, op, arglocs, regalloc):
    +        assert self.cpu.supports_guard_gc_type
    +        loc_object = arglocs[0]
    +        # idea: read the typeid, fetch one byte of the field 'infobits' from
    +        # the big typeinfo table, and check the flag 'T_IS_RPYTHON_INSTANCE'.
    +        base_type_info, shift_by, sizeof_ti = (
    +            self.cpu.gc_ll_descr.get_translated_info_for_typeinfo())
    +        infobits_offset, IS_OBJECT_FLAG = (
    +            self.cpu.gc_ll_descr.get_translated_info_for_guard_is_object())
    +
    +        self._read_typeid(r.SCRATCH2, loc_object)
    +        self.mc.load_imm(r.SCRATCH, base_type_info + infobits_offset)
    +        assert shift_by == 0     # on PPC64; fixme for PPC32
    +        self.mc.lbzx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
    +        self.mc.andix(r.SCRATCH2.value, r.SCRATCH2.value, IS_OBJECT_FLAG & 0xff)
    +        self.guard_success_cc = c.NE
    +        self._emit_guard(op, arglocs[1:])
    +
    +    def emit_guard_subclass(self, op, arglocs, regalloc):
    +        assert self.cpu.supports_guard_gc_type
    +        loc_object = arglocs[0]
    +        loc_check_against_class = arglocs[1]
    +        offset = self.cpu.vtable_offset
    +        offset2 = self.cpu.subclassrange_min_offset
    +        if offset is not None:
    +            # read this field to get the vtable pointer
    +            self.mc.load(r.SCRATCH2.value, loc_object.value, offset)
    +            # read the vtable's subclassrange_min field
    +            assert _check_imm_arg(offset2)
    +            self.mc.ld(r.SCRATCH2.value, r.SCRATCH2.value, offset2)
    +        else:
    +            # read the typeid
    +            self._read_typeid(r.SCRATCH, loc_object)
    +            # read the vtable's subclassrange_min field, as a single
    +            # step with the correct offset
    +            base_type_info, shift_by, sizeof_ti = (
    +                self.cpu.gc_ll_descr.get_translated_info_for_typeinfo())
    +            self.mc.load_imm(r.SCRATCH2, base_type_info + sizeof_ti + offset2)
    +            assert shift_by == 0     # on PPC64; fixme for PPC32
    +            self.mc.ldx(r.SCRATCH2.value, r.SCRATCH2.value, r.SCRATCH.value)
    +        # get the two bounds to check against
    +        vtable_ptr = loc_check_against_class.getint()
    +        vtable_ptr = rffi.cast(rclass.CLASSTYPE, vtable_ptr)
    +        check_min = vtable_ptr.subclassrange_min
    +        check_max = vtable_ptr.subclassrange_max
    +        assert check_max > check_min
    +        check_diff = check_max - check_min - 1
    +        # right now, a full PyPy uses less than 6000 numbers,
    +        # so we'll assert here that it always fit inside 15 bits
    +        assert 0 <= check_min <= 0x7fff
    +        assert 0 <= check_diff <= 0xffff
    +        # check by doing the unsigned comparison (tmp - min) < (max - min)
    +        self.mc.subi(r.SCRATCH2.value, r.SCRATCH2.value, check_min)
    +        self.mc.cmp_op(0, r.SCRATCH2.value, check_diff, imm=True, signed=False)
    +        # the guard passes if we get a result of "below or equal"
    +        self.guard_success_cc = c.LE
    +        self._emit_guard(op, arglocs[2:])
    +
    +    def emit_guard_not_invalidated(self, op, arglocs, regalloc):
    +        self._emit_guard(op, arglocs, is_guard_not_invalidated=True)
    +
    +    def emit_guard_not_forced(self, op, arglocs, regalloc):
    +        ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
    +        self.mc.ld(r.SCRATCH.value, r.SPP.value, ofs)
    +        self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True)
    +        self.guard_success_cc = c.EQ
    +        self._emit_guard(op, arglocs)
    +
    +    def emit_guard_not_forced_2(self, op, arglocs, regalloc):
    +        guard_token = self.build_guard_token(op, arglocs[0].value, arglocs[1:],
    +                                             c.cond_none)
    +        self._finish_gcmap = guard_token.gcmap
    +        self._store_force_index(op)
    +        self.store_info_on_descr(0, guard_token)
    +
    diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
    --- a/rpython/jit/backend/zarch/regalloc.py
    +++ b/rpython/jit/backend/zarch/regalloc.py
    @@ -8,7 +8,7 @@
                                                 INT, REF, FLOAT, VOID)
     from rpython.jit.metainterp.history import JitCellToken, TargetToken
     from rpython.jit.metainterp.resoperation import rop
    -from rpython.jit.backend.zarch import locations
    +from rpython.jit.backend.zarch import locations as l
     from rpython.rtyper.lltypesystem import rffi, lltype, rstr, llmemory
     from rpython.rtyper.lltypesystem.lloperation import llop
     from rpython.rtyper.annlowlevel import cast_instance_to_gcref
    @@ -16,7 +16,7 @@
     from rpython.jit.backend.llsupport.descr import ArrayDescr
     import rpython.jit.backend.zarch.registers as r
     import rpython.jit.backend.zarch.conditions as c
    -import rpython.jit.backend.zarch.helper.regalloc as regallochelp
    +import rpython.jit.backend.zarch.helper.regalloc as helper
     from rpython.jit.backend.llsupport.descr import unpack_arraydescr
     from rpython.jit.backend.llsupport.descr import unpack_fielddescr
     from rpython.jit.backend.llsupport.descr import unpack_interiorfielddescr
    @@ -64,7 +64,7 @@
     
         def convert_to_imm(self, c):
             adr = self.convert_to_adr(c)
    -        return locations.ConstFloatLoc(adr)
    +        return l.ConstFloatLoc(adr)
     
         def __init__(self, longevity, frame_manager=None, assembler=None):
             RegisterManager.__init__(self, longevity, frame_manager, assembler)
    @@ -74,7 +74,7 @@
     
         def place_in_pool(self, var):
             offset = self.assembler.pool.place(var)
    -        return locations.pool(offset, r.POOL)
    +        return l.pool(offset, r.POOL)
     
         def ensure_reg(self, box):
             if isinstance(box, Const):
    @@ -116,7 +116,7 @@
     
         def convert_to_imm(self, c):
             val = self.convert_to_int(c)
    -        return locations.ImmLocation(val)
    +        return l.ImmLocation(val)
     
         def ensure_reg(self, box):
             if isinstance(box, Const):
    @@ -143,8 +143,8 @@
             self.base_ofs = base_ofs
     
         def frame_pos(self, loc, box_type):
    -        #return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type)
    -        return locations.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type)
    +        #return l.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type)
    +        return l.StackLocation(loc, get_fp_offset(self.base_ofs, loc), box_type)
     
         @staticmethod
         def frame_size(type):
    @@ -152,7 +152,7 @@
     
         @staticmethod
         def get_loc_index(loc):
    -        assert isinstance(loc, locations.StackLocation)
    +        assert isinstance(loc, l.StackLocation)
             return loc.position
     
     
    @@ -350,7 +350,7 @@
                     gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
             for box, loc in self.fm.bindings.iteritems():
                 if box.type == REF and self.rm.is_still_alive(box):
    -                assert isinstance(loc, locations.StackLocation)
    +                assert isinstance(loc, l.StackLocation)
                     val = loc.get_position() + r.JITFRAME_FIXED_SIZE
                     gcmap[val // WORD // 8] |= r_uint(1) << (val % (WORD * 8))
             return gcmap
    @@ -463,11 +463,103 @@
         def prepare_increment_debug_counter(self, op):
             pass # XXX
     
    -    prepare_int_add = regallochelp._prepare_int_binary_arith
    -    prepare_float_add = regallochelp._prepare_float_binary_arith
    -    prepare_float_sub = regallochelp._prepare_float_binary_arith
    -    prepare_float_mul = regallochelp._prepare_float_binary_arith
    -    prepare_float_div = regallochelp._prepare_float_binary_arith
    +    prepare_int_add = helper.prepare_int_add_or_mul
    +    prepare_int_sub = helper.prepare_int_sub
    +    prepare_int_mul = helper.prepare_int_add_or_mul
    +
    +    prepare_int_le = helper.prepare_cmp_op
    +    prepare_int_lt = helper.prepare_cmp_op
    +    prepare_int_ge = helper.prepare_cmp_op
    +    prepare_int_gt = helper.prepare_cmp_op
    +    prepare_int_eq = helper.prepare_cmp_op
    +    prepare_int_ne = helper.prepare_cmp_op
    +
    +    prepare_float_add = helper.prepare_binary_op
    +    prepare_float_sub = helper.prepare_binary_op
    +    prepare_float_mul = helper.prepare_binary_op
    +    prepare_float_truediv = helper.prepare_binary_op
    +
    +    def _prepare_guard(self, op, args=None):
    +        if args is None:
    +            args = []
    +        args.append(imm(self.fm.get_frame_depth()))
    +        for arg in op.getfailargs():
    +            if arg:
    +                args.append(self.loc(arg))
    +            else:
    +                args.append(None)
    +        self.possibly_free_vars(op.getfailargs())
    +        #
    +        # generate_quick_failure() produces up to 14 instructions per guard
    +        self.limit_loop_break -= 14 * 4
    +        #
    +        return args
    +
    +    def load_condition_into_cc(self, box):
    +        if self.assembler.guard_success_cc == c.cond_none:
    +            xxx
    +            loc = self.ensure_reg(box)
    +            mc = self.assembler.mc
    +            mc.cmp_op(loc, l.imm(0), imm=True)
    +            self.assembler.guard_success_cc = c.NE
    +
    +    def _prepare_guard_cc(self, op):
    +        self.load_condition_into_cc(op.getarg(0))
    +        return self._prepare_guard(op)
    +
    +    prepare_guard_true = _prepare_guard_cc
    +    prepare_guard_false = _prepare_guard_cc
    +    prepare_guard_nonnull = _prepare_guard_cc
    +    prepare_guard_isnull = _prepare_guard_cc
    +
    +    def prepare_label(self, op):
    +        descr = op.getdescr()
    +        assert isinstance(descr, TargetToken)
    +        inputargs = op.getarglist()
    +        arglocs = [None] * len(inputargs)
    +        #
    +        # we use force_spill() on the boxes that are not going to be really
    +        # used any more in the loop, but that are kept alive anyway
    +        # by being in a next LABEL's or a JUMP's argument or fail_args
    +        # of some guard
    +        position = self.rm.position
    +        for arg in inputargs:
    +            assert not isinstance(arg, Const)
    +            if self.last_real_usage.get(arg, -1) <= position:
    +                self.force_spill_var(arg)
    +        #
    +        # we need to make sure that no variable is stored in spp (=r31)
    +        for arg in inputargs:
    +            assert self.loc(arg) is not r.SPP, (
    +                "variable stored in spp in prepare_label")
    +        self.rm.bindings_to_frame_reg.clear()
    +        #
    +        for i in range(len(inputargs)):
    +            arg = inputargs[i]
    +            assert not isinstance(arg, Const)
    +            loc = self.loc(arg)
    +            assert loc is not r.SPP
    +            arglocs[i] = loc
    +            if loc.is_reg():
    +                self.fm.mark_as_free(arg)
    +        #
    +        # if we are too close to the start of the loop, the label's target may
    +        # get overridden by redirect_call_assembler().  (rare case)
    +        self.flush_loop()
    +        #
    +        descr._zarch_arglocs = arglocs
    +        descr._ll_loop_code = self.assembler.mc.currpos()
    +        descr._zarch_clt = self.assembler.current_clt
    +        self.assembler.target_tokens_currently_compiling[descr] = None
    +        self.possibly_free_vars_for_op(op)
    +        #
    +        # if the LABEL's descr is precisely the target of the JUMP at the
    +        # end of the same loop, i.e. if what we are compiling is a single
    +        # loop that ends up jumping to this LABEL, then we can now provide
    +        # the hints about the expected position of the spilled variables.
    +        jump_op = self.final_jump_op
    +        if jump_op is not None and jump_op.getdescr() is descr:
    +            self._compute_hint_frame_locations_from_descr(descr)
     
         def prepare_finish(self, op):
             descr = op.getdescr()
    
    From noreply at buildbot.pypy.org  Wed Oct 28 07:02:27 2015
    From: noreply at buildbot.pypy.org (plan_rich)
    Date: Wed, 28 Oct 2015 12:02:27 +0100 (CET)
    Subject: [pypy-commit] pypy s390x-backend: adding jump instruction and
     working on correct assembly of guard failure
    Message-ID: <20151028110227.E71D51C14D6@cobra.cs.uni-duesseldorf.de>
    
    Author: Richard Plangger 
    Branch: s390x-backend
    Changeset: r80472:dd415ed2de42
    Date: 2015-10-28 12:01 +0100
    http://bitbucket.org/pypy/pypy/changeset/dd415ed2de42/
    
    Log:	adding jump instruction and working on correct assembly of guard
    	failure
    
    diff --git a/rpython/jit/backend/llsupport/jump.py b/rpython/jit/backend/llsupport/jump.py
    new file mode 100644
    --- /dev/null
    +++ b/rpython/jit/backend/llsupport/jump.py
    @@ -0,0 +1,109 @@
    +def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg):
    +    pending_dests = len(dst_locations)
    +    srccount = {}    # maps dst_locations to how many times the same
    +                     # location appears in src_locations
    +    for dst in dst_locations:
    +        key = dst.as_key()
    +        assert key not in srccount, "duplicate value in dst_locations!"
    +        srccount[key] = 0
    +    for i in range(len(dst_locations)):
    +        src = src_locations[i]
    +        if src.is_imm():
    +            continue
    +        key = src.as_key()
    +        if key in srccount:
    +            if key == dst_locations[i].as_key():
    +                # ignore a move "x = x"
    +                # setting any "large enough" negative value is ok, but
    +                # be careful of overflows, don't use -sys.maxint
    +                srccount[key] = -len(dst_locations) - 1
    +                pending_dests -= 1
    +            else:
    +                srccount[key] += 1
    +
    +    while pending_dests > 0:
    +        progress = False
    +        for i in range(len(dst_locations)):
    +            dst = dst_locations[i]
    +            key = dst.as_key()
    +            if srccount[key] == 0:
    +                srccount[key] = -1       # means "it's done"
    +                pending_dests -= 1
    +                src = src_locations[i]
    +                if not src.is_imm():
    +                    key = src.as_key()
    +                    if key in srccount:
    +                        srccount[key] -= 1
    +                _move(assembler, src, dst, tmpreg)
    +                progress = True
    +        if not progress:
    +            # we are left with only pure disjoint cycles
    +            sources = {}     # maps dst_locations to src_locations
    +            for i in range(len(dst_locations)):
    +                src = src_locations[i]
    +                dst = dst_locations[i]
    +                sources[dst.as_key()] = src
    +            #
    +            for i in range(len(dst_locations)):
    +                dst = dst_locations[i]
    +                originalkey = dst.as_key()
    +                if srccount[originalkey] >= 0:
    +                    assembler.regalloc_push(dst, 0)
    +                    while True:
    +                        key = dst.as_key()
    +                        assert srccount[key] == 1
    +                        # ^^^ because we are in a simple cycle
    +                        srccount[key] = -1
    +                        pending_dests -= 1
    +                        src = sources[key]
    +                        if src.as_key() == originalkey:
    +                            break
    +                        _move(assembler, src, dst, tmpreg)
    +                        dst = src
    +                    assembler.regalloc_pop(dst, 0)
    +            assert pending_dests == 0
    +
    +def _move(assembler, src, dst, tmpreg):
    +    if dst.is_stack() and src.is_stack():
    +        assembler.regalloc_mov(src, tmpreg)
    +        src = tmpreg
    +    assembler.regalloc_mov(src, dst)
    +
    +def remap_frame_layout_mixed(assembler,
    +                             src_locations1, dst_locations1, tmpreg1,
    +                             src_locations2, dst_locations2, tmpreg2):
    +    # find and push the fp stack locations from src_locations2 that
    +    # are going to be overwritten by dst_locations1
    +    # TODO
    +    from rpython.jit.backend.zarch.arch import WORD
    +    extrapushes = []
    +    dst_keys = {}
    +    for loc in dst_locations1:
    +        dst_keys[loc.as_key()] = None
    +    src_locations2red = []
    +    dst_locations2red = []
    +    for i in range(len(src_locations2)):
    +        loc    = src_locations2[i]
    +        dstloc = dst_locations2[i]
    +        if loc.is_stack():
    +            key = loc.as_key()
    +            if (key in dst_keys or (loc.width > WORD and
    +                                    (key + 1) in dst_keys)):
    +                assembler.regalloc_push(loc, len(extrapushes))
    +                extrapushes.append(dstloc)
    +                continue
    +        src_locations2red.append(loc)
    +        dst_locations2red.append(dstloc)
    +    src_locations2 = src_locations2red
    +    dst_locations2 = dst_locations2red
    +    #
    +    # remap the integer and pointer registers and stack locations
    +    remap_frame_layout(assembler, src_locations1, dst_locations1, tmpreg1)
    +    #
    +    # remap the fp registers and stack locations
    +    remap_frame_layout(assembler, src_locations2, dst_locations2, tmpreg2)
    +    #
    +    # finally, pop the extra fp stack locations
    +    while len(extrapushes) > 0:
    +        loc = extrapushes.pop()
    +        assembler.regalloc_pop(loc, len(extrapushes))
    diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
    --- a/rpython/jit/backend/zarch/assembler.py
    +++ b/rpython/jit/backend/zarch/assembler.py
    @@ -16,7 +16,8 @@
     from rpython.jit.metainterp.resoperation import rop
     from rpython.rlib.debug import (debug_print, debug_start, debug_stop,
                                     have_debug_prints)
    -from rpython.jit.metainterp.history import (INT, REF, FLOAT)
    +from rpython.jit.metainterp.history import (INT, REF, FLOAT,
    +        TargetToken)
     from rpython.rlib.rarithmetic import r_uint
     from rpython.rlib.objectmodel import we_are_translated, specialize, compute_unique_id
     from rpython.rlib import rgc
    @@ -208,6 +209,21 @@
             self.failure_recovery_code[exc + 2 * withfloats] = rawstart
             self.mc = None
     
    +    def generate_quick_failure(self, guardtok):
    +        startpos = self.mc.currpos()
    +        fail_descr, target = self.store_info_on_descr(startpos, guardtok)
    +        assert target != 0
    +        self.load_gcmap(self.mc, r.r2, gcmap=guardtok.gcmap)
    +        self.mc.write('\x00\x00\x00\x00')
    +        #load_imm(r.r0, target)
    +        #self.mc.mtctr(r.r0.value)
    +        #self.mc.load_imm(r.r0, fail_descr)
    +        #self.mc.bctr()
    +        # we need to write at least 6 insns here, for patch_jump_for_descr()
    +        #while self.mc.currpos() < startpos + 6 * 4:
    +        #    self.mc.trap()
    +        return startpos
    +
         def _build_wb_slowpath(self, withcards, withfloats=False, for_frame=False):
             pass # TODO
     
    @@ -309,9 +325,9 @@
             ops_offset = self.mc.ops_offset
             if not we_are_translated():
                 # used only by looptoken.dump() -- useful in tests
    -            looptoken._ppc_rawstart = rawstart
    -            looptoken._ppc_fullsize = full_size
    -            looptoken._ppc_ops_offset = ops_offset
    +            looptoken._zarch_rawstart = rawstart
    +            looptoken._zarch_fullsize = full_size
    +            looptoken._zarch_ops_offset = ops_offset
             looptoken._ll_function_addr = rawstart
             if logger:
                 logger.log_loop(inputargs, operations, 0, "rewritten",
    @@ -328,7 +344,7 @@
             frame_depth = regalloc.get_final_frame_depth()
             jump_target_descr = regalloc.jump_target_descr
             if jump_target_descr is not None:
    -            tgt_depth = jump_target_descr._ppc_clt.frame_info.jfi_frame_depth
    +            tgt_depth = jump_target_descr._zarch_clt.frame_info.jfi_frame_depth
                 target_frame_depth = tgt_depth - JITFRAME_FIXED_SIZE
                 frame_depth = max(frame_depth, target_frame_depth)
             return frame_depth
    @@ -375,7 +391,7 @@
                 # move to memory
                 elif loc.is_stack():
                     offset = loc.value
    -                self.mc.store(prev_loc, r.SPP, offset)
    +                self.mc.STG(prev_loc, l.addr(offset, r.SPP))
                     return
                 assert 0, "not supported location"
             elif prev_loc.is_imm_float():
    @@ -522,6 +538,24 @@
         def emit_label(self, op, arglocs, regalloc):
             pass
     
    +    def emit_jump(self, op, arglocs, regalloc):
    +        # The backend's logic assumes that the target code is in a piece of
    +        # assembler that was also called with the same number of arguments,
    +        # so that the locations [ebp+8..] of the input arguments are valid
    +        # stack locations both before and after the jump.
    +        #
    +        descr = op.getdescr()
    +        assert isinstance(descr, TargetToken)
    +        my_nbargs = self.current_clt._debug_nbargs
    +        target_nbargs = descr._zarch_clt._debug_nbargs
    +        assert my_nbargs == target_nbargs
    +
    +        if descr in self.target_tokens_currently_compiling:
    +            self.mc.b_offset(descr._ll_loop_code)
    +        else:
    +            self.mc.b_abs(descr._ll_loop_code)
    +
    +
         def emit_finish(self, op, arglocs, regalloc):
             base_ofs = self.cpu.get_baseofs_of_frame_field()
             if len(arglocs) > 1:
    @@ -552,7 +586,7 @@
                 gcmap = self._finish_gcmap
             else:
                 gcmap = lltype.nullptr(jitframe.GCMAP)
    -        # TODO self.load_gcmap(self.mc, r.r2, gcmap)
    +        self.load_gcmap(self.mc, r.r2, gcmap)
     
             assert fail_descr_loc.getint() <= 2**12-1
             self.mc.LGHI(r.r5, fail_descr_loc)
    @@ -566,8 +600,8 @@
         def load_gcmap(self, mc, reg, gcmap):
             # load the current gcmap into register 'reg'
             ptr = rffi.cast(lltype.Signed, gcmap)
    -        #mc.LGHI(mc.pool
    -        #mc.load_imm(reg, ptr)
    +        assert 0 <= ptr <= 2**15-1
    +        mc.LGHI(reg, loc.imm(ptr))
     
     def notimplemented_op(asm, op, arglocs, regalloc):
         print "[ZARCH/asm] %s not implemented" % op.getopname()
    diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py
    --- a/rpython/jit/backend/zarch/codebuilder.py
    +++ b/rpython/jit/backend/zarch/codebuilder.py
    @@ -95,6 +95,10 @@
         def currpos(self):
             return self.get_relative_pos()
     
    +    def b_offset(self, reladdr):
    +        offset = reladdr - self.get_relative_pos()
    +        self.BRC(l.imm(0xf), l.imm(offset))
    +
         def cmp_op(self, a, b, pool=False, signed=True, fp=False):
             if fp == True:
                 xxx
    diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
    --- a/rpython/jit/backend/zarch/opassembler.py
    +++ b/rpython/jit/backend/zarch/opassembler.py
    @@ -69,7 +69,9 @@
             token.pos_jump_offset = self.mc.currpos()
             assert token.guard_not_invalidated() == is_guard_not_invalidated
             if not is_guard_not_invalidated:
    -            self.mc.trap()     # has to be patched later on
    +            pass
    +            # TODO
    +            #self.mc.trap()     # has to be patched later on
             self.pending_guard_tokens.append(token)
     
         def build_guard_token(self, op, frame_depth, arglocs, fcond):
    diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
    --- a/rpython/jit/backend/zarch/regalloc.py
    +++ b/rpython/jit/backend/zarch/regalloc.py
    @@ -1,6 +1,7 @@
     from rpython.jit.backend.llsupport.regalloc import (RegisterManager, FrameManager,
                                                         TempVar, compute_vars_longevity,
                                                         BaseRegalloc)
    +from rpython.jit.backend.llsupport.jump import remap_frame_layout_mixed
     from rpython.jit.backend.zarch.arch import WORD
     from rpython.jit.codewriter import longlong
     from rpython.jit.backend.zarch.locations import imm, get_fp_offset
    @@ -561,6 +562,39 @@
             if jump_op is not None and jump_op.getdescr() is descr:
                 self._compute_hint_frame_locations_from_descr(descr)
     
    +    def prepare_jump(self, op):
    +        descr = op.getdescr()
    +        assert isinstance(descr, TargetToken)
    +        self.jump_target_descr = descr
    +        arglocs = self.assembler.target_arglocs(descr)
    +
    +        # get temporary locs
    +        tmploc = r.SCRATCH
    +        fptmploc = r.f0
    +
    +        # Part about non-floats
    +        src_locations1 = []
    +        dst_locations1 = []
    +        src_locations2 = []
    +        dst_locations2 = []
    +
    +        # Build the four lists
    +        for i in range(op.numargs()):
    +            box = op.getarg(i)
    +            src_loc = self.loc(box)
    +            dst_loc = arglocs[i]
    +            if box.type != FLOAT:
    +                src_locations1.append(src_loc)
    +                dst_locations1.append(dst_loc)
    +            else:
    +                src_locations2.append(src_loc)
    +                dst_locations2.append(dst_loc)
    +
    +        remap_frame_layout_mixed(self.assembler,
    +                                 src_locations1, dst_locations1, tmploc,
    +                                 src_locations2, dst_locations2, fptmploc)
    +        return []
    +
         def prepare_finish(self, op):
             descr = op.getdescr()
             fail_descr = cast_instance_to_gcref(descr)
    diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py
    --- a/rpython/jit/backend/zarch/registers.py
    +++ b/rpython/jit/backend/zarch/registers.py
    @@ -13,6 +13,7 @@
     RETURN = r14
     POOL = r13
     SPP = r11
    +SCRATCH = r0
     
     [f0,f1,f2,f3,f4,f5,f6,f7,f8,
      f9,f10,f11,f12,f13,f14,f15] = fpregisters
    diff --git a/rpython/jit/backend/zarch/runner.py b/rpython/jit/backend/zarch/runner.py
    --- a/rpython/jit/backend/zarch/runner.py
    +++ b/rpython/jit/backend/zarch/runner.py
    @@ -1,7 +1,8 @@
     from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU
    +from rpython.jit.backend.zarch import registers as r
     from rpython.jit.backend.zarch.assembler import AssemblerZARCH
    +from rpython.rlib import rgc
     from rpython.rtyper.lltypesystem import lltype, llmemory
    -from rpython.rlib import rgc
     
     class AbstractZARCHCPU(AbstractLLCPU):
         def __init__(self, rtyper, stats, opts=None, translate_support_code=False,
    @@ -18,6 +19,15 @@
     class CPU_S390_64(AbstractZARCHCPU):
         supports_floats = True
     
    +    IS_64_BIT = True
    +
    +    frame_reg = r.SP
    +    all_reg_indexes = [-1] * 32
    +    for _i, _r in enumerate(r.MANAGED_REGS):
    +        all_reg_indexes[_r.value] = _i
    +    gen_regs = r.MANAGED_REGS
    +    float_regs = r.MANAGED_FP_REGS
    +
         def setup(self):
             self.assembler = AssemblerZARCH(self)
     
    
    From noreply at buildbot.pypy.org  Wed Oct 28 08:58:31 2015
    From: noreply at buildbot.pypy.org (arigo)
    Date: Wed, 28 Oct 2015 13:58:31 +0100 (CET)
    Subject: [pypy-commit] pypy.org extradoc: PowerPC64 binaries
    Message-ID: <20151028125831.DF0591C0776@cobra.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r648:76d8ec204c42
    Date: 2015-10-28 13:59 +0100
    http://bitbucket.org/pypy/pypy.org/changeset/76d8ec204c42/
    
    Log:	PowerPC64 binaries
    
    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -85,6 +85,8 @@
     * `FreeBSD 9.2 x86 64 bit`__ (see ``[1]`` below)
     * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library
       installer `vcredist_x86.exe`_.)
    +* `PowerPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below)
    +* `PowerPC64le Linux binary (64bit little-endian, Fedora 21)`__ (see ``[1]`` below)
     * `Source (tar.bz2)`__; `Source (zip)`__.  See below for more about the sources.
     * `All our downloads,`__ including previous versions.  We also have a
       mirror_, but please use only if you have troubles accessing the links above
    @@ -97,6 +99,8 @@
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-osx64.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-freebsd64.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-win32.zip
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64le.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-src.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
    @@ -381,6 +385,8 @@
         57722fd5fc01734839ecc523ce965fbb  pypy-4.0.0-src.tar.bz2
         8e90eed8aea1686d98c2e7dce5bda1e0  pypy-4.0.0-src.zip
         fb013ec74edbf8dfd4f059e934fd4e5c  pypy-4.0.0-win32.zip
    +    911e91b80f18b1bd3ddaf4905d077568  pypy-4.0.0-ppc64.tar.bz2
    +    ade07e0fd3eb62155829680898b73c3c  pypy-4.0.0-ppc64le.tar.bz2
     
     pypy3-2.4.0 md5::
     
    @@ -413,6 +419,8 @@
         b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
         24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
         7369248180bdc9c6055272b1dedb3b3a70181497  pypy-4.0.0-win32.zip
    +    b5c930aa70ef602daeacf9cc0cd80c1587158eaa  pypy-4.0.0-ppc64.tar.bz2
    +    47c34b8a1ba418bf3e06c4741778ad198e2f3bba  pypy-4.0.0-ppc64le.tar.bz2
     
     pypy3-2.4.0 sha1::
     
    
    From noreply at buildbot.pypy.org  Wed Oct 28 08:59:25 2015
    From: noreply at buildbot.pypy.org (arigo)
    Date: Wed, 28 Oct 2015 13:59:25 +0100 (CET)
    Subject: [pypy-commit] pypy.org extradoc: regen
    Message-ID: <20151028125925.828DF1C0776@cobra.cs.uni-duesseldorf.de>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r649:5dc2016d3b95
    Date: 2015-10-28 14:00 +0100
    http://bitbucket.org/pypy/pypy.org/changeset/5dc2016d3b95/
    
    Log:	regen
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -74,7 +74,7 @@
     performance improvements.

    We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:

    @@ -125,6 +125,8 @@
  • FreeBSD 9.2 x86 64 bit (see [1] below)
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • +
  • PowerPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • +
  • PowerPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
  • Source (tar.bz2); Source (zip). See below for more about the sources.
  • All our downloads, including previous versions. We also have a mirror, but please use only if you have troubles accessing the links above
  • @@ -353,6 +355,8 @@ 57722fd5fc01734839ecc523ce965fbb pypy-4.0.0-src.tar.bz2 8e90eed8aea1686d98c2e7dce5bda1e0 pypy-4.0.0-src.zip fb013ec74edbf8dfd4f059e934fd4e5c pypy-4.0.0-win32.zip +911e91b80f18b1bd3ddaf4905d077568 pypy-4.0.0-ppc64.tar.bz2 +ade07e0fd3eb62155829680898b73c3c pypy-4.0.0-ppc64le.tar.bz2

    pypy3-2.4.0 md5:

    @@ -383,6 +387,8 @@
     b66a6a8c3ecdf6f62b13931cd2919dd160e1249b  pypy-4.0.0-src.tar.bz2
     24550900db0a1a0b1a2554cb1fa480cd0ae2fbfd  pypy-4.0.0-src.zip
     7369248180bdc9c6055272b1dedb3b3a70181497  pypy-4.0.0-win32.zip
    +b5c930aa70ef602daeacf9cc0cd80c1587158eaa  pypy-4.0.0-ppc64.tar.bz2
    +47c34b8a1ba418bf3e06c4741778ad198e2f3bba  pypy-4.0.0-ppc64le.tar.bz2
     

    pypy3-2.4.0 sha1:

    @@ -404,8 +410,8 @@
     
     

    Docutils System Messages

    -
    -

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    +
    +

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    Unknown target name: “what's new in pypy4.0.0?”.
    From noreply at buildbot.pypy.org Wed Oct 28 11:04:15 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Wed, 28 Oct 2015 16:04:15 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: towards a correct guard_quick_failure Message-ID: <20151028150415.618961C0776@cobra.cs.uni-duesseldorf.de> Author: Richard Plangger Branch: s390x-backend Changeset: r80473:2c01ef6261a8 Date: 2015-10-28 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/2c01ef6261a8/ Log: towards a correct guard_quick_failure diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -31,6 +31,20 @@ self.rel_offset = 0 self.offset = 0 self.places = [] + self.offset_map = {} + + def load_gcmap(self, mc, reg, gcmap): + # load the current gcmap into register 'reg' + if gcmap == 0x0: + mc.XGR(reg, reg) + return + ptr = rffi.cast(lltype.Signed, gcmap) + mc.LG(reg, self.pooled_imm(ptr)) + + def pooled_imm(self, ident): + if not we_are_translated(): + assert ident in self.offset_map + return l.addr(r.r13, self.offset_map[ident]) def place(self, var): assert var.is_constant() @@ -213,12 +227,13 @@ startpos = self.mc.currpos() fail_descr, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 - self.load_gcmap(self.mc, r.r2, gcmap=guardtok.gcmap) - self.mc.write('\x00\x00\x00\x00') + self.pool.load_gcmap(self.mc, r.r2, gcmap=guardtok.gcmap) #load_imm(r.r0, target) - #self.mc.mtctr(r.r0.value) + target_addr, fail_descr_addr = pool.pool_quick_failure(target, fail_descr) + self.mc.LG(r.r0, target_addr) #self.mc.load_imm(r.r0, fail_descr) - #self.mc.bctr() + self.mc.LG(r.r2, fail_descr_addr) + self.BRC(l.imm(0xf), l.imm(offset)) # we need to write at least 6 insns here, for patch_jump_for_descr() #while self.mc.currpos() < startpos + 6 * 4: # self.mc.trap() @@ -586,7 +601,7 @@ gcmap = self._finish_gcmap else: gcmap = lltype.nullptr(jitframe.GCMAP) - 
self.load_gcmap(self.mc, r.r2, gcmap) + self.pool.load_gcmap(self.mc, r.r2, gcmap) assert fail_descr_loc.getint() <= 2**12-1 self.mc.LGHI(r.r5, fail_descr_loc) @@ -597,12 +612,6 @@ # exit function self._call_footer() - def load_gcmap(self, mc, reg, gcmap): - # load the current gcmap into register 'reg' - ptr = rffi.cast(lltype.Signed, gcmap) - assert 0 <= ptr <= 2**15-1 - mc.LGHI(reg, loc.imm(ptr)) - def notimplemented_op(asm, op, arglocs, regalloc): print "[ZARCH/asm] %s not implemented" % op.getopname() raise NotImplementedError(op) From noreply at buildbot.pypy.org Thu Oct 29 05:20:49 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Oct 2015 10:20:49 +0100 (CET) Subject: [pypy-commit] pypy default: allow indexing by lists of floats Message-ID: <20151029092049.76BA01C0FF4@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80474:1351a22e9a74 Date: 2015-10-29 09:10 +1100 http://bitbucket.org/pypy/pypy/changeset/1351a22e9a74/ Log: allow indexing by lists of floats diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -144,7 +144,7 @@ return [], w_index.get_shape(), w_index.get_shape(), [w_index] w_lst = space.listview(w_index) for w_item in w_lst: - if not space.isinstance_w(w_item, space.w_int): + if not (space.isinstance_w(w_item, space.w_int) or space.isinstance_w(w_item, space.w_float)): break else: arr = convert_to_array(space, w_index) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -676,6 +676,12 @@ a[self.CustomIntObject(1)] = 100 assert a[1] == 100 + def test_setitem_list_of_float(self): + from numpy import arange + a = arange(10) + a[[0.9]] = -10 + assert a[0] == -10 + def test_delitem(self): import numpy as np a = np.arange(10) From noreply at buildbot.pypy.org 
Thu Oct 29 05:20:51 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Oct 2015 10:20:51 +0100 (CET) Subject: [pypy-commit] pypy default: test, fix extra quotes in astype() for object to str ndarray conversion Message-ID: <20151029092051.B293C1C1237@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80475:cdb5f895892c Date: 2015-10-29 09:53 +1100 http://bitbucket.org/pypy/pypy/changeset/cdb5f895892c/ Log: test, fix extra quotes in astype() for object to str ndarray conversion diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -172,6 +172,7 @@ a = np.array([b'a' * 100], dtype='O') assert 'a' * 100 in str(a) b = a.astype('S') + assert b.dtype == 'S100' assert 'a' * 100 in str(b) a = np.array([123], dtype='U') assert a[0] == u'123' diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1891,6 +1891,12 @@ return self.BoxType(w_obj) def str_format(self, box, add_quotes=True): + if not add_quotes: + as_str = self.space.str_w(self.space.repr(self.unbox(box))) + as_strl = len(as_str) - 1 + if as_strl>1 and as_str[0] == "'" and as_str[as_strl] == "'": + as_str = as_str[1:as_strl] + return as_str return self.space.str_w(self.space.repr(self.unbox(box))) def runpack_str(self, space, s, native): From noreply at buildbot.pypy.org Thu Oct 29 05:20:53 2015 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 29 Oct 2015 10:20:53 +0100 (CET) Subject: [pypy-commit] pypy default: test, tweak handling of 'order' in various ndarray creation methods Message-ID: <20151029092053.CFF321C1279@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80476:393569a1c0ba Date: 2015-10-29 16:58 +1100 http://bitbucket.org/pypy/pypy/changeset/393569a1c0ba/ Log: test, tweak handling of 
'order' in various ndarray creation methods diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -23,11 +23,14 @@ ''' argsort (return the indices to sort) a list of strides ''' - def __init__(self, rangelist, strides): + def __init__(self, rangelist, strides, order): self.strides = strides + self.order = order TimSort.__init__(self, rangelist) def lt(self, a, b): + if self.order == NPY.CORDER: + return self.strides[a] <= self.strides[b] return self.strides[a] < self.strides[b] @@ -311,12 +314,9 @@ backstrides, shape, self, orig_array) def copy(self, space, order=NPY.ANYORDER): - order = support.get_order_as_CF(self.order, order) - strides, backstrides = calc_strides(self.get_shape(), self.dtype, - order) - impl = ConcreteArray(self.get_shape(), self.dtype, order, strides, - backstrides) - return loop.setslice(space, self.get_shape(), impl, self) + if order == NPY.ANYORDER: + order = NPY.KEEPORDER + return self.astype(space, self.dtype, order, copy=True) def create_iter(self, shape=None, backward_broadcast=False): if shape is not None and \ @@ -363,7 +363,7 @@ def get_buffer(self, space, readonly): return ArrayBuffer(self, readonly) - def astype(self, space, dtype, order): + def astype(self, space, dtype, order, copy=True): # copy the general pattern of the strides # but make the array storage contiguous in memory shape = self.get_shape() @@ -377,18 +377,21 @@ t_strides, backstrides = calc_strides(shape, dtype, order) else: indx_array = range(len(strides)) - list_sorter = StrideSort(indx_array, strides) + list_sorter = StrideSort(indx_array, strides, self.order) list_sorter.sort() t_elsize = dtype.elsize t_strides = strides[:] base = dtype.elsize + print 'start strides, shape, indx_array', strides, shape, indx_array for i in indx_array: t_strides[i] = base base *= shape[i] + print 'final strides', t_strides backstrides = calc_backstrides(t_strides, 
shape) order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) - loop.setslice(space, impl.get_shape(), impl, self) + if copy: + loop.setslice(space, impl.get_shape(), impl, self) return impl OBJECTSTORE = lltype.GcStruct('ObjectStore', diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -103,15 +103,18 @@ copy = False dtype = descriptor.decode_w_dtype(space, w_dtype) - if space.is_none(w_order): - w_order = space.wrap('C') - npy_order = order_converter(space, w_order, NPY.CORDER) if isinstance(w_object, W_NDimArray): - if (dtype is None or w_object.get_dtype() is dtype): - if copy and (subok or type(w_object) is W_NDimArray): - return w_object.descr_copy(space, w_order) - elif not copy and (subok or type(w_object) is W_NDimArray): + npy_order = order_converter(space, w_order, NPY.ANYORDER) + if (dtype is None or w_object.get_dtype() is dtype) and (subok or + type(w_object) is W_NDimArray): + flags = w_object.get_flags() + must_copy = copy + must_copy |= (npy_order == NPY.CORDER and not flags & NPY.ARRAY_C_CONTIGUOUS) + must_copy |= (npy_order == NPY.FORTRANORDER and not flags & NPY.ARRAY_F_CONTIGUOUS) + if must_copy: + return w_object.descr_copy(space, space.wrap(npy_order)) + else: return w_object if subok and not type(w_object) is W_NDimArray: raise oefmt(space.w_NotImplementedError, @@ -124,7 +127,8 @@ copy = True if copy: shape = w_object.get_shape() - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=npy_order) + order = support.get_order_as_CF(w_object.get_order(), npy_order) + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, w_object.implementation.getitem(0))) @@ -148,6 +152,7 @@ w_base=w_base, strides=imp.strides, start=imp.start) else: # not an array + npy_order = order_converter(space, 
w_order, NPY.CORDER) shape, elems_w = find_shape_and_elems(space, w_object, dtype) if dtype is None and space.isinstance_w(w_object, space.w_buffer): dtype = descriptor.get_dtype_cache(space).w_uint8dtype @@ -271,6 +276,7 @@ def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero): + # w_order can be None, str, or boolean order = order_converter(space, w_order, NPY.CORDER) dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -16,6 +16,7 @@ self.dtype = base.get_dtype() self.shape = [base.get_size()] self.storage = self._base.implementation.storage + self.order = base.get_order() def base(self): return self._base diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -370,11 +370,11 @@ def descr_copy(self, space, w_order=None): if w_order is None: - order = NPY.KEEPORDER + order = NPY.CORDER elif space.isinstance_w(w_order, space.w_int): order = space.int_w(w_order) else: - order = order_converter(space, w_order, NPY.KEEPORDER) + order = order_converter(space, w_order, NPY.CORDER) copy = self.implementation.copy(space, order) w_subtype = space.type(self) return wrap_impl(space, w_subtype, self, copy) diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -47,3 +47,104 @@ b = np.array([4,5,6,7]) assert a.flags == b.flags assert not a.flags != b.flags + + def test_copy_order(self): + import numpy as np + tst = np.ones((10, 1), order='C').flags.f_contiguous + NPY_RELAXED_STRIDES_CHECKING = tst + a = np.arange(24).reshape(2, 1, 3, 4) + b = a.copy(order='F') + c = 
np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3) + + def check_copy_result(x, y, ccontig, fcontig, strides=False): + assert x is not y + assert (x == y).all() + assert res.flags.c_contiguous == ccontig + assert res.flags.f_contiguous == fcontig + # This check is impossible only because + # NPY_RELAXED_STRIDES_CHECKING changes the strides actively + if not NPY_RELAXED_STRIDES_CHECKING: + if strides: + assert x.strides == y.strides + else: + assert x.strides != y.strides + + # Validate the initial state of a, b, and c + assert a.flags.c_contiguous + assert not a.flags.f_contiguous + assert not b.flags.c_contiguous + assert b.flags.f_contiguous + assert not c.flags.c_contiguous + assert not c.flags.f_contiguous + + # Copy with order='C' + res = a.copy(order='C') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='C') + check_copy_result(res, b, ccontig=True, fcontig=False, strides=False) + res = c.copy(order='C') + check_copy_result(res, c, ccontig=True, fcontig=False, strides=False) + + # Copy with order='F' + res = a.copy(order='F') + check_copy_result(res, a, ccontig=False, fcontig=True, strides=False) + res = b.copy(order='F') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='F') + check_copy_result(res, c, ccontig=False, fcontig=True, strides=False) + + # Copy with order='K' + res = a.copy(order='K') + check_copy_result(res, a, ccontig=True, fcontig=False, strides=True) + res = b.copy(order='K') + check_copy_result(res, b, ccontig=False, fcontig=True, strides=True) + res = c.copy(order='K') + check_copy_result(res, c, ccontig=False, fcontig=False, strides=True) + + def test_contiguous_flags(self): + import numpy as np + tst = np.ones((10, 1), order='C').flags.f_contiguous + NPY_RELAXED_STRIDES_CHECKING = tst + a = np.ones((4, 4, 1))[::2,:,:] + if NPY_RELAXED_STRIDES_CHECKING: + a.strides = a.strides[:2] + (-123,) + b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4) + + def 
check_contig(a, ccontig, fcontig): + assert a.flags.c_contiguous == ccontig + assert a.flags.f_contiguous == fcontig + + # Check if new arrays are correct: + check_contig(a, False, False) + check_contig(b, False, False) + if NPY_RELAXED_STRIDES_CHECKING: + check_contig(np.empty((2, 2, 0, 2, 2)), True, True) + check_contig(np.array([[[1], [2]]], order='F'), True, True) + else: + check_contig(np.empty((2, 2, 0, 2, 2)), True, False) + check_contig(np.array([[[1], [2]]], order='F'), False, True) + check_contig(np.empty((2, 2)), True, False) + check_contig(np.empty((2, 2), order='F'), False, True) + + # Check that np.array creates correct contiguous flags: + check_contig(np.array(a, copy=False), False, False) + check_contig(np.array(a, copy=False, order='C'), True, False) + check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True) + + if NPY_RELAXED_STRIDES_CHECKING: + # Check slicing update of flags and : + check_contig(a[0], True, True) + check_contig(a[None, ::4, ..., None], True, True) + check_contig(b[0, 0, ...], False, True) + check_contig(b[:,:, 0:0,:,:], True, True) + else: + # Check slicing update of flags: + check_contig(a[0], True, False) + # Would be nice if this was C-Contiguous: + check_contig(a[None, 0, ..., None], False, False) + check_contig(b[0, 0, 0, ...], False, True) + + # Test ravel and squeeze. 
+ check_contig(a.ravel(), True, True) + check_contig(np.ones((1, 3, 1)).squeeze(), True, True) + From noreply at buildbot.pypy.org Thu Oct 29 05:26:50 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Thu, 29 Oct 2015 10:26:50 +0100 (CET) Subject: [pypy-commit] pypy default: issue #1383 fixed Message-ID: <20151029092650.8E7041C1237@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80477:bffa98819224 Date: 2015-10-29 10:26 +0100 http://bitbucket.org/pypy/pypy/changeset/bffa98819224/ Log: issue #1383 fixed don't accept None as encoding in various places diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -199,13 +199,9 @@ if w_source is None: w_source = space.wrap('') - if w_encoding is None: - w_encoding = space.w_None - if w_errors is None: - w_errors = space.w_None # Unicode argument - if not space.is_w(w_encoding, space.w_None): + if w_encoding is not None: from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, encode_object ) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -16,8 +16,8 @@ from pypy.objspace.std.formatting import mod_format from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.unicodeobject import ( - _get_encoding_and_errors, decode_object, unicode_from_encoded_object, - unicode_from_string) + decode_object, unicode_from_encoded_object, + unicode_from_string, getdefaultencoding) class W_AbstractBytesObject(W_Root): @@ -39,13 +39,8 @@ def unicode_w(self, space): # Use the default encoding. 
- w_defaultencoding = space.call_function(space.sys.get( - 'getdefaultencoding')) - encoding, errors = _get_encoding_and_errors(space, w_defaultencoding, - space.w_None) - if encoding is None and errors is None: - return space.unicode_w(unicode_from_string(space, self)) - return space.unicode_w(decode_object(space, self, encoding, errors)) + encoding = getdefaultencoding(space) + return space.unicode_w(decode_object(space, self, encoding, None)) def descr_add(self, space, w_other): """x.__add__(y) <==> x+y""" diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -992,3 +992,10 @@ assert u''.join([s1]) is not s1 s2 = StrSubclass(u'a') assert u''.join([s2]) is not s2 + + def test_encoding_and_errors_cant_be_none(self): + raises(TypeError, "''.decode(None)") + raises(TypeError, "u''.encode(None)") + raises(TypeError, "unicode('', encoding=None)") + raises(TypeError, 'u"".encode("utf-8", None)') + diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -427,8 +427,8 @@ def _get_encoding_and_errors(space, w_encoding, w_errors): - encoding = None if space.is_none(w_encoding) else space.str_w(w_encoding) - errors = None if space.is_none(w_errors) else space.str_w(w_errors) + encoding = None if w_encoding is None else space.str_w(w_encoding) + errors = None if w_errors is None else space.str_w(w_errors) return encoding, errors From noreply at buildbot.pypy.org Thu Oct 29 11:18:44 2015 From: noreply at buildbot.pypy.org (plan_rich) Date: Thu, 29 Oct 2015 16:18:44 +0100 (CET) Subject: [pypy-commit] pypy s390x-backend: correctly jumping out of the program after guard failure, but there is something wrong with the saving to the dead frame Message-ID: <20151029151844.0369B1C12B2@cobra.cs.uni-duesseldorf.de> 
Author: Richard Plangger Branch: s390x-backend Changeset: r80481:b39bf57b3e2f Date: 2015-10-29 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/b39bf57b3e2f/ Log: correctly jumping out of the program after guard failure, but there is something wrong with the saving to the dead frame diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -138,18 +138,17 @@ # overwrite the gcmap in the pool offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET - self.pool.overwrite_64(self.mc, offset, target) - self.mc.LG(r.r2, l.pool(offset)) + self.mc.LG(r.r3, l.pool(offset)) # overwrite the target in pool offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET self.pool.overwrite_64(self.mc, offset, target) - self.mc.LG(r.r0, l.pool(offset)) + self.mc.LG(r.r14, l.pool(offset)) # TODO what is the biggest integer an opaque pointer # can have? if not < 2**15-1 then we need to put it on the pool - self.mc.LGHI(r.r3, l.imm(fail_descr)) - self.mc.BCR(l.imm(0xf), r.r0) + self.mc.LGHI(r.r2, l.imm(fail_descr)) + self.mc.BCR(l.imm(0xf), r.r14) # TODO do we need to patch this memory region? 
# we need to write at least 6 insns here, for patch_jump_for_descr() #while self.mc.currpos() < startpos + 6 * 4: @@ -406,7 +405,6 @@ if not tok.guard_not_invalidated(): mc = InstrBuilder() mc.b_cond_offset(relative_target, tok.fcond) - import pdb; pdb.set_trace() mc.copy_to_raw_memory(addr) else: # GUARD_NOT_INVALIDATED, record an entry in diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -217,6 +217,7 @@ return ImmLocation(i) def pool(off, float=False): + print "loading pool", off return PoolLoc(off, float) def get_fp_offset(base_ofs, position): diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -3,6 +3,8 @@ from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) from rpython.rtyper.lltypesystem import lltype, rffi, llmemory +from rpython.jit.backend.zarch.arch import (WORD, + RECOVERY_GCMAP_POOL_OFFSET, RECOVERY_TARGET_POOL_OFFSET) class LiteralPool(object): def __init__(self): @@ -24,10 +26,12 @@ def reserve_literal(self, size): self.size += size + print "resized to", self.size, "(+",size,")" def reset(self): + self.pool_start = 0 self.size = 0 - self.rel_offset = 0 + self.offset = 0 def walk_operations(self, operations): # O(len(operations)). 
I do not think there is a way @@ -57,12 +61,13 @@ self.size += 1 assert self.size < 2**16-1 mc.BRAS(r.POOL, l.imm(self.size+mc.BRAS._byte_count)) - self.pool_offset = mc.get_relative_pos() + self.pool_start = mc.get_relative_pos() mc.write('\x00' * self.size) - print "pool with %d bytes %d // 8" % (self.size, self.size // 8) + print "pool with %d quad words" % (self.size // 8) def overwrite_64(self, mc, index, value): - print("value", hex(value), "at", index) + index += self.pool_start + print("value", hex(value), "at", index - self.pool_start) mc.overwrite(index, chr(value >> 56 & 0xff)) mc.overwrite(index+1, chr(value >> 48 & 0xff)) mc.overwrite(index+2, chr(value >> 40 & 0xff)) @@ -76,6 +81,7 @@ if self.size == 0: return for val, offset in self.offset_map.items(): + print val, offset if val.is_constant(): if val.type == FLOAT: self.overwrite_64(mc, offset, float2longlong(val.value)) @@ -91,6 +97,6 @@ offset = self.offset_map[descr] guard_token._pool_offset = offset ptr = rffi.cast(lltype.Signed, guard_token.gcmap) - self.overwrite_64(mc, offset + 8, ptr) + self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) self.offset_map.clear() From noreply at buildbot.pypy.org Thu Oct 29 11:34:08 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Oct 2015 16:34:08 +0100 (CET) Subject: [pypy-commit] pypy vmprof-newstack: bump the OS X min version Message-ID: <20151029153408.945551C1391@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: vmprof-newstack Changeset: r80482:37f4ca60d4ee Date: 2015-10-29 14:56 +0000 http://bitbucket.org/pypy/pypy/changeset/37f4ca60d4ee/ Log: bump the OS X min version diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -5,9 +5,10 @@ # # Although Intel 32bit is supported since Apple Mac OS X 10.4, (and PPC since, ever) # the @rpath handling used in Darwin._args_for_shared is only availabe -# since 
10.5, so we use that as minimum requirement. +# since 10.5, so we use that as minimum requirement. We bump it to 10.6 +# since 10.11 does not ship with 10.5 libs any more # -DARWIN_VERSION_MIN = '-mmacosx-version-min=10.5' +DARWIN_VERSION_MIN = '-mmacosx-version-min=10.6' class Darwin(posix.BasePosix): name = "darwin" From noreply at buildbot.pypy.org Thu Oct 29 11:37:40 2015 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 29 Oct 2015 16:37:40 +0100 (CET) Subject: [pypy-commit] pypy default: manually port b86e4b638d20, I dont get hg Message-ID: <20151029153740.564B71C1391@cobra.cs.uni-duesseldorf.de> Author: fijal Branch: Changeset: r80483:09c74c7aac71 Date: 2015-10-29 15:37 +0000 http://bitbucket.org/pypy/pypy/changeset/09c74c7aac71/ Log: manually port b86e4b638d20, I dont get hg diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -5,9 +5,10 @@ # # Although Intel 32bit is supported since Apple Mac OS X 10.4, (and PPC since, ever) # the @rpath handling used in Darwin._args_for_shared is only availabe -# since 10.5, so we use that as minimum requirement. +# since 10.5, so we use that as minimum requirement. 
Bumped to 10.6 +# because 10.11 does not ship with 10.5 versions of libs # -DARWIN_VERSION_MIN = '-mmacosx-version-min=10.5' +DARWIN_VERSION_MIN = '-mmacosx-version-min=10.6' class Darwin(posix.BasePosix): name = "darwin" From noreply at buildbot.pypy.org Thu Oct 29 12:11:59 2015 From: noreply at buildbot.pypy.org (sbauman) Date: Thu, 29 Oct 2015 17:11:59 +0100 (CET) Subject: [pypy-commit] pypy remove-getfield-pure: Ignore getfield operations in pure pass Message-ID: <20151029161159.8543A1C1237@cobra.cs.uni-duesseldorf.de> Author: Spenser Bauman Branch: remove-getfield-pure Changeset: r80484:64ea6827371e Date: 2015-10-28 21:06 -0400 http://bitbucket.org/pypy/pypy/changeset/64ea6827371e/ Log: Ignore getfield operations in pure pass diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -493,6 +493,10 @@ return pendingfields def optimize_GETFIELD_GC_I(self, op): + if op.is_always_pure() and self.get_constant_box(op.getarg(0)): + resbox = self.optimizer.constant_fold(op) + self.optimizer.make_constant(op, resbox) + return structinfo = self.ensure_ptr_info_arg0(op) cf = self.field_cache(op.getdescr()) field = cf.getfield_from_cache(self, structinfo, op.getdescr()) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -74,7 +74,7 @@ dispatch_opt(self, op) def optimize_default(self, op): - canfold = op.is_always_pure() + canfold = op.is_always_pure() and not op.is_getfield() if op.is_ovf(): self.postponed_op = op return @@ -220,7 +220,7 @@ def produce_potential_short_preamble_ops(self, sb): ops = self.optimizer._newoperations for i, op in enumerate(ops): - if op.is_always_pure(): + if op.is_always_pure() and not op.is_getfield(): sb.add_pure_op(op) if op.is_ovf() and ops[i + 
1].getopnum() == rop.GUARD_NO_OVERFLOW: sb.add_pure_op(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -8778,7 +8778,7 @@ def test_virtual_back_and_forth(self): ops = """ [p0] - p1 = getfield_gc_r(p0, descr=valuedescr3) + p1 = getfield_gc_r(p0, descr=nextdescr3) ptemp = new_with_vtable(descr=nodesize) setfield_gc(ptemp, p1, descr=nextdescr) p2 = getfield_gc_r(ptemp, descr=nextdescr) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -227,7 +227,7 @@ jump(p0) """ es, loop, preamble = self.optimize(loop) - assert len(es.short_boxes) == 7 + assert len(es.short_boxes) == 4 # both getfields are available as # well as getfield_gc From noreply at buildbot.pypy.org Fri Oct 30 02:16:41 2015 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 30 Oct 2015 07:16:41 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: note that freebsd download does not exist Message-ID: <20151030061641.5937D1C1279@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: extradoc Changeset: r650:50ee6dfbcc99 Date: 2015-10-30 17:17 +1100 http://bitbucket.org/pypy/pypy.org/changeset/50ee6dfbcc99/ Log: note that freebsd download does not exist diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -74,7 +74,7 @@ performance improvements.

    We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:

    @@ -122,7 +122,7 @@
  • ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring) (see [1] below)
  • ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise) (see [1] below)
  • Mac OS/X binary (64bit)
  • -
  • FreeBSD 9.2 x86 64 bit (see [1] below)
  • +
  • FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • PowerPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • @@ -345,7 +345,6 @@

    Here are the checksums for each of the downloads

    pypy-4.0.0 md5:

    -c616cffee0f344c37fd4e045a7a87054  pypy-4.0.0-freebsd64.tar.bz2
     82b11e63ab81db1604575dadd5cea427  pypy-4.0.0-linux64.tar.bz2
     f91946d5abd5dff8e05ab0b5acffb432  pypy-4.0.0-linux-armel.tar.bz2
     1db3ae7237a8a01f61b3b4ade65684ab  pypy-4.0.0-linux-armhf-raring.tar.bz2
    @@ -377,7 +376,6 @@
     

    pypy-4.0.0 sha1:

    -8de2a247e26872790090b7a7bc9128d263456ada  pypy-4.0.0-freebsd64.tar.bz2
     aed958fdc720b77fdd52cb826239ccbd6d01f465  pypy-4.0.0-linux64.tar.bz2
     65b50e0299dc0695a8460c14b401c783216464b1  pypy-4.0.0-linux-armel.tar.bz2
     663afb7b0d77ddf53c78d49dbc36c6e8349c7fbb  pypy-4.0.0-linux-armhf-raring.tar.bz2
    @@ -408,12 +406,6 @@
     be94460bed8b2682880495435c309b6611ae2c31  pypy-1.8-sandbox-linux.tar.bz2
     
    -
    -

    Docutils System Messages

    -
    -

    System Message: ERROR/3 ([dynamic-text], line 12); backlink

    -Unknown target name: “what's new in pypy4.0.0?”.
    -
    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -14,7 +14,7 @@ We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for: -* the Python2.7 compatible release — **PyPy 4.0.0** — (`what's new in PyPy4.0.0?`_) +* the Python2.7 compatible release — **PyPy 4.0.0** — (`what's new in PyPy 4.0.0?`_) * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_). * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only) @@ -82,7 +82,7 @@ * `ARM Hardfloat Linux binary (ARMHF/gnueabihf, tar.bz2, Ubuntu Raring)`__ (see ``[1]`` below) * `ARM Softfloat Linux binary (ARMEL/gnueabi, tar.bz2, Ubuntu Precise)`__ (see ``[1]`` below) * `Mac OS/X binary (64bit)`__ -* `FreeBSD 9.2 x86 64 bit`__ (see ``[1]`` below) +* FreeBSD 9.2 x86 64 bit *(hopefully availabe soon)* (see ``[1]`` below) * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library installer `vcredist_x86.exe`_.) * `PowerPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below) @@ -97,7 +97,6 @@ .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux-armhf-raring.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-linux-armel.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-osx64.tar.bz2 -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-freebsd64.tar.bz2 .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-win32.zip .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64.tar.bz2 .. 
__: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64le.tar.bz2 @@ -375,7 +374,6 @@ pypy-4.0.0 md5:: - c616cffee0f344c37fd4e045a7a87054 pypy-4.0.0-freebsd64.tar.bz2 82b11e63ab81db1604575dadd5cea427 pypy-4.0.0-linux64.tar.bz2 f91946d5abd5dff8e05ab0b5acffb432 pypy-4.0.0-linux-armel.tar.bz2 1db3ae7237a8a01f61b3b4ade65684ab pypy-4.0.0-linux-armhf-raring.tar.bz2 @@ -409,7 +407,6 @@ pypy-4.0.0 sha1:: - 8de2a247e26872790090b7a7bc9128d263456ada pypy-4.0.0-freebsd64.tar.bz2 aed958fdc720b77fdd52cb826239ccbd6d01f465 pypy-4.0.0-linux64.tar.bz2 65b50e0299dc0695a8460c14b401c783216464b1 pypy-4.0.0-linux-armel.tar.bz2 663afb7b0d77ddf53c78d49dbc36c6e8349c7fbb pypy-4.0.0-linux-armhf-raring.tar.bz2 From noreply at buildbot.pypy.org Fri Oct 30 04:24:14 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 30 Oct 2015 09:24:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix cpyext Message-ID: <20151030082414.E777C1C1397@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80485:e92c8d8e17e1 Date: 2015-10-30 09:20 +0100 http://bitbucket.org/pypy/pypy/changeset/e92c8d8e17e1/ Log: fix cpyext diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py --- a/pypy/module/cpyext/stringobject.py +++ b/pypy/module/cpyext/stringobject.py @@ -287,7 +287,7 @@ if not PyString_Check(space, w_str): PyErr_BadArgument(space) - w_encoding = w_errors = space.w_None + w_encoding = w_errors = None if encoding: w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: @@ -307,7 +307,7 @@ if not PyString_Check(space, w_str): PyErr_BadArgument(space) - w_encoding = w_errors = space.w_None + w_encoding = w_errors = None if encoding: w_encoding = space.wrap(rffi.charp2str(encoding)) if errors: diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -360,7 +360,7 @@ if errors: w_errors = space.wrap(rffi.charp2str(errors)) 
else: - w_errors = space.w_None + w_errors = None return space.call_method(w_str, 'decode', w_encoding, w_errors) @cpython_api([PyObject], PyObject) @@ -391,7 +391,7 @@ if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: - w_errors = space.w_None + w_errors = None # - unicode is disallowed # - raise TypeError for non-string types @@ -484,7 +484,7 @@ if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: - w_errors = space.w_None + w_errors = None return space.call_method(w_s, 'decode', space.wrap(encoding), w_errors) globals()['PyUnicode_Decode%s' % suffix] = PyUnicode_DecodeXXX @@ -498,7 +498,7 @@ if errors: w_errors = space.wrap(rffi.charp2str(errors)) else: - w_errors = space.w_None + w_errors = None return space.call_method(w_u, 'encode', space.wrap(encoding), w_errors) globals()['PyUnicode_Encode%s' % suffix] = PyUnicode_EncodeXXX From noreply at buildbot.pypy.org Fri Oct 30 05:27:16 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 30 Oct 2015 10:27:16 +0100 (CET) Subject: [pypy-commit] pypy default: more encoding problems Message-ID: <20151030092716.967011C129E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80486:7b5e71e6e62f Date: 2015-10-30 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/7b5e71e6e62f/ Log: more encoding problems diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -302,9 +302,13 @@ data = space.getarg_w('s*', w_data).as_str() else: if space.isinstance_w(w_data, space.w_unicode): + w_errors = w_encoding = None + if self.encoding: + w_encoding = space.wrap(self.encoding) + if self.errors: + w_errors = space.wrap(self.errors) w_data = space.call_method(w_data, "encode", - space.wrap(self.encoding), - space.wrap(self.errors)) + w_encoding, w_errors) data = space.charbuf_w(w_data) self.do_direct_write(data) diff --git a/pypy/module/posix/interp_posix.py 
b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -459,9 +459,9 @@ else: def getcwdu(space): """Return the current working directory as a unicode string.""" - filesystemencoding = space.sys.filesystemencoding + w_filesystemencoding = getfilesystemencoding(space) return space.call_method(getcwd(space), 'decode', - space.wrap(filesystemencoding)) + w_filesystemencoding) def chdir(space, w_path): """Change the current working directory to the specified path.""" From noreply at buildbot.pypy.org Fri Oct 30 13:32:32 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 30 Oct 2015 18:32:32 +0100 (CET) Subject: [pypy-commit] pypy default: don't slice the input string every time for non-greedy automata Message-ID: <20151030173232.18BEB1C071E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80489:0cf2468ac04a Date: 2015-10-30 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/0cf2468ac04a/ Log: don't slice the input string every time for non-greedy automata diff --git a/pypy/interpreter/pyparser/automata.py b/pypy/interpreter/pyparser/automata.py --- a/pypy/interpreter/pyparser/automata.py +++ b/pypy/interpreter/pyparser/automata.py @@ -66,7 +66,8 @@ def recognize (self, inVec, pos = 0): crntState = self.start i = pos - for item in inVec[pos:]: + for i in range(pos, len(inVec)): + item = inVec[i] # arcMap, accept = self.states[crntState] arcMap = self.states[crntState] accept = self.accepts[crntState] From noreply at buildbot.pypy.org Fri Oct 30 13:32:34 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Fri, 30 Oct 2015 18:32:34 +0100 (CET) Subject: [pypy-commit] pypy default: don't do one to two dict lookups per parsed character Message-ID: <20151030173234.3C5B91C071E@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80490:986b29dd1c0b Date: 2015-10-30 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/986b29dd1c0b/ Log: don't do one 
to two dict lookups per parsed character this is done by computing a different representation of the state transition table (a big string, instead of many dictionaries). A bit of a "just-because" commit. diff --git a/pypy/interpreter/pyparser/automata.py b/pypy/interpreter/pyparser/automata.py --- a/pypy/interpreter/pyparser/automata.py +++ b/pypy/interpreter/pyparser/automata.py @@ -22,27 +22,61 @@ # PYPY Modification : removed all automata functions (any, maybe, # newArcPair, etc.) +ERROR_STATE = chr(255) + class DFA: # ____________________________________________________________ def __init__(self, states, accepts, start = 0): - self.states = states + """ NOT_RPYTHON """ + assert len(states) < 255 # no support for huge amounts of states + # construct string for looking up state transitions + string_states = [] * len(states) + # compute maximum + maximum = 0 + for state in states: + for key in state: + if key == DEFAULT: + continue + maximum = max(ord(key), maximum) + self.max_char = maximum + 1 + + defaults = [] + for i, state in enumerate(states): + default = ERROR_STATE + if DEFAULT in state: + default = chr(state[DEFAULT]) + defaults.append(default) + string_state = [default] * self.max_char + for key, value in state.iteritems(): + if key == DEFAULT: + continue + assert len(key) == 1 + assert ord(key) < self.max_char + string_state[ord(key)] = chr(value) + string_states.extend(string_state) + self.states = "".join(string_states) + self.defaults = "".join(defaults) self.accepts = accepts self.start = start # ____________________________________________________________ - def recognize (self, inVec, pos = 0): # greedy = True + + def _next_state(self, item, crntState): + if ord(item) >= self.max_char: + return self.defaults[crntState] + else: + return self.states[crntState * self.max_char + ord(item)] + + def recognize(self, inVec, pos = 0): crntState = self.start lastAccept = False i = pos for i in range(pos, len(inVec)): item = inVec[i] - # arcMap, accept = 
self.states[crntState] - arcMap = self.states[crntState] accept = self.accepts[crntState] - if item in arcMap: - crntState = arcMap[item] - elif DEFAULT in arcMap: - crntState = arcMap[DEFAULT] + crntState = self._next_state(item, crntState) + if crntState != ERROR_STATE: + pass elif accept: return i elif lastAccept: @@ -51,6 +85,7 @@ return i - 1 else: return -1 + crntState = ord(crntState) lastAccept = accept # if self.states[crntState][1]: if self.accepts[crntState]: @@ -63,24 +98,20 @@ # ______________________________________________________________________ class NonGreedyDFA (DFA): - def recognize (self, inVec, pos = 0): + + def recognize(self, inVec, pos = 0): crntState = self.start i = pos for i in range(pos, len(inVec)): item = inVec[i] - # arcMap, accept = self.states[crntState] - arcMap = self.states[crntState] accept = self.accepts[crntState] if accept: return i - elif item in arcMap: - crntState = arcMap[item] - elif DEFAULT in arcMap: - crntState = arcMap[DEFAULT] - else: + crntState = self._next_state(item, crntState) + if crntState == ERROR_STATE: return -1 + crntState = ord(crntState) i += 1 - # if self.states[crntState][1]: if self.accepts[crntState]: return i else: diff --git a/pypy/interpreter/pyparser/test/test_automata.py b/pypy/interpreter/pyparser/test/test_automata.py new file mode 100644 --- /dev/null +++ b/pypy/interpreter/pyparser/test/test_automata.py @@ -0,0 +1,12 @@ +from pypy.interpreter.pyparser.automata import DFA, DEFAULT + +def test_states(): + d = DFA([{"\x00": 1}, {"\x01": 0}], [False, True]) + assert d.states == "\x01\xff\xff\x00" + assert d.defaults == "\xff\xff" + assert d.max_char == 2 + + d = DFA([{"\x00": 1}, {DEFAULT: 0}], [False, True]) + assert d.states == "\x01\x00" + assert d.defaults == "\xff\x00" + assert d.max_char == 1 From noreply at buildbot.pypy.org Sat Oct 31 07:42:47 2015 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 31 Oct 2015 12:42:47 +0100 (CET) Subject: [pypy-commit] pypy default: remove debug 
cruft Message-ID: <20151031114248.011C81C07D9@cobra.cs.uni-duesseldorf.de> Author: mattip Branch: Changeset: r80491:652865747e1a Date: 2015-10-31 22:38 +1100 http://bitbucket.org/pypy/pypy/changeset/652865747e1a/ Log: remove debug cruft diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -382,11 +382,9 @@ t_elsize = dtype.elsize t_strides = strides[:] base = dtype.elsize - print 'start strides, shape, indx_array', strides, shape, indx_array for i in indx_array: t_strides[i] = base base *= shape[i] - print 'final strides', t_strides backstrides = calc_backstrides(t_strides, shape) order = support.get_order_as_CF(self.order, order) impl = ConcreteArray(shape, dtype, order, t_strides, backstrides) From noreply at buildbot.pypy.org Sat Oct 31 08:27:11 2015 From: noreply at buildbot.pypy.org (cfbolz) Date: Sat, 31 Oct 2015 13:27:11 +0100 (CET) Subject: [pypy-commit] pypy default: issue #1522 Message-ID: <20151031122711.4E1891C1328@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r80492:fbabd7db9dad Date: 2015-10-31 13:27 +0100 http://bitbucket.org/pypy/pypy/changeset/fbabd7db9dad/ Log: issue #1522 this fixes the problem of making a tiny set out of a really huge iterable without generating a list out of the iterable first. 
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1589,18 +1589,18 @@ w_set.sstorage = strategy.get_storage_from_unwrapped_list(intlist) return + length_hint = space.length_hint(w_iterable, 0) + + if jit.isconstant(length_hint): + return _pick_correct_strategy_unroll(space, w_set, w_iterable) + + _create_from_iterable(space, w_set, w_iterable) + + + at jit.unroll_safe +def _pick_correct_strategy_unroll(space, w_set, w_iterable): + iterable_w = space.listview(w_iterable) - - if len(iterable_w) == 0: - w_set.strategy = strategy = space.fromcache(EmptySetStrategy) - w_set.sstorage = strategy.get_empty_storage() - return - - _pick_correct_strategy(space, w_set, iterable_w) - - at jit.look_inside_iff(lambda space, w_set, iterable_w: - jit.loop_unrolling_heuristic(iterable_w, len(iterable_w), UNROLL_CUTOFF)) -def _pick_correct_strategy(space, w_set, iterable_w): # check for integers for w_item in iterable_w: if type(w_item) is not W_IntObject: @@ -1640,6 +1640,23 @@ w_set.strategy = space.fromcache(ObjectSetStrategy) w_set.sstorage = w_set.strategy.get_storage_from_list(iterable_w) + +create_set_driver = jit.JitDriver(name='create_set', + greens=['tp', 'strategy'], + reds='auto') + +def _create_from_iterable(space, w_set, w_iterable): + iterable = space.iteriterable(w_iterable) + w_set.strategy = strategy = space.fromcache(EmptySetStrategy) + w_set.sstorage = strategy.get_empty_storage() + + tp = space.type(w_iterable) + for w_item in space.iteriterable(w_iterable): + create_set_driver.jit_merge_point(tp=tp, strategy=w_set.strategy) + w_set.add(w_item) + + + init_signature = Signature(['some_iterable'], None, None) init_defaults = [None] def _initialize_set(space, w_obj, w_iterable=None):